Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi')
150 files changed, 16122 insertions, 8472 deletions
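The largest structural change in the hunks below is the EEPROM rework: the shared iwl-eeprom-read.c/iwl-eeprom-parse.c files are removed, their EEPROM/OTP access and parsing code moves into dvm/eeprom.c, and the entry points iwl_read_eeprom() and iwl_parse_eeprom_data() are now declared in dvm/agn.h. A minimal sketch of how the DVM op-mode consumes that API, modeled on the iwl_op_mode_dvm_start() hunks further down; the wrapper function and its error-path cleanup are illustrative assumptions, not code from the patch:

/*
 * Hypothetical helper showing the read -> parse -> free contract of the
 * relocated EEPROM API. Field names match struct iwl_priv as used in
 * dvm/main.c; the helper itself is not part of this series.
 */
static int example_dvm_load_nvm(struct iwl_priv *priv)
{
	int err;

	/* read the raw EEPROM/OTP image into a kmalloc'ed blob */
	err = iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
			      &priv->eeprom_blob_size);
	if (err)
		return err;

	/* parse the blob into a newly allocated struct iwl_nvm_data */
	priv->nvm_data = iwl_parse_eeprom_data(priv->trans, priv->cfg,
					       priv->eeprom_blob,
					       priv->eeprom_blob_size);
	if (!priv->nvm_data) {
		kfree(priv->eeprom_blob);
		return -ENOMEM;
	}

	/* per the kernel-doc in agn.h, release with iwl_free_nvm_data() */
	return 0;
}

Most of the cfg/ churn is mechanical by comparison: repeated MODULE_FIRMWARE() declarations for RF modules that also ship a PNVM image are collapsed into the new IWL_FW_AND_PNVM() macro (which, judging by its name and usage, registers both the .ucode image and its matching .pnvm file from one firmware prefix), and each family's UCODE_API_MIN/MAX window is raised, e.g. BZ and SC to 92..96.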
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 20971304fdef..4b04865fc2c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -46,6 +46,15 @@ config IWLWIFI if IWLWIFI +config IWLWIFI_KUNIT_TESTS + tristate + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option for iwlwifi kunit tests. + + If unsure, say N. + config IWLWIFI_LEDS bool depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211 diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index b983982aee45..19c4ce6f2465 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -4,20 +4,21 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi.o iwlwifi-objs += iwl-io.o iwlwifi-objs += iwl-drv.o iwlwifi-objs += iwl-debug.o -iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o +iwlwifi-objs += iwl-nvm-utils.o +iwlwifi-objs += iwl-utils.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o -iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o +iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o -iwlwifi-objs += queue/tx.o iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o +iwlwifi-objs += fw/regulatory.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o iwlwifi-$(CONFIG_EFI) += fw/uefi.o @@ -33,4 +34,6 @@ obj-$(CONFIG_IWLDVM) += dvm/ obj-$(CONFIG_IWLMVM) += mvm/ obj-$(CONFIG_IWLMEI) += mei/ +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/ + CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index d594694206b3..2e2fcb3807ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -13,7 +13,7 @@ #define IWL_22000_UCODE_API_MAX 77 /* Lowest firmware API version supported */ -#define IWL_22000_UCODE_API_MIN 50 +#define IWL_22000_UCODE_API_MIN 77 /* NVM versions */ #define IWL_22000_NVM_VERSION 0x0a1d diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c index 134635c70ce8..dcba1a5d793b 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -10,10 +10,10 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_AX210_UCODE_API_MAX 86 +#define IWL_AX210_UCODE_API_MAX 89 /* Lowest firmware API version supported */ -#define IWL_AX210_UCODE_API_MIN 59 +#define 
IWL_AX210_UCODE_API_MIN 77 /* NVM versions */ #define IWL_AX210_NVM_VERSION 0x0a1d @@ -31,40 +31,21 @@ #define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0" #define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0" #define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0" -#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0" #define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0" #define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0" #define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0" -#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0" #define IWL_MA_B_HR_B_FW_PRE "iwlwifi-ma-b0-hr-b0" #define IWL_MA_B_GF_A_FW_PRE "iwlwifi-ma-b0-gf-a0" #define IWL_MA_B_GF4_A_FW_PRE "iwlwifi-ma-b0-gf4-a0" -#define IWL_MA_B_MR_A_FW_PRE "iwlwifi-ma-b0-mr-a0" #define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \ IWL_SO_A_JF_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \ IWL_SO_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_SO_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_TY_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" #define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \ IWL_MA_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_MR_A_FW_PRE "-" __stringify(api) ".ucode" #define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \ IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_B_GF_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_B_GF4_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_B_MR_A_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl_ax210_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -267,13 +248,6 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { .trans.low_latency_xtal = true, }; -const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = { - .fw_name_pre = IWL_SO_A_MR_A_FW_PRE, - .uhb_supported = false, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - const struct iwl_cfg iwl_cfg_ma = { .fw_name_mac = "ma", .uhb_supported = true, @@ -289,13 +263,11 @@ const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = { MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +IWL_FW_AND_PNVM(IWL_SO_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_TY_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX); MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +IWL_FW_AND_PNVM(IWL_MA_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_MA_A_GF4_A_FW_PRE, IWL_AX210_UCODE_API_MAX); MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); 
-MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +IWL_FW_AND_PNVM(IWL_MA_B_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_MA_B_GF4_A_FW_PRE, IWL_AX210_UCODE_API_MAX); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index 82da957adcf6..efa3e0e35f79 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -10,10 +10,10 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_BZ_UCODE_API_MAX 86 +#define IWL_BZ_UCODE_API_MAX 96 /* Lowest firmware API version supported */ -#define IWL_BZ_UCODE_API_MIN 80 +#define IWL_BZ_UCODE_API_MIN 92 /* NVM versions */ #define IWL_BZ_NVM_VERSION 0x0a1d @@ -37,20 +37,6 @@ #define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_BZ_A_FM_B_MODULE_FIRMWARE(api) \ - IWL_BZ_A_FM_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_BZ_A_FM_C_MODULE_FIRMWARE(api) \ - IWL_BZ_A_FM_C_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_BZ_A_FM4_B_MODULE_FIRMWARE(api) \ - IWL_BZ_A_FM4_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \ - IWL_GL_B_FM_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_GL_C_FM_C_MODULE_FIRMWARE(api) \ - IWL_GL_C_FM_C_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl_bz_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -129,10 +115,6 @@ static const struct iwl_base_params iwl_bz_base_params = { IWL_DEVICE_BZ_COMMON, \ .ht_params = &iwl_22000_ht_params -#define IWL_DEVICE_GL_A \ - IWL_DEVICE_BZ_COMMON, \ - .ht_params = &iwl_gl_a_ht_params - /* * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an * A-MPDU, with additional overhead to account for processing time. 
@@ -152,7 +134,22 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; +const struct iwl_cfg_trans_params iwl_gl_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_BZ, + .base_params = &iwl_bz_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, +}; + const char iwl_bz_name[] = "Intel(R) TBD Bz device"; +const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz"; +const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz"; +const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz"; +const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz"; const struct iwl_cfg iwl_cfg_bz = { .fw_name_mac = "bz", @@ -170,12 +167,11 @@ const struct iwl_cfg iwl_cfg_gl = { .num_rbds = IWL_NUM_RBDS_BZ_EHT, }; - MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_BZ_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_BZ_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c new file mode 100644 index 000000000000..ab7c0f8d54f4 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 Intel Corporation + */ +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-prph.h" +#include "fw/api/txq.h" + +/* Highest firmware API version supported */ +#define IWL_DR_UCODE_API_MAX 96 + +/* Lowest firmware API version supported */ +#define IWL_DR_UCODE_API_MIN 96 + +/* NVM versions */ +#define IWL_DR_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL_DR_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_DR_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_DR_DCCM2_OFFSET 0x880000 +#define IWL_DR_DCCM2_LEN 0x8000 +#define IWL_DR_SMEM_OFFSET 0x400000 +#define IWL_DR_SMEM_LEN 0xD0000 + +#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0" +#define IWL_BR_A_PET_A_FW_PRE "iwlwifi-br-a0-petc-a0" +#define IWL_BR_A_PE_A_FW_PRE "iwlwifi-br-a0-pe-a0" + +#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \ + IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \ + IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \ + IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode" + +static const struct iwl_base_params iwl_dr_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + 
.max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +#define IWL_DEVICE_DR_COMMON \ + .ucode_api_max = IWL_DR_UCODE_API_MAX, \ + .ucode_api_min = IWL_DR_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_DR_DCCM_OFFSET, \ + .dccm_len = IWL_DR_DCCM_LEN, \ + .dccm2_offset = IWL_DR_DCCM2_OFFSET, \ + .dccm2_len = IWL_DR_DCCM2_LEN, \ + .smem_offset = IWL_DR_SMEM_OFFSET, \ + .smem_len = IWL_DR_SMEM_LEN, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x30, \ + .nvm_ver = IWL_DR_NVM_VERSION, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0xD0000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + }, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_DR, \ + .trans.base_params = &iwl_dr_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + }, \ + .mon_dbgi_regs = { \ + .write_ptr = { \ + .addr = DBGI_SRAM_FIFO_POINTERS, \ + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ + }, \ + } + +#define IWL_DEVICE_DR \ + IWL_DEVICE_DR_COMMON, \ + .uhb_supported = true, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .num_rbds = IWL_NUM_RBDS_DR_EHT, \ + .ht_params = &iwl_22000_ht_params + +/* + * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an + * A-MPDU, with additional overhead to account for processing time. 
+ */ +#define IWL_NUM_RBDS_DR_EHT (512 * 16) + +const struct iwl_cfg_trans_params iwl_dr_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_DR, + .base_params = &iwl_dr_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const char iwl_dr_name[] = "Intel(R) TBD Dr device"; + +const struct iwl_cfg iwl_cfg_dr = { + .fw_name_mac = "dr", + IWL_DEVICE_DR, +}; + +const struct iwl_cfg_trans_params iwl_br_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_DR, + .base_params = &iwl_dr_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const char iwl_br_name[] = "Intel(R) TBD Br device"; + +const struct iwl_cfg iwl_cfg_br = { + .fw_name_mac = "br", + IWL_DEVICE_DR, +}; + +MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index 80eb9b499538..c9eeb3f6704e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -10,10 +10,10 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_SC_UCODE_API_MAX 86 +#define IWL_SC_UCODE_API_MAX 96 /* Lowest firmware API version supported */ -#define IWL_SC_UCODE_API_MIN 82 +#define IWL_SC_UCODE_API_MIN 92 /* NVM versions */ #define IWL_SC_NVM_VERSION 0x0a1d @@ -33,21 +33,15 @@ #define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0" #define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0" #define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0" +#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0" +#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0" +#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0" +#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0" -#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \ - IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(api) \ - IWL_SC_A_FM_C_FW_PRE "-" __stringify(api) ".ucode" #define IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(api) \ IWL_SC_A_HR_A_FW_PRE "-" __stringify(api) ".ucode" #define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \ IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(api) \ - IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(api) \ - IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \ - IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl_sc_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -124,6 +118,9 @@ static const struct iwl_base_params iwl_sc_base_params = { #define IWL_DEVICE_SC \ IWL_DEVICE_BZ_COMMON, \ + .uhb_supported = true, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .num_rbds = IWL_NUM_RBDS_SC_EHT, \ .ht_params = 
&iwl_22000_ht_params /* @@ -149,16 +146,31 @@ const char iwl_sc_name[] = "Intel(R) TBD Sc device"; const struct iwl_cfg iwl_cfg_sc = { .fw_name_mac = "sc", - .uhb_supported = true, IWL_DEVICE_SC, - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_SC_EHT, }; -MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device"; + +const struct iwl_cfg iwl_cfg_sc2 = { + .fw_name_mac = "sc2", + IWL_DEVICE_SC, +}; + +const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device"; + +const struct iwl_cfg iwl_cfg_sc2f = { + .fw_name_mac = "sc2f", + IWL_DEVICE_SC, +}; + +IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX); MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +IWL_FW_AND_PNVM(IWL_SC_A_GF_A_FW_PRE, IWL_SC_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_SC_A_GF4_A_FW_PRE, IWL_SC_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_SC2F_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX); +IWL_FW_AND_PNVM(IWL_SC2F_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile index 0486b17d7c41..abcf8aeb010d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile @@ -2,7 +2,7 @@ # DVM obj-$(CONFIG_IWLDVM) += iwldvm.o iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o -iwldvm-objs += lib.o calib.o tt.o sta.o rx.o +iwldvm-objs += lib.o calib.o tt.o sta.o rx.o eeprom.o iwldvm-objs += power.o iwldvm-objs += scan.o @@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o -ccflags-y += -I $(srctree)/$(src)/../ +ccflags-y += -I $(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index fefaa414272b..a13add556a7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2021 Intel Corporation + * Copyright (C) 2005-2014, 2021, 2024 Intel Corporation */ #ifndef __iwl_agn_h__ #define __iwl_agn_h__ @@ -385,6 +385,25 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state) iwl_trans_set_pmi(priv->trans, state); } +/** + * iwl_parse_eeprom_data - parse EEPROM data and return values + * + * @trans: transport we're parsing for, for debug only + * @cfg: device configuration for parsing and overrides + * @eeprom: the EEPROM data + * @eeprom_size: length of the EEPROM data + * + * This function parses all EEPROM values we need and then + * returns a (newly allocated) struct containing all the + * relevant values for driver use. The struct must be freed + * later with iwl_free_nvm_data(). 
+ */ +struct iwl_nvm_data * +iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const u8 *eeprom, size_t eeprom_size); + +int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); + #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir); #else diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 04864d3fda63..3f49c0bccb28 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2023 Intel Corporation + * Copyright (C) 2005-2014, 2023-2024 Intel Corporation */ /* * Please use this file (commands.h) only for uCode API definitions. @@ -177,7 +177,7 @@ enum { * *****************************************************************************/ -/** +/* * iwlagn rate_n_flags bit fields * * rate_n_flags format is used in following iwlagn commands: @@ -251,7 +251,7 @@ enum { #define RATE_MCS_SGI_POS 13 #define RATE_MCS_SGI_MSK 0x2000 -/** +/* * rate_n_flags Tx antenna masks * bit14:16 */ @@ -2767,7 +2767,7 @@ struct iwl_missed_beacon_notif { * *****************************************************************************/ -/** +/* * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) * * This command sets up the Rx signal detector for a sensitivity level that diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 25283e4b849f..4ac8b862ad41 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -19,7 +19,7 @@ #include <linux/mutex.h> #include "fw/img.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-agn-hw.h" diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index 39e40901fa46..48a8349680fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -12,7 +12,7 @@ */ #include "iwl-io.h" #include "iwl-prph.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "agn.h" #include "dev.h" diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c index 5aab64c63a13..cdc05f7e75a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c @@ -1,16 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation - * Copyright (C) 2015 Intel Mobile Communications GmbH + * Copyright (C) 2005-2014, 2018-2019, 2021, 2024 Intel Corporation */ #include <linux/types.h> #include <linux/slab.h> #include <linux/export.h> + #include "iwl-drv.h" -#include "iwl-modparams.h" -#include "iwl-eeprom-parse.h" +#include "iwl-debug.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "iwl-csr.h" +#include "agn.h" -#if IS_ENABLED(CONFIG_IWLDVM) /* EEPROM offset definitions */ /* indirect access definitions */ @@ -79,7 +81,6 @@ enum eeprom_sku_bits { #define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ #define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ - /* * EEPROM bands * These are the channel numbers from each band in the order @@ -257,7 +258,6 @@ struct 
iwl_eeprom_channel { s8 max_power_avg; } __packed; - enum iwl_eeprom_enhanced_txpwr_flags { IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0), IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1), @@ -270,7 +270,7 @@ enum iwl_eeprom_enhanced_txpwr_flags { }; /** - * struct iwl_eeprom_enhanced_txpwr + * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limits * @flags: entry flags * @channel: channel number * @chain_a_max: chain a max power in 1/2 dBm @@ -648,114 +648,385 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, return n_channels; } -#endif +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ -int iwl_init_sband_channels(struct iwl_nvm_data *data, - struct ieee80211_supported_band *sband, - int n_channels, enum nl80211_band band) +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. + */ +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + + +static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) { - struct ieee80211_channel *chan = &data->channels[0]; - int n = 0, idx = 0; + u16 count; + int ret; + + for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM); + + /* See if we got it */ + ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM, + IWL_EEPROM_SEM_TIMEOUT); + if (ret >= 0) { + IWL_DEBUG_EEPROM(trans->dev, + "Acquired semaphore after %d tries.\n", + count+1); + return ret; + } + } - while (idx < n_channels && chan->band != band) - chan = &data->channels[++idx]; + return ret; +} - sband->channels = &data->channels[idx]; +static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) +{ + iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM); +} - while (idx < n_channels && chan->band == band) { - chan = &data->channels[++idx]; - n++; - } +static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp) +{ + u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; - sband->n_channels = n; + IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp); - return n; + switch (gp) { + case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: + if (!nvm_is_otp) { + IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", + gp); + return -ENOENT; + } + return 0; + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + if (nvm_is_otp) { + IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); + return -ENOENT; + } + return 0; + case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: + default: + IWL_ERR(trans, + "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n", + nvm_is_otp ? 
"OTP" : "EEPROM", gp); + return -ENOENT; + } } -#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ -#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +/****************************************************************************** + * + * OTP related functions + * +******************************************************************************/ -void iwl_init_ht_hw_capab(struct iwl_trans *trans, - struct iwl_nvm_data *data, - struct ieee80211_sta_ht_cap *ht_info, - enum nl80211_band band, - u8 tx_chains, u8 rx_chains) +static void iwl_set_otp_access_absolute(struct iwl_trans *trans) { - const struct iwl_cfg *cfg = trans->cfg; - int max_bit_rate = 0; - - tx_chains = hweight8(tx_chains); - if (cfg->rx_with_siso_diversity) - rx_chains = 1; - else - rx_chains = hweight8(rx_chains); - - if (!(data->sku_cap_11n_enable) || - (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) || - !cfg->ht_params) { - ht_info->ht_supported = false; - return; + iwl_read32(trans, CSR_OTP_GP_REG); + + iwl_clear_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_OTP_ACCESS_MODE); +} + +static int iwl_nvm_is_otp(struct iwl_trans *trans) +{ + u32 otpgp; + + /* OTP only valid for CP/PP and after */ + switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { + case CSR_HW_REV_TYPE_NONE: + IWL_ERR(trans, "Unknown hardware type\n"); + return -EIO; + case CSR_HW_REV_TYPE_5300: + case CSR_HW_REV_TYPE_5350: + case CSR_HW_REV_TYPE_5100: + case CSR_HW_REV_TYPE_5150: + return 0; + default: + otpgp = iwl_read32(trans, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) + return 1; + return 0; } +} + +static int iwl_init_otp_access(struct iwl_trans *trans) +{ + int ret; + + ret = iwl_finish_nic_init(trans); + if (ret) + return ret; + + iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); - if (data->sku_cap_mimo_disabled) - rx_chains = 1; + /* + * CSR auto clock gate disable bit - + * this is only applicable for HW with OTP shadow RAM + */ + if (trans->trans_cfg->base_params->shadow_ram_support) + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); - ht_info->ht_supported = true; - ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; + return 0; +} - if (cfg->ht_params->stbc) { - ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); +static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, + __le16 *eeprom_data) +{ + int ret = 0; + u32 r; + u32 otpgp; + + iwl_write32(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + ret = iwl_poll_bit(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(trans, "Time out reading OTP[%d]\n", addr); + return ret; + } + r = iwl_read32(trans, CSR_EEPROM_REG); + /* check for ECC errors: */ + otpgp = iwl_read32(trans, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { + /* stop in this case */ + /* set the uncorrectable OTP ECC bit for acknowledgment */ + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n"); + return -EINVAL; + } + if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { + /* continue in this case */ + /* set the correctable OTP ECC bit for acknowledgment */ + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); + IWL_ERR(trans, "Correctable OTP ECC error, continue read\n"); + } + *eeprom_data = cpu_to_le16(r >> 16); + return 0; +} - if 
(tx_chains > 1) - ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; +/* + * iwl_is_otp_empty: check for empty OTP + */ +static bool iwl_is_otp_empty(struct iwl_trans *trans) +{ + u16 next_link_addr = 0; + __le16 link_value; + bool is_empty = false; + + /* locate the beginning of OTP link list */ + if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) { + if (!link_value) { + IWL_ERR(trans, "OTP is empty\n"); + is_empty = true; + } + } else { + IWL_ERR(trans, "Unable to read first block of OTP list.\n"); + is_empty = true; } - if (cfg->ht_params->ldpc) - ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; + return is_empty; +} - if (trans->trans_cfg->mq_rx_supported || - iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) - ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; - ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; +/* + * iwl_find_otp_image: find EEPROM image in OTP + * finding the OTP block that contains the EEPROM image. + * the last valid block on the link list (the block _before_ the last block) + * is the block we should read and used to configure the device. + * If all the available OTP blocks are full, the last block will be the block + * we should read and used to configure the device. + * only perform this operation if shadow RAM is disabled + */ +static int iwl_find_otp_image(struct iwl_trans *trans, + u16 *validblockaddr) +{ + u16 next_link_addr = 0, valid_addr; + __le16 link_value = 0; + int usedblocks = 0; - ht_info->mcs.rx_mask[0] = 0xFF; - ht_info->mcs.rx_mask[1] = 0x00; - ht_info->mcs.rx_mask[2] = 0x00; + /* set addressing mode to absolute to traverse the link list */ + iwl_set_otp_access_absolute(trans); - if (rx_chains >= 2) - ht_info->mcs.rx_mask[1] = 0xFF; - if (rx_chains >= 3) - ht_info->mcs.rx_mask[2] = 0xFF; + /* checking for empty OTP or error */ + if (iwl_is_otp_empty(trans)) + return -EINVAL; - if (cfg->ht_params->ht_greenfield_support) - ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; - ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + /* + * start traverse link list + * until reach the max number of OTP blocks + * different devices have different number of OTP blocks + */ + do { + /* save current valid block address + * check for more block on the link list + */ + valid_addr = next_link_addr; + next_link_addr = le16_to_cpu(link_value) * sizeof(u16); + IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", + usedblocks, next_link_addr); + if (iwl_read_otp_word(trans, next_link_addr, &link_value)) + return -EINVAL; + if (!link_value) { + /* + * reach the end of link list, return success and + * set address point to the starting address + * of the image + */ + *validblockaddr = valid_addr; + /* skip first 2 bytes (link list pointer) */ + *validblockaddr += 2; + return 0; + } + /* more in the link list, continue */ + usedblocks++; + } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items); - max_bit_rate = MAX_BIT_RATE_20_MHZ; + /* OTP has no valid blocks */ + IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); + return -EINVAL; +} - if (cfg->ht_params->ht40_bands & BIT(band)) { - ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - ht_info->cap |= IEEE80211_HT_CAP_SGI_40; - max_bit_rate = MAX_BIT_RATE_40_MHZ; +/* + * iwl_read_eeprom - read EEPROM contents + * + * Load the EEPROM contents from adapter and return it + * and its size. + * + * NOTE: This routine uses the non-debug IO access functions. 
+ */ +int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) +{ + __le16 *e; + u32 gp = iwl_read32(trans, CSR_EEPROM_GP); + int sz; + int ret; + u16 addr; + u16 validblockaddr = 0; + u16 cache_addr = 0; + int nvm_is_otp; + + if (!eeprom || !eeprom_size) + return -EINVAL; + + nvm_is_otp = iwl_nvm_is_otp(trans); + if (nvm_is_otp < 0) + return nvm_is_otp; + + sz = trans->trans_cfg->base_params->eeprom_size; + IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); + + e = kmalloc(sz, GFP_KERNEL); + if (!e) + return -ENOMEM; + + ret = iwl_eeprom_verify_signature(trans, nvm_is_otp); + if (ret < 0) { + IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); + goto err_free; } - /* Highest supported Rx data rate */ - max_bit_rate *= rx_chains; - WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); - ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); - - /* Tx MCS capabilities */ - ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (tx_chains != rx_chains) { - ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; - ht_info->mcs.tx_params |= ((tx_chains - 1) << - IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + /* Make sure driver (instead of uCode) is allowed to read EEPROM */ + ret = iwl_eeprom_acquire_semaphore(trans); + if (ret < 0) { + IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); + goto err_free; } + + if (nvm_is_otp) { + ret = iwl_init_otp_access(trans); + if (ret) { + IWL_ERR(trans, "Failed to initialize OTP access.\n"); + goto err_unlock; + } + + iwl_write32(trans, CSR_EEPROM_GP, + iwl_read32(trans, CSR_EEPROM_GP) & + ~CSR_EEPROM_GP_IF_OWNER_MSK); + + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + /* traversing the linked list if no shadow ram supported */ + if (!trans->trans_cfg->base_params->shadow_ram_support) { + ret = iwl_find_otp_image(trans, &validblockaddr); + if (ret) + goto err_unlock; + } + for (addr = validblockaddr; addr < validblockaddr + sz; + addr += sizeof(u16)) { + __le16 eeprom_data; + + ret = iwl_read_otp_word(trans, addr, &eeprom_data); + if (ret) + goto err_unlock; + e[cache_addr / 2] = eeprom_data; + cache_addr += sizeof(u16); + } + } else { + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + u32 r; + + iwl_write32(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + + ret = iwl_poll_bit(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(trans, + "Time out reading EEPROM[%d]\n", addr); + goto err_unlock; + } + r = iwl_read32(trans, CSR_EEPROM_REG); + e[addr / 2] = cpu_to_le16(r >> 16); + } + } + + IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", + nvm_is_otp ? 
"OTP" : "EEPROM"); + + iwl_eeprom_release_semaphore(trans); + + *eeprom_size = sz; + *eeprom = (u8 *)e; + return 0; + + err_unlock: + iwl_eeprom_release_semaphore(trans); + err_free: + kfree(e); + + return ret; } -#if IS_ENABLED(CONFIG_IWLDVM) static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size) @@ -790,7 +1061,6 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg, } /* EEPROM data functions */ - struct iwl_nvm_data * iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size) @@ -837,8 +1107,8 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, data->kelvin_temperature = *(__le16 *)tmp; data->kelvin_voltage = *((__le16 *)tmp + 1); - radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_RADIO_CONFIG); + radio_cfg = + iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG); data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg); data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg); @@ -878,5 +1148,3 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, kfree(data); return NULL; } -IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); -#endif diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 71f67a019cf6..5ca85d90a8d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -13,7 +13,7 @@ #include <linux/netdevice.h> #include <net/mac80211.h> #include <linux/etherdevice.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "iwl-io.h" #include "iwl-trans.h" #include "iwl-modparams.h" diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 5f3d5b15f727..56d19a034c24 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(C) 2018 - 2019, 2022 - 2023 Intel Corporation + * Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
@@ -145,8 +145,6 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, #ifdef CONFIG_PM_SLEEP if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec && - priv->trans->ops->d3_suspend && - priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | @@ -302,7 +300,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw) return ret; } -static void iwlagn_mac_stop(struct ieee80211_hw *hw) +static void iwlagn_mac_stop(struct ieee80211_hw *hw, bool suspend) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); @@ -730,8 +728,6 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, ret = iwl_sta_rx_agg_stop(priv, sta, tid); break; case IEEE80211_AMPDU_TX_START: - if (!priv->trans->ops->txq_enable) - break; if (!iwl_enable_tx_ampdu()) break; IWL_DEBUG_HT(priv, "start Tx\n"); @@ -1569,7 +1565,21 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(priv, "leave\n"); } +static void +iwlagn_mac_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + + if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART) + iwl_trans_finish_sw_reset(priv->trans); +} + const struct ieee80211_ops iwlagn_hw_ops = { + .add_chanctx = ieee80211_emulate_add_chanctx, + .remove_chanctx = ieee80211_emulate_remove_chanctx, + .change_chanctx = ieee80211_emulate_change_chanctx, + .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = iwlagn_mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = iwlagn_mac_start, @@ -1598,6 +1608,7 @@ const struct ieee80211_ops iwlagn_hw_ops = { .tx_last_beacon = iwlagn_mac_tx_last_beacon, .event_callback = iwlagn_mac_event_callback, .set_tim = iwlagn_mac_set_tim, + .reconfig_complete = iwlagn_mac_reconfig_complete, }; /* This function both allocates and initializes hw and priv. */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 8774dd7b921e..30789ba06d9d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved. + * Copyright(c) 2024 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well @@ -25,8 +26,7 @@ #include <asm/div64.h> -#include "iwl-eeprom-read.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-io.h" #include "iwl-trans.h" #include "iwl-op-mode.h" @@ -48,7 +48,7 @@ #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_LICENSE("GPL"); -MODULE_IMPORT_NS(IWLWIFI); +MODULE_IMPORT_NS("IWLWIFI"); /* Please keep this array *SORTED* by hex value. * Access is done through binary search. @@ -1241,7 +1241,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, STATISTICS_NOTIFICATION, REPLY_TX, }; - int i; + int i, err; /************************ * 1. 
Allocating HW data @@ -1249,6 +1249,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, hw = iwl_alloc_all(); if (!hw) { pr_err("%s: Cannot allocate network device\n", trans->name); + err = -ENOMEM; goto out; } @@ -1299,8 +1300,10 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, break; } - if (WARN_ON(!priv->lib)) + if (WARN_ON(!priv->lib)) { + err = -ENODEV; goto out_free_hw; + } /* * Populate the state variables that the transport layer needs @@ -1325,8 +1328,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, iwlwifi_mod_params.amsdu_size); } - trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED; - trans_cfg.command_groups = iwl_dvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups); @@ -1379,12 +1380,14 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, IWL_INFO(priv, "Detected %s, REV=0x%X\n", priv->trans->name, priv->trans->hw_rev); - if (iwl_trans_start_hw(priv->trans)) + err = iwl_trans_start_hw(priv->trans); + if (err) goto out_free_hw; /* Read the EEPROM */ - if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob, - &priv->eeprom_blob_size)) { + err = iwl_read_eeprom(priv->trans, &priv->eeprom_blob, + &priv->eeprom_blob_size); + if (err) { IWL_ERR(priv, "Unable to init EEPROM\n"); goto out_free_hw; } @@ -1395,13 +1398,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, priv->nvm_data = iwl_parse_eeprom_data(priv->trans, priv->cfg, priv->eeprom_blob, priv->eeprom_blob_size); - if (!priv->nvm_data) + if (!priv->nvm_data) { + err = -ENOMEM; goto out_free_eeprom_blob; + } - if (iwl_nvm_check_version(priv->nvm_data, priv->trans)) + err = iwl_nvm_check_version(priv->nvm_data, priv->trans); + if (err) goto out_free_eeprom; - if (iwl_eeprom_init_hw_params(priv)) + err = iwl_eeprom_init_hw_params(priv); + if (err) goto out_free_eeprom; /* extract MAC Address */ @@ -1448,7 +1455,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, atomic_set(&priv->queue_stop_count[i], 0); } - if (iwl_init_drv(priv)) + err = iwl_init_drv(priv); + if (err) goto out_free_eeprom; /* At this point both hw and priv are initialized. */ @@ -1482,7 +1490,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, * * 7. Setup and register with mac80211 and debugfs **************************************************/ - if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) + err = iwlagn_mac_setup_register(priv, &fw->ucode_capa); + if (err) goto out_destroy_workqueue; iwl_dbgfs_register(priv, dbgfs_dir); @@ -1502,8 +1511,7 @@ out_free_eeprom: out_free_hw: ieee80211_free_hw(priv->hw); out: - op_mode = NULL; - return op_mode; + return ERR_PTR(err); } static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) @@ -1897,17 +1905,9 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) unsigned int reload_msec; unsigned long reload_jiffies; - if (iwl_have_debug_level(IWL_DL_FW)) - iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); - /* uCode is no longer loaded. 
*/ priv->ucode_loaded = false; - /* Set the FW error flag -- cleared on iwl_down */ - set_bit(STATUS_FW_ERROR, &priv->status); - - iwl_abort_notification_waits(&priv->notif_wait); - /* Keep the restart process from trying to send host * commands by clearing the ready bit */ clear_bit(STATUS_READY, &priv->status); @@ -1944,27 +1944,43 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) } } -static void iwl_nic_error(struct iwl_op_mode *op_mode, bool sync) +static void iwl_nic_error(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + iwl_abort_notification_waits(&priv->notif_wait); + + if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL && iwl_check_for_ct_kill(priv)) + return; + IWL_ERR(priv, "Loaded firmware version: %s\n", priv->fw->fw_version); - iwl_dump_nic_error_log(priv); - iwl_dump_nic_event_log(priv, false, NULL); + if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL) { + IWL_ERR(priv, "Command queue full!\n"); + } else { + iwl_dump_nic_error_log(priv); + iwl_dump_nic_event_log(priv, false, NULL); + } - iwlagn_fw_error(priv, false); + if (iwl_have_debug_level(IWL_DL_FW)) + iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); } -static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) +static bool iwlagn_sw_reset(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - if (!iwl_check_for_ct_kill(priv)) { - IWL_ERR(priv, "Restarting adapter queue is full\n"); - iwlagn_fw_error(priv, false); - } + if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL && iwl_check_for_ct_kill(priv)) + return false; + + iwlagn_fw_error(priv, false); + return true; } #define EEPROM_RF_CONFIG_TYPE_MAX 0x3 @@ -2119,7 +2135,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = { .hw_rf_kill = iwl_set_hw_rfkill_state, .free_skb = iwl_free_skb, .nic_error = iwl_nic_error, - .cmd_queue_full = iwl_cmd_queue_full, + .sw_reset = iwlagn_sw_reset, .nic_config = iwl_nic_config, .wimax_active = iwl_wimax_active, }; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index f4a6f76cf193..8879e668ef0d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -2673,20 +2673,16 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); /* Get max rate if user set max rate */ - if (lq_sta) { - lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1; - if ((sband->band == NL80211_BAND_5GHZ) && - (lq_sta->max_rate_idx != -1)) - lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; - if ((lq_sta->max_rate_idx < 0) || - (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) - lq_sta->max_rate_idx = -1; - } + lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1; + if (sband->band == NL80211_BAND_5GHZ && lq_sta->max_rate_idx != -1) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if (lq_sta->max_rate_idx < 0 || lq_sta->max_rate_idx >= IWL_RATE_COUNT) + lq_sta->max_rate_idx = -1; - /* Treat uninitialized rate scaling data same as non-existing. 
*/ - if (lq_sta && !lq_sta->drv) { + if (!lq_sta->drv) { IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); - priv_sta = NULL; + /* mac80211 already set up the data for using low rates */ + return; } rate_idx = lq_sta->last_txrate_idx; @@ -2756,7 +2752,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i lq_sta = &sta_priv->lq_sta; sband = hw->wiphy->bands[conf->chandef.chan->band]; - lq_sta->lq.sta_id = sta_id; for (j = 0; j < LQ_SIZE; j++) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index e9d2717362cf..7f67e602940c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -13,7 +13,7 @@ #include <linux/slab.h> #include <linux/sched.h> #include <net/mac80211.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "iwl-trans.h" #include "iwl-io.h" diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index 23dfcda0dd86..4af792d41dce 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2007 - 2014, 2023 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2014, 2023-2024 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -64,7 +64,7 @@ struct iwl_tt_trans { }; /** - * struct iwl_tt_mgnt - Thermal Throttling Management structure + * struct iwl_tt_mgmt - Thermal Throttling Management structure * @advanced_tt: advanced thermal throttle required * @state: current Thermal Throttling state * @tt_power_mode: Thermal Throttling power mode index diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index dcc4810cb324..efa7b673ebc7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,10 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2023 Intel Corporation + * Copyright (C) 2019-2024 Intel Corporation */ #include <linux/uuid.h> -#include <linux/dmi.h> #include "iwl-drv.h" #include "iwl-debug.h" #include "acpi.h" @@ -13,68 +12,22 @@ const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6, 0xA5, 0xB3, 0x1F, 0x73, 0x8E, 0x28, 0x5A, 0xDE); -IWL_EXPORT_SYMBOL(iwl_guid); -const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29, - 0x81, 0x4F, 0x75, 0xE4, - 0xDD, 0x26, 0xB5, 0xFD); -IWL_EXPORT_SYMBOL(iwl_rfi_guid); - -static const struct dmi_system_id dmi_ppag_approved_list[] = { - { .ident = "HP", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - }, - }, - { .ident = "SAMSUNG", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), - }, - }, - { .ident = "MSFT", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), - }, - }, - { .ident = "ASUS", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - }, - }, - { .ident = "GOOGLE-HP", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Google"), - DMI_MATCH(DMI_BOARD_VENDOR, "HP"), - }, - }, - { .ident = "GOOGLE-ASUS", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Google"), - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."), - }, - }, - { .ident = 
"GOOGLE-SAMSUNG", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Google"), - DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), - }, - }, - { .ident = "DELL", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - }, - }, - { .ident = "DELL", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), - }, - }, - { .ident = "RAZER", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Razer"), - }, - }, - {} +static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = { + [DSM_FUNC_QUERY] = sizeof(u32), + [DSM_FUNC_DISABLE_SRD] = sizeof(u8), + [DSM_FUNC_ENABLE_INDONESIA_5G2] = sizeof(u8), + [DSM_FUNC_ENABLE_6E] = sizeof(u32), + [DSM_FUNC_REGULATORY_CONFIG] = sizeof(u32), + /* Not supported in driver */ + [5] = (size_t)0, + [DSM_FUNC_11AX_ENABLEMENT] = sizeof(u32), + [DSM_FUNC_ENABLE_UNII4_CHAN] = sizeof(u32), + [DSM_FUNC_ACTIVATE_CHANNEL] = sizeof(u32), + [DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32), + [DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32), + [DSM_FUNC_RFI_CONFIG] = sizeof(u32), + [DSM_FUNC_ENABLE_11BE] = sizeof(u32), }; static int iwl_acpi_get_handle(struct device *dev, acpi_string method, @@ -126,9 +79,9 @@ static void *iwl_acpi_get_object(struct device *dev, acpi_string method) * method (DSM) interface. The returned acpi object must be freed by calling * function. */ -static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, - union acpi_object *args, - const guid_t *guid) +union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev, + int func, union acpi_object *args, + const guid_t *guid) { union acpi_object *obj; @@ -155,7 +108,7 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, size_t expected_size) { union acpi_object *obj; - int ret = 0; + int ret; obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid); if (IS_ERR(obj)) { @@ -170,8 +123,10 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, } else if (obj->type == ACPI_TYPE_BUFFER) { __le64 le_value = 0; - if (WARN_ON_ONCE(expected_size > sizeof(le_value))) - return -EINVAL; + if (WARN_ON_ONCE(expected_size > sizeof(le_value))) { + ret = -EINVAL; + goto out; + } /* if the buffer size doesn't match the expected size */ if (obj->buffer.length != expected_size) @@ -192,54 +147,50 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, } IWL_DEBUG_DEV_RADIO(dev, - "ACPI: DSM method evaluated: func=%d, ret=%d\n", - func, ret); + "ACPI: DSM method evaluated: func=%d, value=%lld\n", + func, *value); + ret = 0; out: ACPI_FREE(obj); return ret; } /* - * Evaluate a DSM with no arguments and a u8 return value, + * This function receives a DSM function number, calculates its expected size + * according to Intel BIOS spec, and fills in the value in a 32-bit field. + * In case the expected size is smaller than 32-bit, padding will be added. 
*/ -int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, - const guid_t *guid, u8 *value) +int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt, + enum iwl_dsm_funcs func, u32 *value) { + size_t expected_size; + u64 tmp; int ret; - u64 val; - ret = iwl_acpi_get_dsm_integer(dev, rev, func, - guid, &val, sizeof(u8)); + BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS); - if (ret < 0) - return ret; + if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size))) + return -EINVAL; - /* cast val (u64) to be u8 */ - *value = (u8)val; - return 0; -} -IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8); + expected_size = acpi_dsm_size[func]; -/* - * Evaluate a DSM with no arguments and a u32 return value, - */ -int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, - const guid_t *guid, u32 *value) -{ - int ret; - u64 val; - - ret = iwl_acpi_get_dsm_integer(dev, rev, func, - guid, &val, sizeof(u32)); + /* Currently all ACPI DSMs are either 8-bit or 32-bit */ + if (expected_size != sizeof(u8) && expected_size != sizeof(u32)) + return -EOPNOTSUPP; - if (ret < 0) + ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func, + &iwl_guid, &tmp, expected_size); + if (ret) return ret; - /* cast val (u64) to be u32 */ - *value = (u32)val; + if ((expected_size == sizeof(u8) && tmp != (u8)tmp) || + (expected_size == sizeof(u32) && tmp != (u32)tmp)) + IWL_DEBUG_RADIO(fwrt, + "DSM value overflows the expected size, truncating\n"); + *value = (u32)tmp; + return 0; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32); static union acpi_object * iwl_acpi_get_wifi_pkg_range(struct device *dev, @@ -307,18 +258,18 @@ iwl_acpi_get_wifi_pkg(struct device *dev, tbl_rev); } - -int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, - union iwl_tas_config_cmd *cmd, int fw_ver) +int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt, + struct iwl_tas_data *tas_data) { union acpi_object *wifi_pkg, *data; - int ret, tbl_rev, i, block_list_size, enabled; + int ret, tbl_rev, block_list_size, enabled; + u32 tas_selection; data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - /* try to read wtas table revision 1 or revision 0*/ + /* try to read wtas table */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WTAS_WIFI_DATA_SIZE, &tbl_rev); @@ -327,63 +278,41 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, goto out_free; } - if (tbl_rev == 1 && wifi_pkg->package.elements[1].type == - ACPI_TYPE_INTEGER) { - u32 tas_selection = - (u32)wifi_pkg->package.elements[1].integer.value; - u16 override_iec = - (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS; - u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >> - ACPI_WTAS_ENABLE_IEC_POS; - u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS; - - - enabled = tas_selection & ACPI_WTAS_ENABLED_MSK; - if (fw_ver <= 3) { - cmd->v3.override_tas_iec = cpu_to_le16(override_iec); - cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec); - } else { - cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb; - cmd->v4.override_tas_iec = (u8)override_iec; - cmd->v4.enable_tas_iec = (u8)enabled_iec; - } - - } else if (tbl_rev == 0 && - wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) { - enabled = !!wifi_pkg->package.elements[1].integer.value; - } else { + if (tbl_rev < 0 || tbl_rev > 2 || + wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } - if (!enabled) { - IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n"); - ret = 0; - goto out_free; - } + tas_selection = 
(u32)wifi_pkg->package.elements[1].integer.value; + enabled = tas_selection & IWL_WTAS_ENABLED_MSK; + + IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n", + tas_selection); + tas_data->table_source = BIOS_SOURCE_ACPI; + tas_data->table_revision = tbl_rev; + tas_data->tas_selection = tas_selection; + + IWL_DEBUG_RADIO(fwrt, "TAS %s enabled\n", + enabled ? "is" : "not"); IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev); if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].integer.value > - APCI_WTAS_BLACK_LIST_MAX) { + IWL_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n", wifi_pkg->package.elements[2].integer.value); ret = -EINVAL; goto out_free; } + block_list_size = wifi_pkg->package.elements[2].integer.value; - cmd->v4.block_list_size = cpu_to_le32(block_list_size); + tas_data->block_list_size = block_list_size; IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size); - if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) { - IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n", - block_list_size); - ret = -EINVAL; - goto out_free; - } - for (i = 0; i < block_list_size; i++) { - u32 country; + for (int i = 0; i < block_list_size; i++) { + u16 country; if (wifi_pkg->package.elements[3 + i].type != ACPI_TYPE_INTEGER) { @@ -394,28 +323,28 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, } country = wifi_pkg->package.elements[3 + i].integer.value; - cmd->v4.block_list_array[i] = cpu_to_le32(country); + tas_data->block_list_array[i] = country; IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country); } - ret = 1; + ret = enabled; out_free: kfree(data); return ret; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_tas); -int iwl_acpi_get_mcc(struct device *dev, char *mcc) +int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc) { union acpi_object *wifi_pkg, *data; u32 mcc_val; int ret, tbl_rev; - data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD); + data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE, + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WRDD_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); @@ -429,6 +358,11 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc) } mcc_val = wifi_pkg->package.elements[1].integer.value; + if (mcc_val != BIOS_MCC_CHINA) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "ACPI WRDD is supported only for CN\n"); + goto out_free; + } mcc[0] = (mcc_val >> 8) & 0xff; mcc[1] = mcc_val & 0xff; @@ -439,46 +373,42 @@ out_free: kfree(data); return ret; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc); -u64 iwl_acpi_get_pwr_limit(struct device *dev) +int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit) { union acpi_object *data, *wifi_pkg; - u64 dflt_pwr_limit; - int tbl_rev; + int tbl_rev, ret = -EINVAL; - data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD); - if (IS_ERR(data)) { - dflt_pwr_limit = 0; + *dflt_pwr_limit = 0; + data = iwl_acpi_get_object(fwrt->dev, ACPI_SPLC_METHOD); + if (IS_ERR(data)) goto out; - } - wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0 || - wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) { - dflt_pwr_limit = 0; + wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) goto out_free; - } - dflt_pwr_limit = 
wifi_pkg->package.elements[1].integer.value; + *dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value; + ret = 0; out_free: kfree(data); out: - return dflt_pwr_limit; + return ret; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit); -int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) +int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) { union acpi_object *wifi_pkg, *data; int ret, tbl_rev; - data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD); + data = iwl_acpi_get_object(fwrt->dev, ACPI_ECKV_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE, + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_ECKV_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); @@ -499,109 +429,33 @@ out_free: kfree(data); return ret; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); -static int iwl_sar_set_profile(union acpi_object *table, - struct iwl_sar_profile *profile, - bool enabled, u8 num_chains, u8 num_sub_bands) +static int +iwl_acpi_parse_chains_table(union acpi_object *table, + struct iwl_sar_profile_chain *chains, + u8 num_chains, u8 num_sub_bands) { - int i, j, idx = 0; - - /* - * The table from ACPI is flat, but we store it in a - * structured array. - */ - for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) { - for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) { + for (u8 chain = 0; chain < num_chains; chain++) { + for (u8 subband = 0; subband < BIOS_SAR_MAX_SUB_BANDS_NUM; + subband++) { /* if we don't have the values, use the default */ - if (i >= num_chains || j >= num_sub_bands) { - profile->chains[i].subbands[j] = 0; + if (subband >= num_sub_bands) { + chains[chain].subbands[subband] = 0; + } else if (table->type != ACPI_TYPE_INTEGER || + table->integer.value > U8_MAX) { + return -EINVAL; } else { - if (table[idx].type != ACPI_TYPE_INTEGER || - table[idx].integer.value > U8_MAX) - return -EINVAL; - - profile->chains[i].subbands[j] = - table[idx].integer.value; - - idx++; + chains[chain].subbands[subband] = + table->integer.value; + table++; } } } - /* Only if all values were valid can the profile be enabled */ - profile->enabled = enabled; - return 0; } -static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, - __le16 *per_chain, u32 n_subbands, - int prof_a, int prof_b) -{ - int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b }; - int i, j; - - for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) { - struct iwl_sar_profile *prof; - - /* don't allow SAR to be disabled (profile 0 means disable) */ - if (profs[i] == 0) - return -EPERM; - - /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */ - if (profs[i] > ACPI_SAR_PROFILE_NUM) - return -EINVAL; - - /* profiles go from 1 to 4, so decrement to access the array */ - prof = &fwrt->sar_profiles[profs[i] - 1]; - - /* if the profile is disabled, do nothing */ - if (!prof->enabled) { - IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n", - profs[i]); - /* - * if one of the profiles is disabled, we - * ignore all of them and return 1 to - * differentiate disabled from other failures. 
- */ - return 1; - } - - IWL_DEBUG_INFO(fwrt, - "SAR EWRD: chain %d profile index %d\n", - i, profs[i]); - IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); - for (j = 0; j < n_subbands; j++) { - per_chain[i * n_subbands + j] = - cpu_to_le16(prof->chains[i].subbands[j]); - IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", - j, prof->chains[i].subbands[j]); - } - } - - return 0; -} - -int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, - __le16 *per_chain, u32 n_tables, u32 n_subbands, - int prof_a, int prof_b) -{ - int i, ret = 0; - - for (i = 0; i < n_tables; i++) { - ret = iwl_sar_fill_table(fwrt, - &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0], - n_subbands, prof_a, prof_b); - if (ret) - break; - } - - return ret; -} -IWL_EXPORT_SYMBOL(iwl_sar_select_profile); - -int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) +int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *table, *data; int ret, tbl_rev; @@ -680,22 +534,23 @@ read_table: /* The profile from WRDS is officially profile 1, but goes * into sar_profiles[0] (because we don't have a profile 0). */ - ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], - flags & IWL_SAR_ENABLE_MSK, - num_chains, num_sub_bands); + ret = iwl_acpi_parse_chains_table(table, fwrt->sar_profiles[0].chains, + num_chains, num_sub_bands); + if (!ret && flags & IWL_SAR_ENABLE_MSK) + fwrt->sar_profiles[0].enabled = true; + out_free: kfree(data); return ret; } -IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table); -int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) +int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; bool enabled; int i, n_profiles, tbl_rev, pos; int ret = 0; - u8 num_chains, num_sub_bands; + u8 num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) @@ -711,7 +566,6 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - num_chains = ACPI_SAR_NUM_CHAINS_REV2; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; goto read_table; @@ -727,7 +581,6 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - num_chains = ACPI_SAR_NUM_CHAINS_REV1; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; goto read_table; @@ -743,7 +596,6 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - num_chains = ACPI_SAR_NUM_CHAINS_REV0; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; goto read_table; @@ -767,7 +619,7 @@ read_table: * from index 1, so the maximum value allowed here is * ACPI_SAR_PROFILES_NUM - 1. */ - if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { + if (n_profiles >= BIOS_SAR_MAX_PROFILE_NUM) { ret = -EINVAL; goto out_free; } @@ -775,28 +627,60 @@ read_table: /* the tables start at element 3 */ pos = 3; + BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV0 != ACPI_SAR_NUM_CHAINS_REV1); + BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV2 != 2 * ACPI_SAR_NUM_CHAINS_REV0); + + /* parse non-cdb chains for all profiles */ for (i = 0; i < n_profiles; i++) { + union acpi_object *table = &wifi_pkg->package.elements[pos]; + /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't * have profile 0). So in the array we start from 1. 
*/ - ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos], - &fwrt->sar_profiles[i + 1], enabled, - num_chains, num_sub_bands); + ret = iwl_acpi_parse_chains_table(table, + fwrt->sar_profiles[i + 1].chains, + ACPI_SAR_NUM_CHAINS_REV0, + num_sub_bands); if (ret < 0) - break; + goto out_free; /* go to the next table */ - pos += num_chains * num_sub_bands; + pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands; } + /* non-cdb table revisions */ + if (tbl_rev < 2) + goto set_enabled; + + /* parse cdb chains for all profiles */ + for (i = 0; i < n_profiles; i++) { + struct iwl_sar_profile_chain *chains; + union acpi_object *table; + + table = &wifi_pkg->package.elements[pos]; + chains = &fwrt->sar_profiles[i + 1].chains[ACPI_SAR_NUM_CHAINS_REV0]; + ret = iwl_acpi_parse_chains_table(table, + chains, + ACPI_SAR_NUM_CHAINS_REV0, + num_sub_bands); + if (ret < 0) + goto out_free; + + /* go to the next table */ + pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands; + } + +set_enabled: + for (i = 0; i < n_profiles; i++) + fwrt->sar_profiles[i + 1].enabled = enabled; + out_free: kfree(data); return ret; } -IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table); -int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) +int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; int i, j, k, ret, tbl_rev; @@ -811,7 +695,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) .revisions = BIT(3), .bands = ACPI_GEO_NUM_BANDS_REV2, .profiles = ACPI_NUM_GEO_PROFILES_REV3, - .min_profiles = 3, + .min_profiles = BIOS_GEO_MIN_PROFILE_NUM, }, { .revisions = BIT(2), @@ -867,22 +751,25 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) entry = &wifi_pkg->package.elements[entry_idx]; entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || - entry->integer.value > num_profiles) { + entry->integer.value > num_profiles || + entry->integer.value < + rev_data[idx].min_profiles) { ret = -EINVAL; goto out_free; } - num_profiles = entry->integer.value; /* - * this also validates >= min_profiles since we - * otherwise wouldn't have gotten the data when - * looking up in ACPI + * Check to see if we received package count + * same as max # of profiles */ if (wifi_pkg->package.count != hdr_size + profile_size * num_profiles) { ret = -EINVAL; goto out_free; } + + /* Number of valid profiles */ + num_profiles = entry->integer.value; } goto read_table; } @@ -897,7 +784,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) read_table: fwrt->geo_rev = tbl_rev; for (i = 0; i < num_profiles; i++) { - for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) { + for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) { union acpi_object *entry; /* @@ -921,7 +808,7 @@ read_table: entry->integer.value; } - for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) { + for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) { /* same here as above */ if (j >= num_bands) { fwrt->geo_profiles[i].bands[j].chains[k] = @@ -949,151 +836,26 @@ out_free: kfree(data); return ret; } -IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table); - -bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) -{ - /* - * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on - * earlier firmware versions. Unfortunately, we don't have a - * TLV API flag to rely on, so rely on the major version which - * is in the first byte of ucode_ver. This was implemented - * initially on version 38 and then backported to 17. It was - * also backported to 29, but only for 7265D devices. The - * intention was to have it in 36 as well, but not all 8000 - * family got this feature enabled. 
The 8000 family is the - * only one using version 36, so skip this version entirely. - */ - return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || - (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && - fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || - (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && - ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == - CSR_HW_REV_TYPE_7265D)); -} -IWL_EXPORT_SYMBOL(iwl_sar_geo_support); - -int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset *table, - u32 n_bands, u32 n_profiles) -{ - int i, j; - - if (!fwrt->geo_enabled) - return -ENODATA; - - if (!iwl_sar_geo_support(fwrt)) - return -EOPNOTSUPP; - - for (i = 0; i < n_profiles; i++) { - for (j = 0; j < n_bands; j++) { - struct iwl_per_chain_offset *chain = - &table[i * n_bands + j]; - - chain->max_tx_power = - cpu_to_le16(fwrt->geo_profiles[i].bands[j].max); - chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0]; - chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1]; - IWL_DEBUG_RADIO(fwrt, - "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", - i, j, - fwrt->geo_profiles[i].bands[j].chains[0], - fwrt->geo_profiles[i].bands[j].chains[1], - fwrt->geo_profiles[i].bands[j].max); - } - } - - return 0; -} -IWL_EXPORT_SYMBOL(iwl_sar_geo_init); - -__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) -{ - int ret; - u8 value; - u32 val; - __le32 config_bitmap = 0; - - /* - * Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'. - * Setting config_bitmap Indonesia bit is valid only for HR/JF. - */ - switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) { - case IWL_CFG_RF_TYPE_HR1: - case IWL_CFG_RF_TYPE_HR2: - case IWL_CFG_RF_TYPE_JF1: - case IWL_CFG_RF_TYPE_JF2: - ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, - DSM_FUNC_ENABLE_INDONESIA_5G2, - &iwl_guid, &value); - - if (!ret && value == DSM_VALUE_INDONESIA_ENABLE) - config_bitmap |= - cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); - break; - default: - break; - } - - /* - ** Evaluate func 'DSM_FUNC_DISABLE_SRD' - */ - ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, - DSM_FUNC_DISABLE_SRD, - &iwl_guid, &value); - if (!ret) { - if (value == DSM_VALUE_SRD_PASSIVE) - config_bitmap |= - cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); - else if (value == DSM_VALUE_SRD_DISABLE) - config_bitmap |= - cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); - } - - if (fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) { - /* - ** Evaluate func 'DSM_FUNC_REGULATORY_CONFIG' - */ - ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0, - DSM_FUNC_REGULATORY_CONFIG, - &iwl_guid, &val); - /* - * China 2022 enable if the BIOS object does not exist or - * if it is enabled in BIOS. 
- */ - if (ret < 0 || val & DSM_MASK_CHINA_22_REG) - config_bitmap |= - cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK); - } - - return config_bitmap; -} -IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap); int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data, *flags; int i, j, ret, tbl_rev, num_sub_bands = 0; int idx = 2; - u8 cmd_ver; - - fwrt->ppag_flags = 0; - fwrt->ppag_table_valid = false; data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) return PTR_ERR(data); - /* try to read ppag table rev 2 or 1 (both have the same data size) */ + /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { - if (tbl_rev == 1 || tbl_rev == 2) { + if (tbl_rev >= 1 && tbl_rev <= 3) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; IWL_DEBUG_RADIO(fwrt, - "Reading PPAG table v2 (tbl_rev=%d)\n", + "Reading PPAG table (tbl_rev=%d)\n", tbl_rev); goto read_table; } else { @@ -1128,19 +890,8 @@ read_table: goto out_free; } - fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK; - cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, - WIDE_ID(PHY_OPS_GROUP, - PER_PLATFORM_ANT_GAIN_CMD), - IWL_FW_CMD_VER_UNKNOWN); - if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) { - ret = -EINVAL; - goto out_free; - } - if (!fwrt->ppag_flags && cmd_ver <= 3) { - ret = 0; - goto out_free; - } + fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value, + fwrt->ppag_ver); /* * read, verify gain values and save them into the PPAG table. @@ -1158,166 +909,152 @@ read_table: } fwrt->ppag_chains[i].subbands[j] = ent->integer.value; - /* from ver 4 the fw deals with out of range values */ - if (cmd_ver >= 4) - continue; - if ((j == 0 && - (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB || - fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) || - (j != 0 && - (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB || - fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) { - ret = -EINVAL; - goto out_free; - } } } - fwrt->ppag_table_valid = true; ret = 0; out_free: kfree(data); return ret; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table); -int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd, - int *cmd_size) +void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters) { - u8 cmd_ver; - int i, j, num_sub_bands; - s8 *gain; + struct iwl_phy_specific_cfg tmp = {}; + union acpi_object *wifi_pkg, *data; + int tbl_rev, i; - /* many firmware images for JF lie about this */ - if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) == - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) - return -EOPNOTSUPP; + data = iwl_acpi_get_object(fwrt->dev, ACPI_WPFC_METHOD); + if (IS_ERR(data)) + return; - if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { - IWL_DEBUG_RADIO(fwrt, - "PPAG capability not supported by FW, command not sent.\n"); - return -EINVAL; - } + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WPFC_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg)) + goto out_free; - cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, - WIDE_ID(PHY_OPS_GROUP, - PER_PLATFORM_ANT_GAIN_CMD), - IWL_FW_CMD_VER_UNKNOWN); - if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) { - IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); - return -EINVAL; + if (tbl_rev != 0) + goto out_free; + + BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != + ACPI_WPFC_WIFI_DATA_SIZE - 1); + 
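+ /* Package element 0 is the domain type; the four filter config words + * follow it, hence the size minus one check above and the i + 1 + * indexing below. + */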
+ for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) { + if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER) + goto out_free; + tmp.filter_cfg_chains[i] = + cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value); } - /* The 'flags' field is the same in v1 and in v2 so we can just - * use v1 to access it. - */ - cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags); - - IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); - if (cmd_ver == 1) { - num_sub_bands = IWL_NUM_SUB_BANDS_V1; - gain = cmd->v1.gain[0]; - *cmd_size = sizeof(cmd->v1); - if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) { - /* in this case FW supports revision 0 */ - IWL_DEBUG_RADIO(fwrt, - "PPAG table rev is %d, send truncated table\n", - fwrt->ppag_ver); - } - } else if (cmd_ver >= 2 && cmd_ver <= 4) { - num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = cmd->v2.gain[0]; - *cmd_size = sizeof(cmd->v2); - if (fwrt->ppag_ver == 0) { - /* in this case FW supports revisions 1 or 2 */ - IWL_DEBUG_RADIO(fwrt, - "PPAG table rev is 0, send padded table\n"); - } - } else { - IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); - return -EINVAL; - } - - /* ppag mode */ - IWL_DEBUG_RADIO(fwrt, - "PPAG MODE bits were read from bios: %d\n", - cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK)); - if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) || - (cmd_ver == 2 && fwrt->ppag_ver == 2)) { - cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); - IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); - } else { - IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); + IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n"); + *filters = tmp; +out_free: + kfree(data); +} +IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters); + +void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt) +{ + union acpi_object *wifi_pkg, *data; + int tbl_rev; + + data = iwl_acpi_get_object(fwrt->dev, ACPI_GLAI_METHOD); + if (IS_ERR(data)) + return; + + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_GLAI_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg)) + goto out_free; + + if (tbl_rev != 0) { + IWL_DEBUG_RADIO(fwrt, "Invalid GLAI revision: %d\n", tbl_rev); + goto out_free; } - IWL_DEBUG_RADIO(fwrt, - "PPAG MODE bits going to be sent: %d\n", - cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK)); + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || + wifi_pkg->package.elements[1].integer.value > ACPI_GLAI_MAX_STATUS) + goto out_free; - for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { - for (j = 0; j < num_sub_bands; j++) { - gain[i * num_sub_bands + j] = - fwrt->ppag_chains[i].subbands[j]; - IWL_DEBUG_RADIO(fwrt, - "PPAG table: chain[%d] band[%d]: gain = %d\n", - i, j, gain[i * num_sub_bands + j]); - } - } + fwrt->uefi_tables_lock_status = + wifi_pkg->package.elements[1].integer.value; - return 0; + IWL_DEBUG_RADIO(fwrt, + "Loaded UEFI WIFI GUID lock status: %d from ACPI\n", + fwrt->uefi_tables_lock_status); +out_free: + kfree(data); } -IWL_EXPORT_SYMBOL(iwl_read_ppag_table); +IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status); -bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) +int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) { + union acpi_object *wifi_pkg, *data; + int ret = -ENOENT; + int tbl_rev; - if (!dmi_check_system(dmi_ppag_approved_list)) { - IWL_DEBUG_RADIO(fwrt, - "System vendor '%s' is not in the approved list, disabling PPAG.\n", - dmi_get_system_info(DMI_SYS_VENDOR)); - fwrt->ppag_flags = 0; - return false; + data = 
iwl_acpi_get_object(fwrt->dev, ACPI_WBEM_METHOD); + if (IS_ERR(data)) + return ret; + + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WBEM_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg)) + goto out_free; + + if (tbl_rev != IWL_ACPI_WBEM_REVISION) { + IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI WBEM revision:%d\n", + tbl_rev); + goto out_free; } - return true; + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) + goto out_free; + + *value = wifi_pkg->package.elements[1].integer.value & + IWL_ACPI_WBEM_REV0_MASK; + IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from ACPI\n"); + ret = 0; +out_free: + kfree(data); + return ret; } -IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved); -void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, - struct iwl_phy_specific_cfg *filters) +int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value) { - struct iwl_phy_specific_cfg tmp = {}; union acpi_object *wifi_pkg, *data; - int tbl_rev, i; + int ret = -ENOENT; + int tbl_rev; - data = iwl_acpi_get_object(fwrt->dev, ACPI_WPFC_METHOD); + data = iwl_acpi_get_object(fwrt->dev, ACPI_DSBR_METHOD); if (IS_ERR(data)) - return; + return ret; - /* try to read wtas table revision 1 or revision 0*/ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_WPFC_WIFI_DATA_SIZE, + ACPI_DSBR_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) goto out_free; - if (tbl_rev != 0) + if (tbl_rev != ACPI_DSBR_WIFI_DATA_REV) { + IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI DSBR revision:%d\n", + tbl_rev); goto out_free; - - BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE); - - for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) { - if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER) - return; - tmp.filter_cfg_chains[i] = - cpu_to_le32(wifi_pkg->package.elements[i].integer.value); } - IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n"); - *filters = tmp; + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) + goto out_free; + + *value = wifi_pkg->package.elements[1].integer.value; + IWL_DEBUG_RADIO(fwrt, "Loaded DSBR config from ACPI value: 0x%x\n", + *value); + ret = 0; out_free: kfree(data); + return ret; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index e9277f6f3582..e50b93472dd2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -7,6 +7,7 @@ #define __iwl_fw_acpi__ #include <linux/acpi.h> +#include "fw/regulatory.h" #include "fw/api/commands.h" #include "fw/api/power.h" #include "fw/api/phy.h" @@ -25,6 +26,9 @@ #define ACPI_PPAG_METHOD "PPAG" #define ACPI_WTAS_METHOD "WTAS" #define ACPI_WPFC_METHOD "WPFC" +#define ACPI_GLAI_METHOD "GLAI" +#define ACPI_WBEM_METHOD "WBEM" +#define ACPI_DSBR_METHOD "DSBR" #define ACPI_WIFI_DOMAIN (0x07) @@ -56,281 +60,212 @@ #define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV2 * \ ACPI_SAR_NUM_SUB_BANDS_REV2 + 3) -#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */ +#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */ /* revision 0 and 1 are identical, except for the semantics in the FW */ #define ACPI_GEO_NUM_BANDS_REV0 2 #define ACPI_GEO_NUM_BANDS_REV2 3 -#define ACPI_GEO_NUM_CHAINS 2 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 /* + * One element for domain type, + * and one for enablement of Wi-Fi 320MHz per MCC 
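+ * (revision 0 of the table defines only bits 0 and 1 of that value, + * see IWL_ACPI_WBEM_REV0_MASK)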
+ */ +#define ACPI_WBEM_WIFI_DATA_SIZE 2 +/* + * One element for domain type, + * and one for DSBR response data + */ +#define ACPI_DSBR_WIFI_DATA_SIZE 2 +#define ACPI_DSBR_WIFI_DATA_REV 1 + +/* + * One element for domain type, + * and one for the status + */ +#define ACPI_GLAI_WIFI_DATA_SIZE 2 +#define ACPI_GLAI_MAX_STATUS 2 +/* * TAS size: 1 element for type, * 1 element for enabled field, * 1 element for block list size, * 16 elements for block list array */ -#define APCI_WTAS_BLACK_LIST_MAX 16 -#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX) -#define ACPI_WTAS_ENABLED_MSK 0x1 -#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2 -#define ACPI_WTAS_ENABLE_IEC_MSK 0x4 -#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1 -#define ACPI_WTAS_ENABLE_IEC_POS 0x2 -#define ACPI_WTAS_USA_UHB_MSK BIT(16) -#define ACPI_WTAS_USA_UHB_POS 16 - +#define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX) #define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V1) + 2) #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V2) + 2) -/* PPAG gain value bounds in 1/8 dBm */ -#define ACPI_PPAG_MIN_LB -16 -#define ACPI_PPAG_MAX_LB 24 -#define ACPI_PPAG_MIN_HB -16 -#define ACPI_PPAG_MAX_HB 40 -#define ACPI_PPAG_MASK 3 -#define IWL_PPAG_ETSI_MASK BIT(0) - #define IWL_SAR_ENABLE_MSK BIT(0) #define IWL_REDUCE_POWER_FLAGS_POS 1 -/* - * The profile for revision 2 is a superset of revision 1, which is in - * turn a superset of revision 0. So we can store all revisions - * inside revision 2, which is what we represent here. - */ -struct iwl_sar_profile_chain { - u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; -}; +/* The indicator of whether the UEFI WIFI GUID tables are locked is read from ACPI */ +#define UEFI_WIFI_GUID_UNLOCKED 0 -struct iwl_sar_profile { - bool enabled; - struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2]; -}; +#define ACPI_DSM_REV 0 -/* Same thing as with SAR, all revisions fit in revision 2 */ -struct iwl_geo_profile_band { - u8 max; - u8 chains[ACPI_GEO_NUM_CHAINS]; -}; +#define DSM_INTERNAL_FUNC_GET_PLAT_INFO 1 +/* TBD: VPRO is BIT(0) in the result, but what's the result?
*/ -struct iwl_geo_profile { - struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2]; -}; - -/* Same thing as with SAR, all revisions fit in revision 2 */ -struct iwl_ppag_chain { - s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; -}; +#define DSM_INTERNAL_FUNC_PRODUCT_RESET 2 -enum iwl_dsm_funcs_rev_0 { - DSM_FUNC_QUERY = 0, - DSM_FUNC_DISABLE_SRD = 1, - DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, - DSM_FUNC_ENABLE_6E = 3, - DSM_FUNC_REGULATORY_CONFIG = 4, - DSM_FUNC_11AX_ENABLEMENT = 6, - DSM_FUNC_ENABLE_UNII4_CHAN = 7, - DSM_FUNC_ACTIVATE_CHANNEL = 8, - DSM_FUNC_FORCE_DISABLE_CHANNELS = 9, - DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10, +/* DSM_INTERNAL_FUNC_PRODUCT_RESET - product reset (aka "PLDR") */ +enum iwl_dsm_internal_product_reset_cmds { + DSM_INTERNAL_PLDR_CMD_GET_MODE = 1, + DSM_INTERNAL_PLDR_CMD_SET_MODE = 2, + DSM_INTERNAL_PLDR_CMD_GET_STATUS = 3, }; -enum iwl_dsm_values_srd { - DSM_VALUE_SRD_ACTIVE, - DSM_VALUE_SRD_PASSIVE, - DSM_VALUE_SRD_DISABLE, - DSM_VALUE_SRD_MAX +enum iwl_dsm_internal_product_reset_mode { + DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET = BIT(0), + DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR = BIT(1), + DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON = BIT(2), }; -enum iwl_dsm_values_indonesia { - DSM_VALUE_INDONESIA_DISABLE, - DSM_VALUE_INDONESIA_ENABLE, - DSM_VALUE_INDONESIA_RESERVED, - DSM_VALUE_INDONESIA_MAX -}; +struct iwl_dsm_internal_product_reset_cmd { + /* cmd is from enum iwl_dsm_internal_product_reset_cmds */ + u16 cmd; + u16 value; +} __packed; -/* DSM RFI uses a different GUID, so need separate definitions */ - -#define DSM_RFI_FUNC_ENABLE 3 - -enum iwl_dsm_values_rfi { - DSM_VALUE_RFI_ENABLE, - DSM_VALUE_RFI_DISABLE, - DSM_VALUE_RFI_MAX -}; - -enum iwl_dsm_masks_reg { - DSM_MASK_CHINA_22_REG = BIT(2) -}; +#define IWL_ACPI_WBEM_REV0_MASK (BIT(0) | BIT(1)) +#define IWL_ACPI_WBEM_REVISION 0 #ifdef CONFIG_ACPI struct iwl_fw_runtime; extern const guid_t iwl_guid; -extern const guid_t iwl_rfi_guid; - -int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, - const guid_t *guid, u8 *value); -int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, - const guid_t *guid, u32 *value); +union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev, + int func, union acpi_object *args, + const guid_t *guid); /** * iwl_acpi_get_mcc - read MCC from ACPI, if available * - * @dev: the struct device + * @fwrt: the fw runtime struct * @mcc: output buffer (3 bytes) that will get the MCC * * This function tries to read the current MCC from ACPI if available. */ -int iwl_acpi_get_mcc(struct device *dev, char *mcc); +int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc); -u64 iwl_acpi_get_pwr_limit(struct device *dev); +int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit); /* * iwl_acpi_get_eckv - read external clock validation from ACPI, if available * - * @dev: the struct device + * @fwrt: the fw runtime struct * @extl_clk: output var (2 bytes) that will get the clk indication. * * This function tries to read the external clock indication * from ACPI if available. 
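+ * Return: 0 on success, a negative error code otherwise.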
*/ -int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk); +int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk); -int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, - __le16 *per_chain, u32 n_tables, u32 n_subbands, - int prof_a, int prof_b); +int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt); -int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt); +int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt); -int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt); +int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt); -int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt); +int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt, + struct iwl_tas_data *data); -bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); - -int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset *table, - u32 n_bands, u32 n_profiles); - -int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, - union iwl_tas_config_cmd *cmd, int fw_ver); +int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt); -__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); +void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters); -int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt); +void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt); -int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd, - int *cmd_size); +int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt, + enum iwl_dsm_funcs func, u32 *value); -bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt); +int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); -void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, - struct iwl_phy_specific_cfg *filters); +int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value); #else /* CONFIG_ACPI */ -static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev, - int func, union acpi_object *args) +static inline union acpi_object * +iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, + union acpi_object *args, const guid_t *guid) { return ERR_PTR(-ENOENT); } -static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, - const guid_t *guid, u8 *value) -{ - return -ENOENT; -} - -static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, - const guid_t *guid, u32 *value) +static inline int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc) { return -ENOENT; } -static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc) -{ - return -ENOENT; -} - -static inline u64 iwl_acpi_get_pwr_limit(struct device *dev) +static inline int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, + u64 *dflt_pwr_limit) { + *dflt_pwr_limit = 0; return 0; } -static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) +static inline int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) { return -ENOENT; } -static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, - __le16 *per_chain, u32 n_tables, u32 n_subbands, - int prof_a, int prof_b) +static inline int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } -static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) +static inline int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } -static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) -{ - return -ENOENT; -} - -static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) +static inline int 
iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt) { return 1; } -static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) +static inline int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt, + struct iwl_tas_data *data) { - return false; + return -ENOENT; } -static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, - union iwl_tas_config_cmd *cmd, int fw_ver) +static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } -static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) -{ - return 0; -} +/* macro since the second argument doesn't always exist */ +#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0) -static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) +static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt) { - return -ENOENT; } -static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, - union iwl_ppag_table_cmd *cmd, int *cmd_size) +static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt, + enum iwl_dsm_funcs func, u32 *value) { return -ENOENT; } -static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) +static inline int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) { - return false; + return -ENOENT; } -static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, - struct iwl_phy_specific_cfg *filters) +static inline int iwl_acpi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value) { + return -ENOENT; } - #endif /* CONFIG_ACPI */ #endif /* __iwl_fw_acpi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index e00ab21e7358..ebe85fdf08d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -113,7 +113,7 @@ struct iwl_alive_ntf_v6 { } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */ /** - * enum iwl_extended_cfg_flag - commands driver may send before + * enum iwl_extended_cfg_flags - commands driver may send before * finishing init flow * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands @@ -126,7 +126,7 @@ enum iwl_extended_cfg_flags { }; /** - * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for + * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for * before finishing init flows * @init_flags: values from iwl_extended_cfg_flags */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h index d9044ada6a43..9b942c4aabd9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2020, 2022 Intel Corporation + * Copyright (C) 2012-2014, 2020, 2022, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -56,8 +56,6 @@ struct iwl_binding_cmd { } __packed; /* BINDING_CMD_API_S_VER_2 */ #define IWL_BINDING_CMD_SIZE_V1 sizeof(struct 
iwl_binding_cmd_v1) -#define IWL_LMAC_24G_INDEX 0 -#define IWL_LMAC_5G_INDEX 1 /* The maximal number of fragments in the FW's schedule session */ #define IWL_MVM_MAX_QUOTA 128 @@ -77,7 +75,7 @@ struct iwl_time_quota_data_v1 { } __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ /** - * struct iwl_time_quota_cmd - configuration of time quota between bindings + * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * @quotas: allocations per binding * Note: on non-CDB the fourth one is the auxilary mac and is diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 3e81e9369224..ddc84430d895 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* + * Copyright (C) 2023-2024 Intel Corporation * Copyright (C) 2013-2014, 2018-2019 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH @@ -76,73 +77,6 @@ struct iwl_bt_coex_ci_cmd { __le32 secondary_ch_phy_id; } __packed; /* BT_CI_MSG_API_S_VER_2 */ -#define BT_MBOX(n_dw, _msg, _pos, _nbits) \ - BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ - BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS - -enum iwl_bt_mxbox_dw0 { - BT_MBOX(0, LE_SLAVE_LAT, 0, 3), - BT_MBOX(0, LE_PROF1, 3, 1), - BT_MBOX(0, LE_PROF2, 4, 1), - BT_MBOX(0, LE_PROF_OTHER, 5, 1), - BT_MBOX(0, CHL_SEQ_N, 8, 4), - BT_MBOX(0, INBAND_S, 13, 1), - BT_MBOX(0, LE_MIN_RSSI, 16, 4), - BT_MBOX(0, LE_SCAN, 20, 1), - BT_MBOX(0, LE_ADV, 21, 1), - BT_MBOX(0, LE_MAX_TX_POWER, 24, 4), - BT_MBOX(0, OPEN_CON_1, 28, 2), -}; - -enum iwl_bt_mxbox_dw1 { - BT_MBOX(1, BR_MAX_TX_POWER, 0, 4), - BT_MBOX(1, IP_SR, 4, 1), - BT_MBOX(1, LE_MSTR, 5, 1), - BT_MBOX(1, AGGR_TRFC_LD, 8, 6), - BT_MBOX(1, MSG_TYPE, 16, 3), - BT_MBOX(1, SSN, 19, 2), -}; - -enum iwl_bt_mxbox_dw2 { - BT_MBOX(2, SNIFF_ACT, 0, 3), - BT_MBOX(2, PAG, 3, 1), - BT_MBOX(2, INQUIRY, 4, 1), - BT_MBOX(2, CONN, 5, 1), - BT_MBOX(2, SNIFF_INTERVAL, 8, 5), - BT_MBOX(2, DISC, 13, 1), - BT_MBOX(2, SCO_TX_ACT, 16, 2), - BT_MBOX(2, SCO_RX_ACT, 18, 2), - BT_MBOX(2, ESCO_RE_TX, 20, 2), - BT_MBOX(2, SCO_DURATION, 24, 6), -}; - -enum iwl_bt_mxbox_dw3 { - BT_MBOX(3, SCO_STATE, 0, 1), - BT_MBOX(3, SNIFF_STATE, 1, 1), - BT_MBOX(3, A2DP_STATE, 2, 1), - BT_MBOX(3, ACL_STATE, 3, 1), - BT_MBOX(3, MSTR_STATE, 4, 1), - BT_MBOX(3, OBX_STATE, 5, 1), - BT_MBOX(3, A2DP_SRC, 6, 1), - BT_MBOX(3, OPEN_CON_2, 8, 2), - BT_MBOX(3, TRAFFIC_LOAD, 10, 2), - BT_MBOX(3, CHL_SEQN_LSB, 12, 1), - BT_MBOX(3, INBAND_P, 13, 1), - BT_MBOX(3, MSG_TYPE_2, 16, 3), - BT_MBOX(3, SSN_2, 19, 2), - BT_MBOX(3, UPDATE_REQUEST, 21, 1), -}; - -#define BT_MBOX_MSG(_notif, _num, _field) \ - ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ - >> BT_MBOX##_num##_##_field##_POS) - -#define BT_MBOX_PRINT(_num, _field, _end) \ - pos += scnprintf(buf + pos, bufsz - pos, \ - "\t%s: %d%s", \ - #_field, \ - BT_MBOX_MSG(notif, _num, _field), \ - true ? 
"\n" : ", ") enum iwl_bt_activity_grading { BT_OFF = 0, BT_ON_NO_CONNECTION = 1, @@ -161,7 +95,7 @@ enum iwl_bt_ci_compliance { }; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ /** - * struct iwl_bt_coex_profile_notif - notification about BT coex + * struct iwl_bt_coex_prof_old_notif - notification about BT coex * @mbox_msg: message from BT to WiFi * @msg_idx: the index of the message * @bt_ci_compliance: enum %iwl_bt_ci_compliance @@ -170,9 +104,13 @@ enum iwl_bt_ci_compliance { * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading * @ttc_status: is TTC enabled - one bit per PHY * @rrc_status: is RRC enabled - one bit per PHY - * @reserved: reserved + * The following fields are only for version 5, and are reserved in version 4: + * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is + * utilizing) when the RSSI is low (<= -65 dBm) + * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that + * BT is utilizing) when the RSSI is mid/high (>= -65 dBm) */ -struct iwl_bt_coex_profile_notif { +struct iwl_bt_coex_prof_old_notif { __le32 mbox_msg[4]; __le32 msg_idx; __le32 bt_ci_compliance; @@ -182,7 +120,35 @@ struct iwl_bt_coex_profile_notif { __le32 bt_activity_grading; u8 ttc_status; u8 rrc_status; - __le16 reserved; -} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ + u8 wifi_loss_low_rssi; + u8 wifi_loss_mid_high_rssi; +} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 + * BT_COEX_PROFILE_NTFY_API_S_VER_5 + */ + +/** + * enum iwl_bt_coex_subcmd_ids - coex configuration command IDs + */ +enum iwl_bt_coex_subcmd_ids { + /** + *@PROFILE_NOTIF: &struct iwl_bt_coex_profile_notif + */ + PROFILE_NOTIF = 0xFF, +}; + +#define COEX_NUM_BAND 3 +#define COEX_NUM_CHAINS 2 + +/** + * struct iwl_bt_coex_profile_notif - notification about BT coex + * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is + * utilizing) when the RSSI is low (<= -65 dBm) + * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that + * BT is utilizing) when the RSSI is mid/high (>= -65 dBm) + */ +struct iwl_bt_coex_profile_notif { + u8 wifi_loss_low_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS]; + u8 wifi_loss_mid_high_rssi[COEX_NUM_BAND][COEX_NUM_CHAINS]; +} __packed; /* BT_COEX_BT_PROFILE_NTF_API_S_VER_1 */ #endif /* __iwl_fw_api_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 7544c4cb1a30..34a1f97653c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2022, 2024 Intel Corporation */ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ @@ -25,6 +25,8 @@ * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids * @LOCATION_GROUP: location group, uses command IDs from * &enum iwl_location_subcmd_ids + * @BT_COEX_GROUP: bt coex group, uses command IDs from + * &enum iwl_bt_coex_subcmd_ids * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from * &enum iwl_prot_offload_subcmd_ids * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from @@ -43,6 +45,7 @@ enum iwl_mvm_command_groups { SCAN_GROUP = 0x6, NAN_GROUP = 0x7, LOCATION_GROUP = 0x8, + BT_COEX_GROUP = 0x9, PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, 
@@ -144,8 +147,8 @@ enum iwl_legacy_cmds { /** * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or * &struct iwl_tx_cmd_gen3, - * response in &struct iwl_mvm_tx_resp or - * &struct iwl_mvm_tx_resp_v3 + * response in &struct iwl_tx_resp or + * &struct iwl_tx_resp_v3 */ TX_CMD = 0x1c, @@ -398,7 +401,7 @@ enum iwl_legacy_cmds { REDUCE_TX_POWER_CMD = 0x9f, /** - * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif + * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif_v4 */ MISSED_BEACONS_NOTIFICATION = 0xa2, @@ -444,7 +447,7 @@ enum iwl_legacy_cmds { /** * @BA_NOTIF: - * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif + * BlockAck notification, uses &struct iwl_compressed_ba_notif * or &struct iwl_mvm_ba_notif depending on the HW */ BA_NOTIF = 0xc5, @@ -467,7 +470,7 @@ enum iwl_legacy_cmds { MARKER_CMD = 0xcb, /** - * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif + * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_prof_old_notif */ BT_PROFILE_NOTIFICATION = 0xce, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h index 4419631604b4..1fc65469990e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2023-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -76,7 +76,7 @@ struct iwl_phy_specific_cfg { } __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/ /** - * struct iwl_phy_cfg_cmd - Phy configuration command + * struct iwl_phy_cfg_cmd_v1 - Phy configuration command * * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg * @calib_control: calibration control data diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h index 1fa5678c1cd6..a9fa5f054ce0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h @@ -40,4 +40,7 @@ enum iwl_ctxt_action { FW_CTXT_ACTION_REMOVE, }; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ +#define IWL_LMAC_24G_INDEX 0 +#define IWL_LMAC_5G_INDEX 1 + #endif /* __iwl_fw_api_context_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index ea99d41040d2..c2362bc786b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -42,7 +42,7 @@ struct iwl_d3_manager_config { /* TODO: OFFLOADS_QUERY_API_S_VER_1 */ /** - * enum iwl_d3_proto_offloads - enabled protocol offloads + * enum iwl_proto_offloads - enabled protocol offloads * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid @@ -195,7 +195,7 @@ struct iwl_wowlan_pattern_v1 { #define IWL_WOWLAN_MAX_PATTERNS 20 /** - * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns + * struct iwl_wowlan_patterns_cmd_v1 - WoWLAN 
wakeup patterns */ struct iwl_wowlan_patterns_cmd_v1 { /** @@ -324,7 +324,7 @@ struct iwl_wowlan_patterns_cmd { u8 n_patterns; /** - * @n_patterns: sta_id + * @sta_id: sta_id */ u8 sta_id; @@ -368,7 +368,7 @@ enum iwl_wowlan_flags { }; /** - * struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6) + * struct iwl_wowlan_config_cmd_v6 - WoWLAN configuration (versions 5 and 6) * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters * @non_qos_seq: non-QoS sequence counter to use next. * Reserved if the struct has version >= 6. @@ -380,7 +380,7 @@ enum iwl_wowlan_flags { * @sta_id: station ID for wowlan. * @reserved: reserved */ -struct iwl_wowlan_config_cmd { +struct iwl_wowlan_config_cmd_v6 { __le32 wakeup_filter; __le16 non_qos_seq; __le16 qos_seq[8]; @@ -390,7 +390,27 @@ struct iwl_wowlan_config_cmd { u8 flags; u8 sta_id; u8 reserved; -} __packed; /* WOWLAN_CONFIG_API_S_VER_5 */ +} __packed; /* WOWLAN_CONFIG_API_S_VER_6 */ + +/** + * struct iwl_wowlan_config_cmd - WoWLAN configuration + * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters + * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down + * @is_11n_connection: indicates HT connection + * @offloading_tid: TID reserved for firmware use + * @flags: extra flags, see &enum iwl_wowlan_flags + * @sta_id: station ID for wowlan. + * @reserved: reserved + */ +struct iwl_wowlan_config_cmd { + __le32 wakeup_filter; + u8 wowlan_ba_teardown_tids; + u8 is_11n_connection; + u8 offloading_tid; + u8 flags; + u8 sta_id; + u8 reserved[3]; +} __packed; /* WOWLAN_CONFIG_API_S_VER_7 */ #define IWL_NUM_RSC 16 #define WOWLAN_KEY_MAX_SIZE 32 @@ -843,8 +863,54 @@ struct iwl_wowlan_info_notif_v2 { u8 reserved2[2]; } __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ +/* MAX MLO keys of non-active links that can arrive in the notification */ +#define WOWLAN_MAX_MLO_KEYS 18 + /** - * struct iwl_wowlan_info_notif - WoWLAN information notification + * enum iwl_wowlan_mlo_gtk_type - GTK types + * @WOWLAN_MLO_GTK_KEY_TYPE_GTK: GTK + * @WOWLAN_MLO_GTK_KEY_TYPE_IGTK: IGTK + * @WOWLAN_MLO_GTK_KEY_TYPE_BIGTK: BIGTK + * @WOWLAN_MLO_GTK_KEY_NUM_TYPES: number of key types + */ +enum iwl_wowlan_mlo_gtk_type { + WOWLAN_MLO_GTK_KEY_TYPE_GTK, + WOWLAN_MLO_GTK_KEY_TYPE_IGTK, + WOWLAN_MLO_GTK_KEY_TYPE_BIGTK, + WOWLAN_MLO_GTK_KEY_NUM_TYPES +}; /* WOWLAN_MLO_GTK_KEY_TYPE_API_E_VER_1 */ + +/** + * enum iwl_wowlan_mlo_gtk_flag - MLO GTK flags + * @WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK: 0 for len 16, 1 for len 32 + * @WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK: key id (ranges from 0 to 7) + * @WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK: spec link id of the key + * @WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK: &enum iwl_wowlan_mlo_gtk_type + * @WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK: is this the last given key per + * key-type / link-id - the currently used key + */ +enum iwl_wowlan_mlo_gtk_flag { + WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK = 0x0001, + WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK = 0x000E, + WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK = 0x00F0, + WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK = 0x0300, + WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK = 0x0400 +}; /* WOWLAN_MLO_GTK_FLAG_API_E_VER_1 */ + +/** + * struct iwl_wowlan_mlo_gtk - MLO GTK info + * @key: key material + * @flags: &enum iwl_wowlan_mlo_gtk_flag + * @pn: packet number + */ +struct iwl_wowlan_mlo_gtk { + u8 key[WOWLAN_KEY_MAX_SIZE]; + __le16 flags; + u8 pn[6]; +} __packed; /* WOWLAN_MLO_GTK_KEY_API_S_VER_1 */ + +/** + * struct iwl_wowlan_info_notif_v4 - WoWLAN information notification * @gtk: GTK data * @igtk: IGTK data * @bigtk: BIGTK data @@ -859,9 +925,12 @@ struct 
iwl_wowlan_info_notif_v2 { * @tid_tear_down: bit mask of tids whose BA sessions were closed * in suspend state * @station_id: station id + * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs + * following this notif, or reserved in version < 4 * @reserved2: reserved + * @mlo_gtks: array of GTKs of size num_mlo_link_keys for version >= 4 */ -struct iwl_wowlan_info_notif { +struct iwl_wowlan_info_notif_v4 { struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM]; @@ -875,8 +944,49 @@ struct iwl_wowlan_info_notif { __le32 received_beacons; u8 tid_tear_down; u8 station_id; - u8 reserved2[2]; -} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */ + u8 num_mlo_link_keys; + u8 reserved2; + struct iwl_wowlan_mlo_gtk mlo_gtks[]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3, _VER_4 */ + +/** + * struct iwl_wowlan_info_notif - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK data + * @bigtk: BIGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs + * following this notif + * @tid_offloaded_tx: tid used by the firmware to transmit data packets + * while in wowlan + * @mlo_gtks: array of GTKs of size num_mlo_link_keys + */ +struct iwl_wowlan_info_notif { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 qos_seq_ctr; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 num_mlo_link_keys; + u8 tid_offloaded_tx; + struct iwl_wowlan_mlo_gtk mlo_gtks[]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */ /** * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 751b596ea1a5..570a3f722510 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* + * Copyright (C) 2024 Intel Corporation * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH @@ -90,6 +91,12 @@ enum iwl_data_path_subcmd_ids { SEC_KEY_CMD = 0x18, /** + * @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode, + * uses &struct iwl_mvm_esr_mode_notif + */ + ESR_MODE_NOTIF = 0xF3, + + /** * @MONITOR_NOTIF: Datapath monitoring notification, using * &struct iwl_datapath_monitor_notif */ @@ -101,7 +108,7 @@ enum iwl_data_path_subcmd_ids { RX_NO_DATA_NOTIF = 0xF5, /** - * @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode, + * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode, * &struct 
iwl_thermal_dual_chain_request */ THERMAL_DUAL_CHAIN_REQUEST = 0xF6, @@ -224,28 +231,33 @@ struct iwl_synced_time_rsp { #define PTP_CTX_MAX_DATA_SIZE 128 /** - * struct iwl_time_msmt_ptp_ctx - Vendor specific information element + * struct iwl_time_msmt_ptp_ctx - Vendor specific element * to allow a space for flexibility for the userspace App * - * @element_id: element id of vendor specific ie - * @length: length of vendor specific ie - * @reserved: for alignment - * @data: vendor specific data blob + * @ftm: FTM specific vendor element + * @ftm.element_id: element id of vendor specific ie + * @ftm.length: length of vendor specific ie + * @ftm.reserved: for alignment + * @ftm.data: vendor specific data blob + * @tm: TM specific vendor element + * @tm.element_id: element id of vendor specific ie + * @tm.length: length of vendor specific ie + * @tm.data: vendor specific data blob */ struct iwl_time_msmt_ptp_ctx { - /* Differentiate between FTM and TM specific Vendor IEs */ + /* Differentiate between FTM and TM specific Vendor elements */ union { struct { u8 element_id; u8 length; __le16 reserved; u8 data[PTP_CTX_MAX_DATA_SIZE]; - } ftm; /* FTM specific vendor IE */ + } ftm; struct { u8 element_id; u8 length; u8 data[PTP_CTX_MAX_DATA_SIZE]; - } tm; /* TM specific vendor IE */ + } tm; }; } __packed /* PTP_CTX_VER_1 */; @@ -379,7 +391,7 @@ enum iwl_datapath_monitor_notif_type { struct iwl_datapath_monitor_notif { __le32 type; - u8 mac_id; + u8 link_id; u8 reserved[3]; } __packed; /* MONITOR_NTF_API_S_VER_1 */ @@ -524,6 +536,10 @@ struct iwl_rx_baid_cfg_cmd_remove { /** * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command * @action: the action, from &enum iwl_rx_baid_action + * @alloc: allocation data + * @modify: modify data + * @remove_v1: remove data (version 1) + * @remove: remove data */ struct iwl_rx_baid_cfg_cmd { __le32 action; @@ -558,6 +574,7 @@ enum iwl_scd_queue_cfg_operation { /** * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command * @operation: the operation, see &enum iwl_scd_queue_cfg_operation + * @u: union depending on command usage * @u.add.sta_mask: station mask * @u.add.tid: TID * @u.add.reserved: reserved @@ -627,6 +644,7 @@ enum iwl_sec_key_flags { /** * struct iwl_sec_key_cmd - security key command * @action: action from &enum iwl_ctxt_action + * @u: union depending on command type * @u.add.sta_mask: station mask for the new key * @u.add.key_id: key ID (0-7) for the new key * @u.add.key_flags: key flags per &enum iwl_sec_key_flags diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 394747deb269..550de6db1834 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #ifndef __iwl_fw_dbg_tlv_h__ #define __iwl_fw_dbg_tlv_h__ @@ -147,32 +147,34 @@ struct iwl_fw_ini_region_internal_buffer { * Configures parameters for region data collection * * @hdr: debug header - * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID + * @id: region id. Max id is %IWL_FW_INI_MAX_REGION_ID * @type: region type. One of &enum iwl_fw_ini_region_type * @sub_type: region sub type * @sub_type_ver: region sub type version * @reserved: not in use * @name: region name * @dev_addr: device address configuration. 
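
As an aside on consuming the version-5 WoWLAN information notification earlier in this patch: it is variable-length, with @num_mlo_link_keys copies of struct iwl_wowlan_mlo_gtk trailing the fixed part, and each entry's @flags packs key length/ID, link ID and key type behind the WOWLAN_MLO_GTK_FLAG_* masks. A minimal user-space sketch of the unpacking (local type mirrors and a little-endian host are assumed; illustrative only, not the driver's actual code):

#include <stdint.h>
#include <stdio.h>

/* local mirror of struct iwl_wowlan_mlo_gtk (40 bytes on the wire) */
struct mlo_gtk {
	uint8_t key[32];  /* WOWLAN_KEY_MAX_SIZE */
	uint8_t flags[2]; /* __le16 in the real header */
	uint8_t pn[6];
};

static void mlo_gtk_print(const struct mlo_gtk *k)
{
	uint16_t f = (uint16_t)k->flags[0] | ((uint16_t)k->flags[1] << 8);
	unsigned int key_len = (f & 0x0001) ? 32 : 16; /* KEY_LEN_MSK */
	unsigned int key_id  = (f & 0x000E) >> 1;      /* KEY_ID_MSK */
	unsigned int link_id = (f & 0x00F0) >> 4;      /* LINK_ID_MSK */
	unsigned int type    = (f & 0x0300) >> 8;      /* KEY_TYPE_MSK */

	printf("link %u: %s key_id %u len %u%s\n", link_id,
	       type == 0 ? "GTK" : type == 1 ? "IGTK" : "BIGTK",
	       key_id, key_len, (f & 0x0400) ? " (current)" : "");
}

/* a real consumer would first check that the received length is at least
 * sizeof(fixed part) + num_mlo_link_keys * sizeof(struct mlo_gtk)
 * before walking the trailer */
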
Used by - * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC, - * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX, - * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, - * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG - * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, - * &IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, + * %IWL_FW_INI_REGION_DEVICE_MEMORY, %IWL_FW_INI_REGION_PERIPHERY_MAC, + * %IWL_FW_INI_REGION_PERIPHERY_PHY, %IWL_FW_INI_REGION_PERIPHERY_AUX, + * %IWL_FW_INI_REGION_PAGING, %IWL_FW_INI_REGION_CSR, + * %IWL_FW_INI_REGION_DRAM_IMR and %IWL_FW_INI_REGION_PCI_IOSF_CONFIG + * %IWL_FW_INI_REGION_DBGI_SRAM, %FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, + * %IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, * @dev_addr_range: device address range configuration. Used by - * &IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and - * &IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE - * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and - * &IWL_FW_INI_REGION_RXF + * %IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and + * %IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE + * @fifos: fifos configuration. Used by %IWL_FW_INI_REGION_TXF and + * %IWL_FW_INI_REGION_RXF * @err_table: error table configuration. Used by - * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and - * IWL_FW_INI_REGION_UMAC_ERROR_TABLE + * %IWL_FW_INI_REGION_LMAC_ERROR_TABLE and + * %IWL_FW_INI_REGION_UMAC_ERROR_TABLE * @internal_buffer: internal monitor buffer configuration. Used by - * &IWL_FW_INI_REGION_INTERNAL_BUFFER + * %IWL_FW_INI_REGION_INTERNAL_BUFFER + * @special_mem: special device memory region, used by + * %IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id. - * Used by &IWL_FW_INI_REGION_DRAM_BUFFER - * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV + * Used by %IWL_FW_INI_REGION_DRAM_BUFFER + * @tlv_mask: tlv collection mask. Used by %IWL_FW_INI_REGION_TLV * @addrs: array of addresses attached to the end of the region tlv */ struct iwl_fw_ini_region_tlv { @@ -291,7 +293,7 @@ struct iwl_fw_ini_addr_val { } __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */ /** - * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory. + * struct iwl_fw_ini_conf_set_tlv - configuration TLV to set register/memory. * * @hdr: debug header * @time_point: time point to apply config. 
One of &enum iwl_fw_ini_time_point @@ -319,7 +321,7 @@ struct iwl_fw_ini_conf_set_tlv { * @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration * @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration * @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration - * @IWL_FW_INI_ALLOCATION_NUM: max number of configuration supported + * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configuration supported */ enum iwl_fw_ini_config_set_type { @@ -360,6 +362,7 @@ enum iwl_fw_ini_allocation_id { * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location * @IWL_FW_INI_LOCATION_NPK_PATH: NPK location + * @IWL_FW_INI_LOCATION_NUM: number of valid locations */ enum iwl_fw_ini_buffer_location { IWL_FW_INI_LOCATION_INVALID, @@ -439,6 +442,7 @@ enum iwl_fw_ini_region_device_memory_subtype { * Hard coded time points in which the driver can send hcmd or perform dump * collection * + * @IWL_FW_INI_TIME_POINT_INVALID: invalid timepoint * @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW * @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif * @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence @@ -468,6 +472,12 @@ enum iwl_fw_ini_region_device_memory_subtype { * @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed * @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx * @IWL_FW_INI_TIME_POINT_DEASSOC: de association + * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ: request to override preset + * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset + * request + * @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list + * @IWL_FW_INI_TIME_ESR_LINK_UP: EMLSR is active (several links are activated) + * @IWL_FW_INI_TIME_ESR_LINK_DOWN: EMLSR is inactive (only one active link left) * @IWL_FW_INI_TIME_POINT_NUM: number of time points */ enum iwl_fw_ini_time_point { @@ -498,6 +508,11 @@ enum iwl_fw_ini_time_point { IWL_FW_INI_TIME_POINT_EAPOL_FAILED, IWL_FW_INI_TIME_POINT_FAKE_TX, IWL_FW_INI_TIME_POINT_DEASSOC, + IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ, + IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START, + IWL_FW_INI_TIME_SCAN_FAILURE, + IWL_FW_INI_TIME_ESR_LINK_UP, + IWL_FW_INI_TIME_ESR_LINK_DOWN, IWL_FW_INI_TIME_POINT_NUM, }; /* FW_TLV_DEBUG_TIME_POINT_API_E */ @@ -553,7 +568,7 @@ enum iwl_fw_ini_dump_policy { * enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW. 
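
A note on the EMLSR time points added above: they index the same hard-coded table as the existing entries, so anything indexed by time point must be sized by IWL_FW_INI_TIME_POINT_NUM and range-checked. A driver-style fragment (a sketch assuming the iwlwifi headers are in scope; iwl_dbg_tlv_time_point() is the driver's existing hook for firing a time point):

/* fire a debug time point around an EMLSR transition */
static void example_esr_dbg_tp(struct iwl_fw_runtime *fwrt, bool entering_esr)
{
	enum iwl_fw_ini_time_point tp = entering_esr ?
		IWL_FW_INI_TIME_ESR_LINK_UP : IWL_FW_INI_TIME_ESR_LINK_DOWN;

	if (WARN_ON(tp >= IWL_FW_INI_TIME_POINT_NUM))
		return;

	iwl_dbg_tlv_time_point(fwrt, tp, NULL);
}
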
* * @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions - * @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions + * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions * @IWL_FW_INI_DUMP_VERBOSE : dump all regions */ enum iwl_fw_ini_dump_type { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index b740c65a7dca..aa88e91d117e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -1,11 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_debug_h__ #define __iwl_fw_api_debug_h__ +#include "dbg-tlv.h" /** * enum iwl_debug_cmds - debug commands @@ -394,7 +395,7 @@ struct iwl_buf_alloc_cmd { * * @first_word: magic word value * @second_word: magic word value - * @framfrags: DRAM fragmentaion detail + * @dram_frags: DRAM fragmentation detail */ struct iwl_dram_info { __le32 first_word; @@ -476,6 +477,9 @@ struct iwl_mvm_tas_status_per_mac { * @tas_status_mac: TAS status per lmac, uses * &struct iwl_mvm_tas_status_per_mac * @in_dual_radio: is TAS in dual radio? - TRUE/FALSE + * @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags. + * This member is valid only when fw has + * %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability. * @reserved: reserved */ struct iwl_mvm_tas_status_resp { @@ -485,7 +489,8 @@ struct iwl_mvm_tas_status_resp { __le16 block_list[16]; struct iwl_mvm_tas_status_per_mac tas_status_mac[2]; u8 in_dual_radio; - u8 reserved[3]; + u8 uhb_allowed_flags; + u8 reserved[2]; } __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index b044990c7b87..b8dff139aa05 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -2,6 +2,7 @@ /* * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2024 Intel Corporation */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ @@ -390,10 +391,62 @@ struct iwl_tof_responder_config_cmd_v9 { __le16 max_time_between_msr; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @bss_color: current AP bss_color + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @band: current AP band + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @r2i_ndp_params: parameters for R2I NDP. 
+ * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @i2r_ndp_params: parameters for I2R NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @min_time_between_msr: for non trigger based NDP ranging, minimum time + * between measurements in milliseconds. + * @max_time_between_msr: for non trigger based NDP ranging, maximum time + * between measurements in milliseconds. + */ +struct iwl_tof_responder_config_cmd { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 format_bw; + u8 bss_color; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 band; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; + __le16 max_time_between_msr; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_10 */ + #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 /** - * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings + * struct iwl_tof_responder_dyn_config_cmd_v2 - Dynamic responder settings * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if @@ -561,6 +614,11 @@ struct iwl_tof_range_req_ap_entry_v2 { * the responder asked for LMR feedback although the initiator did not set * the LMR feedback bit in the FTM request. If not set, the initiator will * continue with the session and will provide the LMR feedback. + * @IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC: send an incorrect SAC in the + * first NDP exchange. This is used for testing. + * @IWL_INITIATOR_AP_FLAGS_TEST_BAD_SLTF: use incorrect secure LTF tx key. This + * is used for testing. Only supported from version 15 of the range request + * command. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -577,6 +635,8 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15), + IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = BIT(16), + IWL_INITIATOR_AP_FLAGS_TEST_BAD_SLTF = BIT(17), }; /** @@ -630,6 +690,7 @@ enum iwl_location_frame_format { * @IWL_LOCATION_BW_20MHZ: 20MHz * @IWL_LOCATION_BW_40MHZ: 40MHz * @IWL_LOCATION_BW_80MHZ: 80MHz + * @IWL_LOCATION_BW_160MHZ: 160MHz */ enum iwl_location_bw { IWL_LOCATION_BW_20MHZ, @@ -710,7 +771,7 @@ enum iwl_location_cipher { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -757,7 +818,7 @@ struct iwl_tof_range_req_ap_entry_v6 { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. 
+ * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -770,10 +831,10 @@ struct iwl_tof_range_req_ap_entry_v6 { * &IWL_INITIATOR_AP_FLAGS_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. */ struct iwl_tof_range_req_ap_entry_v7 { __le32 initiator_ap_flags; @@ -796,6 +857,7 @@ struct iwl_tof_range_req_ap_entry_v7 { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */ #define IWL_LOCATION_MAX_STS_POS 3 +#define IWL_LOCATION_TOTAL_LTF_POS 6 /** * struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters @@ -814,7 +876,7 @@ struct iwl_tof_range_req_ap_entry_v7 { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -827,10 +889,10 @@ struct iwl_tof_range_req_ap_entry_v7 { * &IWL_INITIATOR_AP_FLAGS_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams @@ -888,7 +950,7 @@ struct iwl_tof_range_req_ap_entry_v8 { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -903,10 +965,10 @@ struct iwl_tof_range_req_ap_entry_v8 { * &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. 
* bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams @@ -953,6 +1015,78 @@ struct iwl_tof_range_req_ap_entry_v9 { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ /** + * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @bssid: AP's BSSID + * @burst_period: For EDCA based ranging: Recommended value to be sent to the + * AP. Measurement periodicity In units of 100ms. ignored if + * num_of_bursts_exp = 0. + * For non trigger based NDP ranging, the maximum time between + * measurements in units of milliseconds. + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. Only relevant when associated to the AP, + * otherwise should be set to &IWL_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. + * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. + * @rx_pn: the next expected PN for protected management frames Rx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_INVALID_STA. + * @tx_pn: the next PN to use for protected management frames Tx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_INVALID_STA. + * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max total LTFs. One of + * &enum ieee80211_range_params_max_total_ltf. + * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max total LTFs. One of + * &enum ieee80211_range_params_max_total_ltf. 
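
The @r2i_ndp_params/@i2r_ndp_params bytes use the same layout in every entry version: LTF repetitions in bits 0-2, spatial streams starting at IWL_LOCATION_MAX_STS_POS (3), and the total-LTF cap at IWL_LOCATION_TOTAL_LTF_POS (6). A self-contained packing helper (the names here are local stand-ins):

#include <stdint.h>

#define LOC_MAX_STS_POS   3 /* IWL_LOCATION_MAX_STS_POS */
#define LOC_TOTAL_LTF_POS 6 /* IWL_LOCATION_TOTAL_LTF_POS */

static uint8_t loc_pack_ndp_params(unsigned int ltf_reps,
				   unsigned int max_sts,
				   unsigned int total_ltf)
{
	/* bits 0-2: LTF repetitions, 3-5: spatial streams, 6-7: total LTFs */
	return (uint8_t)((ltf_reps & 0x7) |
			 ((max_sts & 0x7) << LOC_MAX_STS_POS) |
			 ((total_ltf & 0x3) << LOC_TOTAL_LTF_POS));
}
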
+ * @min_time_between_msr: For non trigger based NDP ranging, the minimum time + * between measurements in units of milliseconds + */ +struct iwl_tof_range_req_ap_entry_v10 { + __le32 initiator_ap_flags; + u8 band; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + __le16 beacon_interval; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ + +/** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) @@ -1229,6 +1363,34 @@ struct iwl_tof_range_req_cmd_v13 { struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ +/** + * struct iwl_tof_range_req_cmd_v14 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10. 
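
Since the v14 command defined just below carries a fixed IWL_MVM_TOF_MAX_APS-slot @ap array but only @num_of_ap populated entries, a sender would plausibly trim the host command rather than transmit the whole array. A hedged driver-style sketch (assumes <linux/stddef.h> for offsetof; the real driver's sizing may differ):

/* length of a v14 range request holding only the populated AP entries */
static u16 range_req_v14_len(const struct iwl_tof_range_req_cmd_v14 *cmd)
{
	return offsetof(struct iwl_tof_range_req_cmd_v14, ap) +
	       cmd->num_of_ap * sizeof(struct iwl_tof_range_req_ap_entry_v10);
}
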
+ */ +struct iwl_tof_range_req_cmd_v14 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index f15e6d64c298..37bb7002c1c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -42,7 +42,7 @@ enum iwl_mac_conf_subcmd_ids { */ LINK_CONFIG_CMD = 0x9, /** - * @STA_CONFIG_CMD: &struct iwl_mvm_sta_cfg_cmd + * @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd */ STA_CONFIG_CMD = 0xA, /** @@ -50,7 +50,7 @@ enum iwl_mac_conf_subcmd_ids { */ AUX_STA_CMD = 0xB, /** - * @STA_REMOVE_CMD: &struct iwl_mvm_remove_sta_cmd + * @STA_REMOVE_CMD: &struct iwl_remove_sta_cmd */ STA_REMOVE_CMD = 0xC, /** @@ -62,11 +62,19 @@ enum iwl_mac_conf_subcmd_ids { */ ROC_CMD = 0xE, /** + * @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif + */ + MISSED_BEACONS_NOTIF = 0xF6, + /** + * @EMLSR_TRANS_FAIL_NOTIF: &struct iwl_esr_trans_fail_notif + */ + EMLSR_TRANS_FAIL_NOTIF = 0xF7, + /** * @ROC_NOTIF: &struct iwl_roc_notif */ ROC_NOTIF = 0xF8, /** - * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif + * @SESSION_PROTECTION_NOTIF: &struct iwl_session_prot_notif */ SESSION_PROTECTION_NOTIF = 0xFB, @@ -144,7 +152,7 @@ struct iwl_missed_vap_notif { } __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */ /** - * struct iwl_channel_switch_start_notif - Channel switch start notification + * struct iwl_channel_switch_start_notif_v1 - Channel switch start notification * * @id_and_color: ID and color of the MAC */ @@ -242,9 +250,9 @@ struct iwl_mac_low_latency_cmd { * @esr_transition_timeout: the timeout required by the AP for the * eSR transition. * Available only from version 2 of the command. - * This values comes from the EMLSR transition delay in the EML + * This value comes from the EMLSR transition delay in the EML * Capabilities subfield. - * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j. + * @medium_sync_delay: the value as it appears in P802.11be_D2.2 Figure 9-1002j. * @assoc_id: unique ID assigned by the AP during association * @reserved1: alignment * @data_policy: see &enum iwl_mac_data_policy @@ -317,7 +325,6 @@ enum iwl_mac_config_filter_flags { * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0 * len delim to determine if AGG or single. * @client: client mac data - * @go_ibss: mac data for go or ibss * @p2p_dev: mac data for p2p device */ struct iwl_mac_config_cmd { @@ -374,7 +381,9 @@ struct iwl_mac_config_cmd { * iwl_link_ctx_cfg_cmd::bss_color_disable * @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask. * This flag can be set only if the MAC that this link relates to has - * eht_support set to true. + * eht_support set to true. 
No longer used since _VER_3 of this command. + * @LINK_CONTEXT_MODIFY_BANDWIDTH: Covers iwl_link_ctx_cfg_cmd::modify_bandwidth. + * Request RX OMI to the AP to modify bandwidth of this link. * @LINK_CONTEXT_MODIFY_ALL: set all above flags */ enum iwl_link_ctx_modify_flags { @@ -386,6 +395,7 @@ enum iwl_link_ctx_modify_flags { LINK_CONTEXT_MODIFY_HE_PARAMS = BIT(5), LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE = BIT(6), LINK_CONTEXT_MODIFY_EHT_PARAMS = BIT(7), + LINK_CONTEXT_MODIFY_BANDWIDTH = BIT(8), LINK_CONTEXT_MODIFY_ALL = 0xff, }; /* LINK_CONTEXT_MODIFY_MASK_E_VER_1 */ @@ -427,6 +437,22 @@ enum iwl_link_ctx_flags { }; /* LINK_CONTEXT_FLAG_E_VER_1 */ /** + * enum iwl_link_modify_bandwidth - link modify (RX OMI) bandwidth + * @IWL_LINK_MODIFY_BW_20: request 20 MHz + * @IWL_LINK_MODIFY_BW_40: request 40 MHz + * @IWL_LINK_MODIFY_BW_80: request 80 MHz + * @IWL_LINK_MODIFY_BW_160: request 160 MHz + * @IWL_LINK_MODIFY_BW_320: request 320 MHz + */ +enum iwl_link_modify_bandwidth { + IWL_LINK_MODIFY_BW_20, + IWL_LINK_MODIFY_BW_40, + IWL_LINK_MODIFY_BW_80, + IWL_LINK_MODIFY_BW_160, + IWL_LINK_MODIFY_BW_320, +}; + +/** * struct iwl_link_config_cmd - command structure to configure the LINK context * in MLD API * ( LINK_CONFIG_CMD =0x9 ) @@ -447,6 +473,12 @@ enum iwl_link_ctx_flags { * @listen_lmac: indicates whether the link should be allocated on the Listen * Lmac or on the Main Lmac. Cannot be changed on an active Link. * Relevant only for eSR. + * @block_tx: tell the firmware that this link can't Tx. This should be used + * only when a link is de-activated because of CSA with mode = 1. + * Available since version 5. + * @modify_bandwidth: bandwidth request value for RX OMI (see also + * %LINK_CONTEXT_MODIFY_BANDWIDTH), from &enum iwl_link_modify_bandwidth. + * @reserved1: in version 2, listen_lmac became reserved * @cck_rates: basic rates available for CCK * @ofdm_rates: basic rates available for OFDM * @cck_short_preamble: 1 for enabling short preamble, 0 otherwise @@ -462,7 +494,7 @@ enum iwl_link_ctx_flags { * @bi: beacon interval in TU, applicable only when associated * @dtim_interval: DTIM interval in TU. * Relevant only for GO, otherwise this is offloaded. - * @puncture_mask: puncture mask for EHT + * @puncture_mask: puncture mask for EHT (removed in VER_3) * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @flags: a combination from &enum iwl_link_ctx_flags * @flags_mask: what of %flags have changed. 
Also &enum iwl_link_ctx_flags @@ -472,10 +504,12 @@ enum iwl_link_ctx_flags { * @bssid_index: index of the associated VAP * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame * @spec_link_id: link_id as the AP knows it - * @reserved: alignment + * @ul_mu_data_disable: OM Control UL MU Data Disable RX Support (bit 44) in + * HE MAC Capabilities information field as defined in figure 9-897 in + * IEEE802.11REVme-D5.0 * @ibss_bssid_addr: bssid for ibss * @reserved_for_ibss_bssid_addr: reserved - * @reserved1: reserved for future use + * @reserved3: reserved for future use */ struct iwl_link_config_cmd { __le32 action; @@ -486,7 +520,14 @@ struct iwl_link_config_cmd { __le16 reserved_for_local_link_addr; __le32 modify_mask; __le32 active; - __le32 listen_lmac; + union { + __le32 listen_lmac; /* only _VER_1 */ + struct { + u8 block_tx; /* since _VER_5 */ + u8 modify_bandwidth; /* since _VER_6 */ + u8 reserved1[2]; + }; + }; __le32 cck_rates; __le32 ofdm_rates; __le32 cck_short_preamble; @@ -502,27 +543,27 @@ struct iwl_link_config_cmd { struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; __le32 bi; __le32 dtim_interval; - __le16 puncture_mask; + __le16 puncture_mask; /* removed in _VER_3 */ __le16 frame_time_rts_th; __le32 flags; - __le32 flags_mask; + __le32 flags_mask; /* removed in _VER_6 */ /* The below fields are for multi-bssid */ u8 ref_bssid_addr[6]; __le16 reserved_for_ref_bssid_addr; u8 bssid_index; u8 bss_color; u8 spec_link_id; - u8 reserved; + u8 ul_mu_data_disable; u8 ibss_bssid_addr[6]; __le16 reserved_for_ibss_bssid_addr; - __le32 reserved1[8]; -} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */ + __le32 reserved3[8]; +} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6 */ /* Currently FW supports link ids in the range 0-3 and can have * at most two active links for each vif. 
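
Because _VER_1 and _VER_5/_VER_6 of the link config command overlay different members on the same four bytes (see the union in the struct above), the sender must pick the union arm by the negotiated command version. A compact user-space illustration of that overlay (C11 anonymous struct; the values are placeholders, and the driver would use cpu_to_le32() for the _VER_1 arm):

#include <stdint.h>
#include <string.h>

/* local mirror of the version-dependent slice of iwl_link_config_cmd */
union link_cfg_overlay {
	uint32_t listen_lmac;             /* _VER_1 only */
	struct {
		uint8_t block_tx;         /* since _VER_5 */
		uint8_t modify_bandwidth; /* since _VER_6 */
		uint8_t reserved1[2];
	};
};

static void link_cfg_fill(union link_cfg_overlay *u, unsigned int cmd_ver,
			  int csa_block_tx, uint8_t bw_req)
{
	memset(u, 0, sizeof(*u));
	if (cmd_ver == 1) {
		u->listen_lmac = 0; /* lmac selection, _VER_1 semantics */
		return;
	}
	if (cmd_ver >= 5)
		u->block_tx = csa_block_tx ? 1 : 0;
	if (cmd_ver >= 6)
		u->modify_bandwidth = bw_req; /* e.g. IWL_LINK_MODIFY_BW_80 */
}
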
*/ -#define IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM 2 -#define IWL_MVM_FW_MAX_LINK_ID 3 +#define IWL_FW_MAX_ACTIVE_LINKS_NUM 2 +#define IWL_FW_MAX_LINK_ID 3 /** * enum iwl_fw_sta_type - FW station types @@ -544,7 +585,7 @@ enum iwl_fw_sta_type { }; /* STATION_TYPE_E_VER_1 */ /** - * struct iwl_mvm_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's + * struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's * station table * ( STA_CONFIG_CMD = 0xA ) * @@ -576,7 +617,7 @@ enum iwl_fw_sta_type { * capa * @htc_flags: which features are supported in HTC */ -struct iwl_mvm_sta_cfg_cmd { +struct iwl_sta_cfg_cmd { __le32 sta_id; __le32 link_id; u8 peer_mld_address[ETH_ALEN]; @@ -617,13 +658,13 @@ struct iwl_mvm_aux_sta_cmd { } __packed; /* AUX_STA_CMD_API_S_VER_1 */ /** - * struct iwl_mvm_remove_sta_cmd - a cmd structure to remove a sta added by + * struct iwl_remove_sta_cmd - a cmd structure to remove a sta added by * STA_CONFIG_CMD or AUX_STA_CONFIG_CMD * ( STA_REMOVE_CMD = 0xC ) * * @sta_id: index of station to remove */ -struct iwl_mvm_remove_sta_cmd { +struct iwl_remove_sta_cmd { __le32 sta_id; } __packed; /* REMOVE_STA_API_S_VER_1 */ @@ -639,4 +680,72 @@ struct iwl_mvm_sta_disable_tx_cmd { __le32 disable; } __packed; /* STA_DISABLE_TX_API_S_VER_1 */ +/** + * enum iwl_mvm_fw_esr_recommendation - FW recommendation code + * @ESR_RECOMMEND_LEAVE: recommendation to leave esr + * @ESR_FORCE_LEAVE: force exiting esr + * @ESR_RECOMMEND_ENTER: recommendation to enter esr + */ +enum iwl_mvm_fw_esr_recommendation { + ESR_RECOMMEND_LEAVE, + ESR_FORCE_LEAVE, + ESR_RECOMMEND_ENTER, +}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */ + +/** + * struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode + * + * @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation + */ +struct iwl_mvm_esr_mode_notif { + __le32 action; +} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */ + +/** + * struct iwl_missed_beacons_notif - sent when by the firmware upon beacon loss + * ( MISSED_BEACONS_NOTIF = 0xF6 ) + * @link_id: fw link ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. + * @consec_missed_beacons: number of consecutive missed beacons + * @other_link_id: used in EMLSR only. The fw link ID for + * &consec_missed_beacons_other_link. IWL_MVM_FW_LINK_ID_INVALID (0xff) if + * invalid. + * @consec_missed_beacons_other_link: number of consecutive missed beacons on + * &other_link_id. 
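
For the version-5 missed-beacons notification defined just below, a receiver mainly tracks the per-link counters and must treat @other_link_id == 0xff as "no EMLSR partner link". A user-space sketch with local mirrors (little-endian host assumed; the threshold is a placeholder, not the driver's real policy):

#include <stdbool.h>
#include <stdint.h>

#define FW_LINK_ID_INVALID 0xff /* IWL_MVM_FW_LINK_ID_INVALID */

struct missed_beacons_v5 { /* mirrors struct iwl_missed_beacons_notif */
	uint32_t link_id;
	uint32_t consec_missed_beacons_since_last_rx;
	uint32_t consec_missed_beacons;
	uint32_t other_link_id;
	uint32_t consec_missed_beacons_other_link;
};

static bool beacon_loss_critical(const struct missed_beacons_v5 *n)
{
	if (n->consec_missed_beacons_since_last_rx > 16)
		return true;
	return n->other_link_id != FW_LINK_ID_INVALID &&
	       n->consec_missed_beacons_other_link > 16;
}
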
+ */ +struct iwl_missed_beacons_notif { + __le32 link_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 other_link_id; + __le32 consec_missed_beacons_other_link; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_5 */ + +/* + * enum iwl_esr_trans_fail_code: to be used to parse the notif below + * + * @ESR_TRANS_FAILED_TX_STATUS_ERROR: failed to TX EML OMN frame + * @ESR_TRANSITION_FAILED_TX_TIMEOUT: timeout on the EML OMN frame + * @ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD: can't get a beacon on the new link + */ +enum iwl_esr_trans_fail_code { + ESR_TRANS_FAILED_TX_STATUS_ERROR, + ESR_TRANSITION_FAILED_TX_TIMEOUT, + ESR_TRANSITION_FAILED_BEACONS_NOT_HEARD, +}; + +/** + * struct iwl_esr_trans_fail_notif - FW reports a failure in EMLSR transition + * + * @link_id: the link_id that still works after the failure + * @activation: true if the link was activated, false otherwise + * @err_code: see &enum iwl_esr_trans_fail_code + */ +struct iwl_esr_trans_fail_notif { + __le32 link_id; + __le32 activation; + __le32 err_code; +} __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 55882190251c..26301c0b06a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_h__ @@ -16,8 +16,8 @@ #define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) #define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) -#define IWL_MVM_STATION_COUNT_MAX 16 -#define IWL_MVM_INVALID_STA 0xFF +#define IWL_STATION_COUNT_MAX 16 +#define IWL_INVALID_STA 0xFF enum iwl_ac { AC_BK, @@ -310,6 +310,13 @@ struct iwl_ac_qos { * @filter_flags: combination of &enum iwl_mac_filter_flags * @qos_flags: from &enum iwl_mac_qos_flags * @ac: one iwl_mac_qos configuration for each AC + * @ap: AP specific config data, see &struct iwl_mac_data_ap + * @go: GO specific config data, see &struct iwl_mac_data_go + * @sta: BSS client specific config data, see &struct iwl_mac_data_sta + * @p2p_sta: P2P client specific config data, see &struct iwl_mac_data_p2p_sta + * @p2p_dev: P2P-device specific config data, see &struct iwl_mac_data_p2p_dev + * @pibss: Pseudo-IBSS specific data, unused; see struct iwl_mac_data_pibss + * @ibss: IBSS specific config data, see &struct iwl_mac_data_ibss */ struct iwl_mac_ctx_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ @@ -371,7 +378,7 @@ struct iwl_missed_beacons_notif_ver_3 { } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ /** - * struct iwl_missed_beacons_notif - information on missed beacons + * struct iwl_missed_beacons_notif_v4 - information on missed beacons * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) * @link_id: fw link ID * @consec_missed_beacons_since_last_rx: number of consecutive missed @@ -380,7 +387,7 @@ struct iwl_missed_beacons_notif_ver_3 { * @num_expected_beacons: number of expected beacons * @num_recvd_beacons: number of received beacons */ -struct iwl_missed_beacons_notif { +struct iwl_missed_beacons_notif_v4 { __le32 link_id; __le32 consec_missed_beacons_since_last_rx; __le32 consec_missed_beacons; @@ -431,8 +438,8 @@ enum iwl_he_pkt_ext_constellations { }; #define MAX_HE_SUPP_NSS 2 -#define 
MAX_CHANNEL_BW_INDX_API_D_VER_2 4 -#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5 +#define MAX_CHANNEL_BW_INDX_API_D_VER_1 4 +#define MAX_CHANNEL_BW_INDX_API_D_VER_2 5 /** * struct iwl_he_pkt_ext_v1 - QAM thresholds @@ -455,7 +462,7 @@ enum iwl_he_pkt_ext_constellations { * (0-low_th, 1-high_th) */ struct iwl_he_pkt_ext_v1 { - u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2]; + u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_1][2]; } __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */ /** @@ -480,7 +487,7 @@ struct iwl_he_pkt_ext_v1 { * (0-low_th, 1-high_th) */ struct iwl_he_pkt_ext_v2 { - u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2]; + u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2]; } __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 7ec959244ffc..5cdc09d465d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -22,8 +22,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids { * &struct iwl_lari_config_change_cmd_v3, * &struct iwl_lari_config_change_cmd_v4, * &struct iwl_lari_config_change_cmd_v5, - * &struct iwl_lari_config_change_cmd_v6 or - * &struct iwl_lari_config_change_cmd_v7 + * &struct iwl_lari_config_change_cmd_v6, + * &struct iwl_lari_config_change_cmd_v7, + * &struct iwl_lari_config_change_cmd_v10 or + * &struct iwl_lari_config_change_cmd */ LARI_CONFIG_CHANGE = 0x1, @@ -45,9 +47,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids { SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, /** - * @UATS_TABLE_CMD: &struct iwl_uats_table_cmd + * @MCC_ALLOWED_AP_TYPE_CMD: &struct iwl_mcc_allowed_ap_type_cmd */ - UATS_TABLE_CMD = 0x5, + MCC_ALLOWED_AP_TYPE_CMD = 0x5, /** * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy @@ -118,7 +120,7 @@ struct iwl_nvm_access_cmd { } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ /** - * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD + * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD * @offset: offset in bytes into the section * @length: in bytes, either how much was written or read * @type: NVM_SECTION_TYPE_* @@ -210,7 +212,7 @@ struct iwl_nvm_get_info_phy { #define IWL_NUM_CHANNELS 110 /** - * struct iwl_nvm_get_info_regulatory - regulatory information + * struct iwl_nvm_get_info_regulatory_v1 - regulatory information * @lar_enabled: is LAR enabled * @channel_profile: regulatory data of this channel * @reserved: reserved @@ -438,56 +440,101 @@ enum iwl_mcc_source { MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, }; -#define IWL_TAS_BLOCK_LIST_MAX 16 +#define IWL_WTAS_BLACK_LIST_MAX 16 /** - * struct iwl_tas_config_cmd_v2 - configures the TAS + * struct iwl_tas_config_cmd_common - configures the TAS. + * This is also the v2 structure. 
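
Splitting the TAS configuration into a common head plus a per-version tail (see the iwl_tas_config_cmd_v2_v4 wrapper further below) lets the driver send a single buffer whose length tracks the negotiated version. One plausible sizing helper, as a hedged driver-style fragment:

/* assumes the TAS structs in this header are in scope */
static unsigned int tas_cmd_len(unsigned int cmd_ver)
{
	unsigned int len = sizeof(struct iwl_tas_config_cmd_common);

	if (cmd_ver == 3)
		len += sizeof(struct iwl_tas_config_cmd_v3);
	else if (cmd_ver == 4)
		len += sizeof(struct iwl_tas_config_cmd_v4);

	return len; /* v2 sends the common part alone; v5 switches to
		     * struct iwl_tas_config_cmd entirely */
}
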
* @block_list_size: size of relevant field in block_list_array * @block_list_array: list of countries where TAS must be disabled */ -struct iwl_tas_config_cmd_v2 { +struct iwl_tas_config_cmd_common { __le32 block_list_size; - __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; + __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX]; } __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */ /** * struct iwl_tas_config_cmd_v3 - configures the TAS - * @block_list_size: size of relevant field in block_list_array - * @block_list_array: list of countries where TAS must be disabled * @override_tas_iec: indicates whether to override default value of IEC regulatory * @enable_tas_iec: in case override_tas_iec is set - * indicates whether IEC regulatory is enabled or disabled */ struct iwl_tas_config_cmd_v3 { - __le32 block_list_size; - __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; __le16 override_tas_iec; __le16 enable_tas_iec; } __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */ /** - * struct iwl_tas_config_cmd_v3 - configures the TAS - * @block_list_size: size of relevant field in block_list_array - * @block_list_array: list of countries where TAS must be disabled + * enum iwl_tas_uhb_allowed_flags - per country TAS UHB allowed flags. + * @TAS_UHB_ALLOWED_CANADA: TAS UHB is allowed in Canada. This flag is valid + * only when fw has %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability. + */ +enum iwl_tas_uhb_allowed_flags { + TAS_UHB_ALLOWED_CANADA = BIT(0), +}; + +/** + * struct iwl_tas_config_cmd_v4 - configures the TAS * @override_tas_iec: indicates whether to override default value of IEC regulatory * @enable_tas_iec: in case override_tas_iec is set - * indicates whether IEC regulatory is enabled or disabled * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA - * @reserved: reserved -*/ + * @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags. + */ struct iwl_tas_config_cmd_v4 { - __le32 block_list_size; - __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; u8 override_tas_iec; u8 enable_tas_iec; u8 usa_tas_uhb_allowed; - u8 reserved; + u8 uhb_allowed_flags; } __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */ -union iwl_tas_config_cmd { - struct iwl_tas_config_cmd_v2 v2; - struct iwl_tas_config_cmd_v3 v3; - struct iwl_tas_config_cmd_v4 v4; +struct iwl_tas_config_cmd_v2_v4 { + struct iwl_tas_config_cmd_common common; + union { + struct iwl_tas_config_cmd_v3 v3; + struct iwl_tas_config_cmd_v4 v4; + }; +}; + +/** + * enum bios_source - source of bios data + * @BIOS_SOURCE_NONE: BIOS source is not defined + * @BIOS_SOURCE_ACPI: BIOS source is ACPI + * @BIOS_SOURCE_UEFI: BIOS source is UEFI + */ +enum bios_source { + BIOS_SOURCE_NONE, + BIOS_SOURCE_ACPI, + BIOS_SOURCE_UEFI, }; + +/** + * struct bios_value_u32 - BIOS configuration. + * @table_source: see &enum bios_source + * @table_revision: table revision. + * @reserved: reserved + * @value: value in bios. + */ +struct bios_value_u32 { + u8 table_source; + u8 table_revision; + u8 reserved[2]; + __le32 value; +} __packed; /* BIOS_TABLE_SOURCE_U32_S_VER_1 */ + +/** + * struct iwl_tas_config_cmd - configures the TAS. 
+ * @block_list_size: size of relevant field in block_list_array + * @block_list_array: list of countries where TAS must be disabled + * @reserved: reserved + * @tas_config_info: see &struct bios_value_u32 + */ +struct iwl_tas_config_cmd { + __le16 block_list_size; + __le16 block_list_array[IWL_WTAS_BLACK_LIST_MAX]; + u8 reserved[2]; + struct bios_value_u32 tas_config_info; +} __packed; /* TAS_CONFIG_CMD_API_S_VER_5 */ + /** * enum iwl_lari_config_masks - bit masks for the various LARI config operations * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine @@ -613,7 +660,7 @@ struct iwl_lari_config_change_cmd_v6 { /** * struct iwl_lari_config_change_cmd_v7 - change LARI configuration - * This structure is used also for lari cmd version 8. + * This structure is used also for lari cmd version 8 and 9. * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits * per country, one to indicate whether to override and the other to * indicate the value to use. * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits * per country, one to indicate whether to override and the other to * indicate allow/disallow unii4 channels. + * For LARI cmd version 4 to 8 - bits 0:3 are supported. + * For LARI cmd version 9 - bits 0:5 are supported. * @chan_state_active_bitmap: Bitmap to enable different bands per country * or region. * Each bit represents a country or region, and a band to activate * according to the BIOS definitions. * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. * Each bit represents a set of channels in a specific band that should be * disabled * @edt_bitmap: Bitmap of energy detection threshold table. * Disable/enable the EDT optimization method for different band. */ struct iwl_lari_config_change_cmd_v7 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; __le32 oem_11ax_allow_bitmap; __le32 oem_unii4_allow_bitmap; __le32 chan_state_active_bitmap; __le32 force_disable_channels_bitmap; __le32 edt_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_7 */ /* LARI_CHANGE_CONF_CMD_S_VER_8 */ +/* LARI_CHANGE_CONF_CMD_S_VER_9 */ + +/** + * struct iwl_lari_config_change_cmd_v10 - change LARI configuration + * @config_bitmap: Bitmap of the config commands. 
Each bit will trigger a + * different predefined FW config operation. + * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. + * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate the value to use. + * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits + * per country, one to indicate whether to override and the other to + * indicate allow/disallow unii4 channels. + * For LARI cmd version 11 - bits 0:5 are supported. + * @chan_state_active_bitmap: Bitmap to enable different bands per country + * or region. + * Each bit represents a country or region, and a band to activate + * according to the BIOS definitions. + * For LARI cmd version 11 - bits 0:4 are supported. + * For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are + * reserved. No need to mask out the reserved bits. + * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. + * Each bit represents a set of channels in a specific band that should be + * disabled + * @edt_bitmap: Bitmap of energy detection threshold table. + * Disable/enable the EDT optimization method for different band. + * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC. + * bit0: enable 320Mhz in Japan. + * bit1: enable 320Mhz in South Korea. + * bit 2 - 31: reserved. + * @oem_11be_allow_bitmap: Bitmap of 11be allowed MCCs. No need to mask out the + * unsupported bits + * bit0: enable 11be in China(CB/CN). + * bit1: enable 11be in South Korea. + * bit 2 - 31: reserved. + */ +struct iwl_lari_config_change_cmd { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; + __le32 force_disable_channels_bitmap; + __le32 edt_bitmap; + __le32 oem_320mhz_allow_bitmap; + __le32 oem_11be_allow_bitmap; +} __packed; +/* LARI_CHANGE_CONF_CMD_S_VER_11 */ +/* LARI_CHANGE_CONF_CMD_S_VER_12 */ /* Activate UNII-1 (5.2GHz) for World Wide */ -#define ACTIVATE_5G2_IN_WW_MASK BIT(4) +#define ACTIVATE_5G2_IN_WW_MASK BIT(4) +#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F /** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete @@ -662,13 +800,13 @@ struct iwl_pnvm_init_complete_ntfy { #define UATS_TABLE_COL_SIZE 13 /** - * struct iwl_uats_table_cmd - struct for UATS_TABLE_CMD + * struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD * @offset_map: mapping a mcc to UHB AP type support (UATS) allowed * @reserved: reserved */ -struct iwl_uats_table_cmd { +struct iwl_mcc_allowed_ap_type_cmd { u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE]; __le16 reserved; -} __packed; /* UATS_TABLE_CMD_S_VER_1 */ +} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index 2d2b9c8c36ea..9b09b835560b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -3,7 +3,7 @@ * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2021-2023 Intel Corporation + * Copyright (C) 2021-2024 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ @@ -20,7 +20,7 @@ enum iwl_prot_offload_subcmd_ids { /** * @WOWLAN_INFO_NOTIFICATION: Notification in * 
&struct iwl_wowlan_info_notif_v1, &struct iwl_wowlan_info_notif_v2, - * or iwl_wowlan_info_notif + * or &struct iwl_wowlan_info_notif */ WOWLAN_INFO_NOTIFICATION = 0xFD, @@ -31,7 +31,7 @@ enum iwl_prot_offload_subcmd_ids { /** * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif_v2 or - * &struct iwl_stored_beacon_notif_v3 + * &struct iwl_stored_beacon_notif */ STORED_BEACON_NTF = 0xFF, }; @@ -60,7 +60,7 @@ struct iwl_stored_beacon_notif_common { } __packed; /** - * struct iwl_stored_beacon_notif - Stored beacon notification + * struct iwl_stored_beacon_notif_v2 - Stored beacon notification * * @common: fields common for all versions * @data: beacon data, length in @byte_count @@ -71,18 +71,18 @@ struct iwl_stored_beacon_notif_v2 { } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ /** - * struct iwl_stored_beacon_notif_v3 - Stored beacon notification + * struct iwl_stored_beacon_notif - Stored beacon notification * * @common: fields common for all versions * @sta_id: station for which the beacon was received * @reserved: reserved for alignment * @data: beacon data, length in @byte_count */ -struct iwl_stored_beacon_notif_v3 { +struct iwl_stored_beacon_notif { struct iwl_stored_beacon_notif_common common; u8 sta_id; u8 reserved[3]; u8 data[MAX_STORED_BEACON_SIZE]; -} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */ +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3, _VER_4 */ #endif /* __iwl_fw_api_offload_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index 306ed88de463..4d8a12799c4d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -113,7 +113,7 @@ struct iwl_phy_context_cmd_tail { } __packed; /** - * struct iwl_phy_context_cmd - config of the PHY context + * struct iwl_phy_context_cmd_v1 - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding * @action: action to perform, see &enum iwl_ctxt_action @@ -142,6 +142,9 @@ struct iwl_phy_context_cmd_v1 { * @lmac_id: the lmac id the phy context belongs to * @ci: channel info * @rxchain_info: ??? + * @sbb_bandwidth: 0 disabled, 1 - 40Mhz ... 
4 - 320MHz + * @sbb_ctrl_channel_loc: location of the control channel + * @puncture_mask: bitmap of punctured subchannels * @dsp_cfg_flags: set to 0 * @reserved: reserved to align to 64 bit */ @@ -152,9 +155,20 @@ struct iwl_phy_context_cmd { /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */ struct iwl_fw_channel_info ci; __le32 lmac_id; - __le32 rxchain_info; /* reserved in _VER_4 */ + union { + __le32 rxchain_info; /* reserved in _VER_4 */ + struct { /* used for _VER_5/_VER_6 */ + u8 sbb_bandwidth; + u8 sbb_ctrl_channel_loc; + __le16 puncture_mask; /* added in VER_6 */ + }; + }; __le32 dsp_cfg_flags; __le32 reserved; -} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */ +} __packed; /* PHY_CONTEXT_CMD_API_VER_3, + * PHY_CONTEXT_CMD_API_VER_4, + * PHY_CONTEXT_CMD_API_VER_5, + * PHY_CONTEXT_CMD_API_VER_6 + */ #endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index 5a3f30e5e06d..c73d4d597857 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2022 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -43,6 +43,11 @@ enum iwl_phy_ops_subcmd_ids { PER_PLATFORM_ANT_GAIN_CMD = 0x07, /** + * @AP_TX_POWER_CONSTRAINTS_CMD: &struct iwl_txpower_constraints_cmd + */ + AP_TX_POWER_CONSTRAINTS_CMD = 0x0C, + + /** * @CT_KILL_NOTIFICATION: &struct ct_kill_notif */ CT_KILL_NOTIFICATION = 0xFE, @@ -190,7 +195,7 @@ struct ct_kill_notif { } __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */ /** -* enum ctdp_cmd_operation - CTDP command operations +* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations * @CTDP_CMD_OPERATION_START: update the current budget * @CTDP_CMD_OPERATION_STOP: stop ctdp * @CTDP_CMD_OPERATION_REPORT: get the average budget diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 040d83fa5424..37ec26596ee7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -266,7 +266,7 @@ struct iwl_reduce_tx_power_cmd { } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ enum iwl_dev_tx_power_cmd_mode { - IWL_TX_POWER_MODE_SET_MAC = 0, + IWL_TX_POWER_MODE_SET_LINK = 0, IWL_TX_POWER_MODE_SET_DEVICE = 1, IWL_TX_POWER_MODE_SET_CHAINS = 2, IWL_TX_POWER_MODE_SET_ACK = 3, @@ -283,20 +283,16 @@ enum iwl_dev_tx_power_cmd_mode { /** * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd * @set_mode: see &enum iwl_dev_tx_power_cmd_mode - * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @link_id: id of the link ctx for which we are reducing TX power. + * For version 9 / 10, this is the link id. For earlier versions, it is + * the mac id. * @pwr_restriction: TX power restriction in 1/8 dBms. 
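
The PHY context command above uses the same versioned-union pattern: _VER_3/_VER_4 still carry @rxchain_info, while _VER_5/_VER_6 reuse those four bytes for the SBB fields, and @puncture_mask only exists from _VER_6. A hedged kernel-style fragment (assumes the header above; cmd_ver would come from something like iwl_fw_lookup_cmd_ver()):

static void phy_ctxt_fill_by_ver(struct iwl_phy_context_cmd *cmd,
				 unsigned int cmd_ver, u32 rxchain,
				 u8 bw_idx, u8 ctrl_pos, u16 punct)
{
	if (cmd_ver < 5) {
		/* reserved in _VER_4, so harmless to write there */
		cmd->rxchain_info = cpu_to_le32(rxchain);
		return;
	}
	cmd->sbb_bandwidth = bw_idx; /* 0 disabled, 1 = 40 MHz ... 4 = 320 MHz */
	cmd->sbb_ctrl_channel_loc = ctrl_pos;
	if (cmd_ver >= 6)
		cmd->puncture_mask = cpu_to_le16(punct);
}
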
- * @dev_24: device TX power restriction in 1/8 dBms - * @dev_52_low: device TX power restriction upper band - low - * @dev_52_high: device TX power restriction upper band - high */ struct iwl_dev_tx_power_common { __le32 set_mode; - __le32 mac_context_id; + __le32 link_id; __le16 pwr_restriction; - __le16 dev_24; - __le16 dev_52_low; - __le16 dev_52_high; -}; +} __packed; /** * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3 @@ -385,25 +381,122 @@ struct iwl_dev_tx_power_cmd_v7 { __le32 timer_period; __le32 flags; } __packed; /* TX_REDUCED_POWER_API_S_VER_7 */ + /** - * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) + * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8 + * @per_chain: per chain restrictions + * @enable_ack_reduction: enable or disable close range ack TX power + * reduction. + * @per_chain_restriction_changed: is per_chain_restriction has changed + * from last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved: reserved (padding) + * @timer_period: timer in milliseconds. if expires FW will change to default + * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER + * @flags: reduce power flags. + * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA. + * Not in use. + */ +struct iwl_dev_tx_power_cmd_v8 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; + __le32 flags; + __le32 tpc_vlp_backoff_level; +} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */ + +/* + * @dev_24: device TX power restriction in 1/8 dBms + * @dev_52_low: device TX power restriction upper band - low + * @dev_52_high: device TX power restriction upper band - high + */ +struct iwl_dev_tx_power_cmd_per_band { + __le16 dev_24; + __le16 dev_52_low; + __le16 dev_52_high; +} __packed; + +/** + * struct iwl_dev_tx_power_cmd_v3_v8 - TX power reduction command (multiversion) + * @per_band: per band restrictions * @common: common part of the command * @v3: version 3 part of the command * @v4: version 4 part of the command * @v5: version 5 part of the command * @v6: version 6 part of the command + * @v7: version 7 part of the command + * @v8: version 8 part of the command */ -struct iwl_dev_tx_power_cmd { +struct iwl_dev_tx_power_cmd_v3_v8 { struct iwl_dev_tx_power_common common; + struct iwl_dev_tx_power_cmd_per_band per_band; union { struct iwl_dev_tx_power_cmd_v3 v3; struct iwl_dev_tx_power_cmd_v4 v4; struct iwl_dev_tx_power_cmd_v5 v5; struct iwl_dev_tx_power_cmd_v6 v6; struct iwl_dev_tx_power_cmd_v7 v7; + struct iwl_dev_tx_power_cmd_v8 v8; }; }; +/** + * struct iwl_dev_tx_power_cmd_v9 - TX power reduction cmd + * @reserved: reserved (padding) + * @per_chain: per chain restrictions + * @per_chain_restriction_changed: is per_chain_restriction has changed + * from last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved1: reserved (padding) + * @timer_period: timer in milliseconds. if expires FW will change to default + * BIOS values. 
+/**
+ * struct iwl_dev_tx_power_cmd_v9 - TX power reduction cmd
+ * @reserved: reserved (padding)
+ * @per_chain: per chain restrictions
+ * @per_chain_restriction_changed: indicates whether per_chain_restriction
+ * has changed since the last command. used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved1: reserved (padding)
+ * @timer_period: timer in milliseconds. if it expires, FW will change to
+ * default BIOS values. relevant if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd_v9 {
+ __le16 reserved;
+ __le16 per_chain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
+ u8 per_chain_restriction_changed;
+ u8 reserved1[3];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_9 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v10 - TX power reduction cmd
+ * @per_chain: per chain restrictions
+ * @per_chain_restriction_changed: indicates whether per_chain_restriction
+ * has changed since the last command. used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. if it expires, FW will change to
+ * default BIOS values. relevant if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v10 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 per_chain_restriction_changed;
+ u8 reserved;
+ __le32 timer_period;
+ __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_10 */
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * @common: common part of the command
+ * @v9: version 9 part of the command
+ * @v10: version 10 part of the command
+ */
+struct iwl_dev_tx_power_cmd {
+ struct iwl_dev_tx_power_common common;
+ union {
+ struct iwl_dev_tx_power_cmd_v9 v9;
+ struct iwl_dev_tx_power_cmd_v10 v10;
+ };
+} __packed; /* TX_REDUCED_POWER_API_S_VER_9_VER10 */
+
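Versions 9 and 10 above differ mainly in the shape of the per-chain table (two dimensions vs. three, and more sub-bands). A standalone sketch of copying one flat SAR profile into both layouts; the dimension values are assumptions for illustration, the driver takes the real ones from its own headers:

#include <stdint.h>
#include <stdio.h>

/* values assumed for illustration only */
#define NUM_CHAIN_TABLES_V2 2
#define NUM_CHAIN_LIMITS    2
#define NUM_SUB_BANDS_V1    5
#define NUM_SUB_BANDS_V2    11

/* hypothetical: copy one SAR profile into the v9 layout */
static void fill_v9(uint16_t dst[NUM_CHAIN_LIMITS][NUM_SUB_BANDS_V1],
		    const uint16_t *profile)
{
	for (int c = 0; c < NUM_CHAIN_LIMITS; c++)
		for (int b = 0; b < NUM_SUB_BANDS_V1; b++)
			dst[c][b] = profile[c * NUM_SUB_BANDS_V1 + b];
}

/* hypothetical: copy one SAR profile into the v10 layout */
static void fill_v10(uint16_t dst[NUM_CHAIN_TABLES_V2][NUM_CHAIN_LIMITS][NUM_SUB_BANDS_V2],
		     const uint16_t *profile)
{
	for (int t = 0; t < NUM_CHAIN_TABLES_V2; t++)
		for (int c = 0; c < NUM_CHAIN_LIMITS; c++)
			for (int b = 0; b < NUM_SUB_BANDS_V2; b++)
				dst[t][c][b] = profile[(t * NUM_CHAIN_LIMITS + c) *
						       NUM_SUB_BANDS_V2 + b];
}

int main(void)
{
	uint16_t flat[NUM_CHAIN_TABLES_V2 * NUM_CHAIN_LIMITS * NUM_SUB_BANDS_V2];
	uint16_t v9[NUM_CHAIN_LIMITS][NUM_SUB_BANDS_V1];
	uint16_t v10[NUM_CHAIN_TABLES_V2][NUM_CHAIN_LIMITS][NUM_SUB_BANDS_V2];

	for (unsigned i = 0; i < sizeof(flat) / sizeof(flat[0]); i++)
		flat[i] = i;
	fill_v9(v9, flat);
	fill_v10(v10, flat);
	printf("v9[1][4]=%u v10[1][1][10]=%u\n", v9[1][4], v10[1][1][10]);
	return 0;
}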
 #define IWL_NUM_GEO_PROFILES 3
 #define IWL_NUM_GEO_PROFILES_V3 8
 #define IWL_NUM_BANDS_PER_CHAIN_V1 2
@@ -432,7 +525,7 @@ struct iwl_per_chain_offset {
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
 /**
- * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
 * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
 * @table: offset profile per band.
 */
@@ -442,7 +535,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */
 /**
- * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
 * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
 * @table: offset profile per band.
 * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -454,7 +547,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */
 /**
- * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
 * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
 * @table: offset profile per band.
 * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -466,7 +559,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */
 /**
- * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
 * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
 * @table: offset profile per band.
 * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -478,7 +571,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */
 /**
- * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
 * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
 * @table: offset profile per band.
 * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -506,15 +599,45 @@ struct iwl_geo_tx_power_profiles_resp {
 } __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
 /**
+ * enum iwl_ppag_flags - PPAG enable masks
+ * @IWL_PPAG_ETSI_MASK: enable PPAG in ETSI
+ * @IWL_PPAG_CHINA_MASK: enable PPAG in China
+ * @IWL_PPAG_ETSI_LPI_UHB_MASK: enable LPI in ETSI for UHB
+ * @IWL_PPAG_ETSI_VLP_UHB_MASK: enable VLP in ETSI for UHB
+ * @IWL_PPAG_ETSI_SP_UHB_MASK: enable SP in ETSI for UHB
+ * @IWL_PPAG_USA_LPI_UHB_MASK: enable LPI in USA for UHB
+ * @IWL_PPAG_USA_VLP_UHB_MASK: enable VLP in USA for UHB
+ * @IWL_PPAG_USA_SP_UHB_MASK: enable SP in USA for UHB
+ * @IWL_PPAG_CANADA_LPI_UHB_MASK: enable LPI in CANADA for UHB
+ * @IWL_PPAG_CANADA_VLP_UHB_MASK: enable VLP in CANADA for UHB
+ * @IWL_PPAG_CANADA_SP_UHB_MASK: enable SP in CANADA for UHB
+ */
+enum iwl_ppag_flags {
+ IWL_PPAG_ETSI_MASK = BIT(0),
+ IWL_PPAG_CHINA_MASK = BIT(1),
+ IWL_PPAG_ETSI_LPI_UHB_MASK = BIT(2),
+ IWL_PPAG_ETSI_VLP_UHB_MASK = BIT(3),
+ IWL_PPAG_ETSI_SP_UHB_MASK = BIT(4),
+ IWL_PPAG_USA_LPI_UHB_MASK = BIT(5),
+ IWL_PPAG_USA_VLP_UHB_MASK = BIT(6),
+ IWL_PPAG_USA_SP_UHB_MASK = BIT(7),
+ IWL_PPAG_CANADA_LPI_UHB_MASK = BIT(8),
+ IWL_PPAG_CANADA_VLP_UHB_MASK = BIT(9),
+ IWL_PPAG_CANADA_SP_UHB_MASK = BIT(10),
+};
+
+/**
 * union iwl_ppag_table_cmd - union for all versions of PPAG command
 * @v1: version 1
 * @v2: version 2
- *
- * @flags: bit 0 - indicates enablement of PPAG for ETSI
- * bit 1 - indicates enablement of PPAG for CHINA BIOS
- * bit 1 can be used only in v3 (identical to v2)
- * @gain: table of antenna gain values per chain and sub-band
- * @reserved: reserved
+ * versions 3, 4, 5 and 6 are the same structure as v2,
+ * but have a different format of the flags bitmap
+ * @v1.flags: values from &enum iwl_ppag_flags
+ * @v1.gain: table of antenna gain values per chain and sub-band
+ * @v1.reserved: reserved
+ * @v2.flags: values from &enum iwl_ppag_flags
+ * @v2.gain: table of antenna gain values per chain and sub-band
+ * @v2.reserved: reserved
 */
 union iwl_ppag_table_cmd {
 struct {
@@ -529,6 +652,11 @@ union iwl_ppag_table_cmd {
 } v2;
 } __packed;
+#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+ IWL_PPAG_ETSI_LPI_UHB_MASK | \
+ IWL_PPAG_USA_LPI_UHB_MASK)
+
 #define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
 #define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
@@ -670,4 +798,44 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
 #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
+
+#define DEFAULT_TPE_TX_POWER 0x7F
+
+/*
+ * Bandwidth: 20/40/80/(160/80+80)/320
+ */
+#define IWL_MAX_TX_EIRP_PWR_MAX_SIZE 5
+#define IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE 16
+
+enum iwl_6ghz_ap_type {
+ IWL_6GHZ_AP_TYPE_LPI,
+ IWL_6GHZ_AP_TYPE_SP,
+ IWL_6GHZ_AP_TYPE_VLP,
+}; /* PHY_AP_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_txpower_constraints_cmd
+ * AP_TX_POWER_CONSTRAINTS_CMD
+ * Used for VLP/LPI/AFC Access Point power constraints for 6GHz channels
+ * @link_id: link id
+ * @ap_type: see &enum iwl_6ghz_ap_type
+ * @eirp_pwr: 8-bit 2s complement signed integer in the range
+ * -64 dBm to 63 dBm with a 0.5 dB step
+ * default &DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @psd_pwr: 8-bit 2s complement signed integer in the range
+ * -63.5 to +63 dBm/MHz with a 0.5 dB step
+ * a value of -128 indicates that the corresponding 20
+ * MHz channel cannot be used for transmission.
+ * a value of +127 indicates that no maximum PSD limit
+ * is specified for the corresponding 20 MHz channel
+ * default &DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @reserved: reserved (padding)
+ */
+struct iwl_txpower_constraints_cmd {
+ __le16 link_id;
+ __le16 ap_type;
+ __s8 eirp_pwr[IWL_MAX_TX_EIRP_PWR_MAX_SIZE];
+ __s8 psd_pwr[IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE];
+ u8 reserved[3];
+} __packed; /* PHY_AP_TX_POWER_CONSTRAINTS_CMD_API_S_VER_1 */
 #endif /* __iwl_fw_api_power_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index a1a272433b09..1a60f0cdf972 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
 * Copyright (C) 2017 Intel Deutschland GmbH
 */
 #ifndef __iwl_fw_api_rs_h__
@@ -9,7 +9,7 @@
 #include "mac.h"
 /**
- * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
 * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
 * bandwidths <= 80MHz
 * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index e71f29d0c694..691c879cb90d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
@@ -710,7 +710,15 @@ struct iwl_rx_mpdu_desc {
 __le32 reorder_data;
 union {
+ /**
+ * @v1: version 1 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v1
+ */
 struct iwl_rx_mpdu_desc_v1 v1;
+ /**
+ * @v3: version 3 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v3
+ */
 struct iwl_rx_mpdu_desc_v3 v3;
 };
 } __packed; /* RX_MPDU_RES_START_API_S_VER_3,
@@ -976,7 +984,7 @@ struct iwl_ba_window_status_notif {
 } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
 /**
- * struct iwl_rfh_queue_config - RX queue configuration
+ * struct iwl_rfh_queue_data - RX queue configuration
 * @q_num: Q num
 * @enable: enable queue
 * @reserved: alignment
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 93078f8cc08c..f486d624500b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
@@ -14,6
+14,10 @@ */ enum iwl_scan_subcmd_ids { /** + * @CHANNEL_SURVEY_NOTIF: &struct iwl_umac_scan_channel_survey_notif + */ + CHANNEL_SURVEY_NOTIF = 0xFB, + /** * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info */ OFFLOAD_MATCH_INFO_NOTIF = 0xFC, @@ -62,6 +66,8 @@ struct iwl_ssid_ie { #define IWL_FAST_SCHED_SCAN_ITERATIONS 3 #define IWL_MAX_SCHED_SCAN_PLANS 2 +#define IWL_MAX_NUM_NOISE_RESULTS 22 + enum scan_framework_client { SCAN_CLIENT_SCHED_SCAN = BIT(0), SCAN_CLIENT_NETDETECT = BIT(1), @@ -143,7 +149,7 @@ struct iwl_scan_offload_profile_cfg_data { } __packed; /** - * struct iwl_scan_offload_profile_cfg + * struct iwl_scan_offload_profile_cfg_v1 - scan offload profile config * @profiles: profiles to search for match * @data: the rest of the data for profile_cfg */ @@ -417,7 +423,7 @@ struct iwl_lmac_scan_complete_notif { } __packed; /** - * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 + * struct iwl_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 * @last_schedule_line: last schedule line executed (fast or regular) * @last_schedule_iteration: last scan iteration executed before scan abort * @status: &enum iwl_scan_offload_complete_status @@ -437,10 +443,10 @@ struct iwl_periodic_scan_complete { /* UMAC Scan API */ /* The maximum of either of these cannot exceed 8, because we use an - * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). + * 8-bit mask (see enum iwl_scan_status). */ -#define IWL_MVM_MAX_UMAC_SCANS 4 -#define IWL_MVM_MAX_LMAC_SCANS 1 +#define IWL_MAX_UMAC_SCANS 4 +#define IWL_MAX_LMAC_SCANS 1 enum scan_config_flags { SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), @@ -642,10 +648,13 @@ enum iwl_umac_scan_general_flags { * notification per channel or not. * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel * reorder optimization or not. + * @IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS: Enable channel statistics + * collection when #IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE is set. */ enum iwl_umac_scan_general_flags2 { IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1), + IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS = BIT(3), }; /** @@ -722,39 +731,46 @@ enum iwl_umac_scan_general_params_flags2 { * struct iwl_scan_channel_cfg_umac * @flags: bitmap - 0-19: directed scan to i'th ssid. * @channel_num: channel number 1-13 etc. - * @band: band of channel: 0 for 2GHz, 1 for 5GHz - * @iter_count: repetition count for the channel. - * @iter_interval: interval between two scan iterations on one channel. + * @v1: command version 1 + * @v1.iter_count: repetition count for the channel. + * @v1.iter_interval: interval between two scan iterations on one channel. + * @v2: command versions 2-4 + * @v2.band: band of channel: 0 for 2GHz, 1 for 5GHz + * @v2.iter_count: repetition count for the channel. + * @v2.iter_interval: interval between two scan iterations on one channel. + * @v5: command versions 5 and up + * @v5.iter_count: repetition count for the channel. + * @v5.iter_interval: interval between two scan iterations on one channel. + * @v5.psd_20: highest PSD value for all APs known so far + * on this channel. 
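Per the @flags documentation above, bits 0-19 of the channel config select which configured SSIDs the channel is probed for (directed scan), and the band is carried at IWL_CHAN_CFG_FLAGS_BAND_POS. A small sketch of composing such a flags word; chan_cfg_flags() and the band encoding used here are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define IWL_CHAN_CFG_FLAGS_BAND_POS 30

/*
 * hypothetical helper: bits 0..19 select which configured SSIDs are
 * probed on this channel, the top bits carry the band
 */
static uint32_t chan_cfg_flags(uint32_t ssid_bitmap, uint32_t band)
{
	return (ssid_bitmap & 0xFFFFF) | (band << IWL_CHAN_CFG_FLAGS_BAND_POS);
}

int main(void)
{
	/* probe SSIDs 0 and 3 on a 5 GHz channel (band value assumed = 1) */
	uint32_t flags = chan_cfg_flags((1u << 0) | (1u << 3), 1);

	printf("flags=0x%08x\n", flags);
	return 0;
}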
*/ struct iwl_scan_channel_cfg_umac { #define IWL_CHAN_CFG_FLAGS_BAND_POS 30 __le32 flags; + u8 channel_num; /* All versions are of the same size, so use a union without adjusting * the command size later */ union { struct { - u8 channel_num; u8 iter_count; __le16 iter_interval; - } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */ + } __packed v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */ struct { - u8 channel_num; u8 band; u8 iter_count; u8 iter_interval; - } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2 - * SCAN_CHANNEL_CONFIG_API_S_VER_3 - * SCAN_CHANNEL_CONFIG_API_S_VER_4 - */ + } __packed v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2 + * SCAN_CHANNEL_CONFIG_API_S_VER_3 + * SCAN_CHANNEL_CONFIG_API_S_VER_4 + */ struct { - u8 channel_num; u8 psd_20; u8 iter_count; u8 iter_interval; - } v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */ - }; + } __packed v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */ + } __packed; } __packed; /** @@ -780,7 +796,7 @@ struct iwl_scan_req_umac_tail_v1 { } __packed; /** - * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command + * struct iwl_scan_req_umac_tail_v2 - the rest of the UMAC scan request command * parameters following channels configuration array. * @schedule: two scheduling plans. * @delay: delay in TUs before starting the first scan iteration @@ -1076,7 +1092,7 @@ struct iwl_scan_req_params_v12 { } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */ /** - * struct iwl_scan_req_params_v16 + * struct iwl_scan_req_params_v17 - scan request parameters (v17) * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v7 * @periodic_params: &struct iwl_scan_periodic_parms_v1 @@ -1102,7 +1118,7 @@ struct iwl_scan_req_umac_v12 { } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */ /** - * struct iwl_scan_req_umac_v16 + * struct iwl_scan_req_umac_v17 - scan request command (v17) * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters @@ -1124,6 +1140,19 @@ struct iwl_umac_scan_abort { } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ /** + * enum iwl_umac_scan_abort_status + * + * @IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS: scan was successfully aborted + * @IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS: scan abort is in progress + * @IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND: nothing to abort + */ +enum iwl_umac_scan_abort_status { + IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS = 0, + IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS, + IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND, +}; + +/** * struct iwl_umac_scan_complete * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @last_schedule: last scheduling line @@ -1258,4 +1287,26 @@ struct iwl_umac_scan_iter_complete_notif { struct iwl_scan_results_notif results[]; } __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ +/** + * struct iwl_umac_scan_channel_survey_notif - data for survey + * @channel: the channel scanned + * @band: band of channel + * @noise: noise floor measurements in negative dBm, invalid 0xff + * @reserved: for future use and alignment + * @active_time: time in ms the radio was turned on (on the channel) + * @busy_time: time in ms the channel was sensed busy, 0 for a clean channel + * @tx_time: time the radio spent transmitting data + * @rx_time: time the radio spent receiving data + */ +struct iwl_umac_scan_channel_survey_notif { + __le32 channel; + __le32 band; + u8 noise[IWL_MAX_NUM_NOISE_RESULTS]; + u8 reserved[2]; + __le32 active_time; + __le32 busy_time; + __le32 tx_time; + __le32 rx_time; +} __packed; /* 
SCAN_CHANNEL_SURVEY_NTF_API_S_VER_1 */ + #endif /* __iwl_fw_api_scan_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index d62fed543276..d7f8a276b683 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021, 2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -109,6 +109,7 @@ enum iwl_sta_flags { * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from * station info array (1 - n 1X mode) + * @STA_KEY_FLG_AMSDU_SPP: SPP (signaling and payload protected) A-MSDU * @STA_KEY_FLG_KEYID_MSK: the index of the key * @STA_KEY_FLG_KEYID_POS: key index bit position * @STA_KEY_NOT_VALID: key is invalid @@ -129,6 +130,7 @@ enum iwl_sta_key_flag { STA_KEY_FLG_EN_MSK = (7 << 0), STA_KEY_FLG_WEP_KEY_MAP = BIT(3), + STA_KEY_FLG_AMSDU_SPP = BIT(7), STA_KEY_FLG_KEYID_POS = 8, STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), STA_KEY_NOT_VALID = BIT(11), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 2271b19213fa..0a9f14fb04be 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 - 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -327,14 +327,14 @@ struct mvm_statistics_load { __le32 air_time[MAC_INDEX_AUX]; __le32 byte_count[MAC_INDEX_AUX]; __le32 pkt_count[MAC_INDEX_AUX]; - u8 avg_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 avg_energy[IWL_STATION_COUNT_MAX]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */ struct mvm_statistics_load_v1 { __le32 air_time[NUM_MAC_INDEX]; __le32 byte_count[NUM_MAC_INDEX]; __le32 pkt_count[NUM_MAC_INDEX]; - u8 avg_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 avg_energy[IWL_STATION_COUNT_MAX]; } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ struct mvm_statistics_rx { @@ -594,7 +594,7 @@ struct iwl_stats_ntfy_per_sta { } __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */ #define IWL_STATS_MAX_PHY_OPERATIONAL 3 -#define IWL_STATS_MAX_FW_LINKS (IWL_MVM_FW_MAX_LINK_ID + 1) +#define IWL_STATS_MAX_FW_LINKS (IWL_FW_MAX_LINK_ID + 1) /** * struct iwl_system_statistics_notif_oper @@ -608,7 +608,7 @@ struct iwl_system_statistics_notif_oper { __le32 time_stamp; struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS]; struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; - struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX]; } __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */ /** @@ -651,7 +651,7 @@ struct iwl_statistics_operational_ntfy { __le32 flags; struct iwl_stats_ntfy_per_mac per_mac[MAC_INDEX_AUX]; struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; - struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX]; 
__le64 rx_time; __le64 tx_time; __le64 on_time_rf; @@ -699,7 +699,7 @@ struct iwl_statistics_operational_ntfy_ver_14 { __le64 tx_time; __le64 on_time_rf; __le64 on_time_scan; - __le32 average_energy[IWL_MVM_STATION_COUNT_MAX]; + __le32 average_energy[IWL_STATION_COUNT_MAX]; __le32 reserved; } __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h index 893438aadab0..cfa6532a3cdd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -10,7 +10,7 @@ #include "fw/api/tx.h" #include "fw/api/phy-ctxt.h" -#define IWL_MVM_TDLS_STA_COUNT 4 +#define IWL_TDLS_STA_COUNT 4 /* Type of TDLS request */ enum iwl_tdls_channel_switch_type { @@ -128,7 +128,7 @@ struct iwl_tdls_config_cmd { u8 tdls_peer_count; u8 tx_to_ap_tid; __le16 tx_to_ap_ssn; - struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; + struct iwl_tdls_sta_info sta_info[IWL_TDLS_STA_COUNT]; __le32 pti_req_data_offset; struct iwl_tx_cmd pti_req_tx_cmd; @@ -155,7 +155,7 @@ struct iwl_tdls_config_sta_info_res { */ struct iwl_tdls_config_res { __le32 tx_to_ap_last_seq; - struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; + struct iwl_tdls_config_sta_info_res sta_info[IWL_TDLS_STA_COUNT]; } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ #endif /* __iwl_fw_api_tdls_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 2e15be71c957..18d030334a6a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020, 2022-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -340,11 +340,13 @@ struct iwl_hs20_roc_res { * @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity * @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity * @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity + * @ROC_ACTIVITY_P2P_NEG: ROC for p2p negotiation (used also for TX) */ enum iwl_roc_activity { ROC_ACTIVITY_HOTSPOT, ROC_ACTIVITY_P2P_DISC, ROC_ACTIVITY_P2P_TXRX, + ROC_ACTIVITY_P2P_NEG, ROC_NUM_ACTIVITIES }; /* ROC_ACTIVITY_API_E_VER_1 */ @@ -393,7 +395,7 @@ struct iwl_roc_notif { } __packed; /* ROC_NOTIF_API_S_VER_1 */ /** - * enum iwl_mvm_session_prot_conf_id - session protection's configurations + * enum iwl_session_prot_conf_id - session protection's configurations * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association. * The firmware will allocate two events. * Valid for BSS_STA and P2P_STA. @@ -422,7 +424,7 @@ struct iwl_roc_notif { * be taken into account. 
* @SESSION_PROTECT_CONF_MAX_ID: not used */ -enum iwl_mvm_session_prot_conf_id { +enum iwl_session_prot_conf_id { SESSION_PROTECT_CONF_ASSOC, SESSION_PROTECT_CONF_GO_CLIENT_ASSOC, SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV, @@ -431,12 +433,12 @@ enum iwl_mvm_session_prot_conf_id { }; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */ /** - * struct iwl_mvm_session_prot_cmd - configure a session protection + * struct iwl_session_prot_cmd - configure a session protection * @id_and_color: the id and color of the link (or mac, for command version 1) * for which this session protection is sent * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE, * see &enum iwl_ctxt_action - * @conf_id: see &enum iwl_mvm_session_prot_conf_id + * @conf_id: see &enum iwl_session_prot_conf_id * @duration_tu: the duration of the whole protection in TUs. * @repetition_count: not used * @interval: not used @@ -446,7 +448,7 @@ enum iwl_mvm_session_prot_conf_id { * The firmware supports only one concurrent session protection per vif. * Adding a new session protection will remove any currently running session. */ -struct iwl_mvm_session_prot_cmd { +struct iwl_session_prot_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ __le32 id_and_color; __le32 action; @@ -460,17 +462,17 @@ struct iwl_mvm_session_prot_cmd { */ /** - * struct iwl_mvm_session_prot_notif - session protection started / ended + * struct iwl_session_prot_notif - session protection started / ended * @mac_link_id: the mac id (or link id, for notif ver > 2) for which the * session protection started / ended * @status: 1 means success, 0 means failure * @start: 1 means the session protection started, 0 means it ended - * @conf_id: see &enum iwl_mvm_session_prot_conf_id + * @conf_id: see &enum iwl_session_prot_conf_id * * Note that any session protection will always get two notifications: start * and end even the firmware could not schedule it. */ -struct iwl_mvm_session_prot_notif { +struct iwl_session_prot_notif { __le32 mac_link_id; __le32 status; __le32 start; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 842360b1e995..0a39e4b6eb62 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tx_h__ @@ -76,6 +76,8 @@ enum iwl_tx_flags { * to a secured STA * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate * selection, retry limits and BT kill + * @IWL_TX_FLAGS_RTS: firmware used an RTS + * @IWL_TX_FLAGS_CTS: firmware used CTS-to-self */ enum iwl_tx_cmd_flags { IWL_TX_FLAGS_CMD_RATE = BIT(0), @@ -189,7 +191,7 @@ enum iwl_tx_offload_assist_flags_pos { * cleared. Combination of RATE_MCS_* * @sta_id: index of destination station in FW station table * @sec_ctl: security control, TX_CMD_SEC_* - * @initial_rate_index: index into the the rate table for initial TX attempt. + * @initial_rate_index: index into the rate table for initial TX attempt. * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames. 
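The session-protection command defined above takes an id_and_color, an action and a configuration id. A standalone sketch of filling one for association protection; the id/color packing helper and the FW_CTXT_ACTION_ADD and conf id values are assumptions for illustration (the real definitions live elsewhere in the driver), and the struct is a simplified host-order re-declaration:

#include <stdint.h>
#include <stdio.h>

/* assumed values for illustration only */
#define FW_CTXT_ACTION_ADD 1
#define SESSION_PROTECT_CONF_ASSOC 0

struct session_prot_cmd {          /* simplified, host-order */
	uint32_t id_and_color;
	uint32_t action;
	uint32_t conf_id;
	uint32_t duration_tu;
	uint32_t repetition_count;   /* not used */
	uint32_t interval;           /* not used */
};

/* hypothetical packing of link id and color (bit positions assumed) */
static uint32_t fw_id_and_color(uint32_t id, uint32_t color)
{
	return id | (color << 8);
}

int main(void)
{
	struct session_prot_cmd cmd = {
		.id_and_color = fw_id_and_color(0 /* link */, 0),
		.action = FW_CTXT_ACTION_ADD,
		.conf_id = SESSION_PROTECT_CONF_ASSOC,
		.duration_tu = 900, /* ~922 ms; 1 TU = 1024 us */
	};

	printf("protecting link %u for %u TUs\n",
	       (unsigned)(cmd.id_and_color & 0xff), (unsigned)cmd.duration_tu);
	return 0;
}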
* @reserved2: reserved * @key: security key @@ -296,8 +298,7 @@ struct iwl_tx_cmd_gen3 { __le32 rate_n_flags; u8 reserved[8]; struct ieee80211_hdr hdr[]; -} __packed; /* TX_CMD_API_S_VER_8, - TX_CMD_API_S_VER_10 */ +} __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */ /* * TX response related data @@ -480,11 +481,11 @@ struct agg_tx_status { #define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ TX_RES_RATE_TABLE_COLOR_POS) -#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) -#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) +#define IWL_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) +#define IWL_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) /** - * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet + * struct iwl_tx_resp_v3 - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) @@ -515,7 +516,7 @@ struct agg_tx_status { * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. */ -struct iwl_mvm_tx_resp_v3 { +struct iwl_tx_resp_v3 { u8 frame_count; u8 bt_kill_count; u8 failure_rts; @@ -541,7 +542,7 @@ struct iwl_mvm_tx_resp_v3 { } __packed; /* TX_RSP_API_S_VER_3 */ /** - * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet + * struct iwl_tx_resp - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) @@ -573,7 +574,7 @@ struct iwl_mvm_tx_resp_v3 { * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. */ -struct iwl_mvm_tx_resp { +struct iwl_tx_resp { u8 frame_count; u8 bt_kill_count; u8 failure_rts; @@ -599,7 +600,8 @@ struct iwl_mvm_tx_resp { __le16 reserved2; struct agg_tx_status status; } __packed; /* TX_RSP_API_S_VER_6, - TX_RSP_API_S_VER_7 */ + TX_RSP_API_S_VER_7, + TX_RSP_API_S_VER_8 */ /** * struct iwl_mvm_ba_notif - notifies about reception of BA @@ -636,14 +638,14 @@ struct iwl_mvm_ba_notif { } __packed; /** - * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue + * struct iwl_compressed_ba_tfd - progress of a TFD queue * @q_num: TFD queue number * @tfd_index: Index of first un-acked frame in the TFD queue * @scd_queue: For debug only - the physical queue the TFD queue is bound to * @tid: TID of the queue (0-7) * @reserved: reserved for alignment */ -struct iwl_mvm_compressed_ba_tfd { +struct iwl_compressed_ba_tfd { __le16 q_num; __le16 tfd_index; u8 scd_queue; @@ -652,12 +654,12 @@ struct iwl_mvm_compressed_ba_tfd { } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */ /** - * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue + * struct iwl_compressed_ba_ratid - progress of a RA TID queue * @q_num: RA TID queue number * @tid: TID of the queue * @ssn: BA window current SSN */ -struct iwl_mvm_compressed_ba_ratid { +struct iwl_compressed_ba_ratid { u8 q_num; u8 tid; __le16 ssn; @@ -683,7 +685,7 @@ enum iwl_mvm_ba_resp_flags { }; /** - * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA + * struct iwl_compressed_ba_notif - notifies about reception of BA * ( BA_NOTIF = 0xc5 ) * @flags: status flag, see the &iwl_mvm_ba_resp_flags * @sta_id: Index of recipient (BA-sending) station in fw's station table @@ -696,17 +698,18 @@ enum iwl_mvm_ba_resp_flags { * @query_frame_cnt: SCD query frame count * @txed: number of frames sent in the aggregation (all-TIDs) * @done: 
number of frames that were Acked by the BA (all-TIDs) + * @rts_retry_cnt: RTS retry count * @reserved: reserved (for alignment) * @wireless_time: Wireless-media time * @tx_rate: the rate the aggregation was sent at * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements - * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd + * @tfd: array of TFD queue status updates. See &iwl_compressed_ba_tfd * for details. Length in @tfd_cnt. * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See - * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt. + * &iwl_compressed_ba_ratid for more details. Length in @ra_tid_cnt. */ -struct iwl_mvm_compressed_ba_notif { +struct iwl_compressed_ba_notif { __le32 flags; u8 sta_id; u8 reduced_txp; @@ -716,14 +719,15 @@ struct iwl_mvm_compressed_ba_notif { __le16 query_frame_cnt; __le16 txed; __le16 done; - __le16 reserved; + u8 rts_retry_cnt; + u8 reserved; __le32 wireless_time; __le32 tx_rate; __le16 tfd_cnt; __le16 ra_tid_cnt; union { - DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid); - DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd); + DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_ratid, ra_tid); + DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_tfd, tfd); }; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4, COMPRESSED_BA_RES_API_S_VER_5 */ @@ -791,7 +795,8 @@ enum iwl_mac_beacon_flags { * @reserved: reserved * @link_id: the firmware id of the link that will use this beacon * @tim_idx: the offset of the tim IE in the beacon - * @tim_size: the length of the tim IE + * @tim_size: the length of the tim IE (version < 14) + * @btwt_offset: offset to the broadcast TWT IE if present (version >= 14) * @ecsa_offset: offset to the ECSA IE if present * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame @@ -803,18 +808,22 @@ struct iwl_mac_beacon_cmd { __le32 reserved; __le32 link_id; __le32 tim_idx; - __le32 tim_size; + union { + __le32 tim_size; + __le32 btwt_offset; + }; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10, * BEACON_TEMPLATE_CMD_API_S_VER_11, * BEACON_TEMPLATE_CMD_API_S_VER_12, - * BEACON_TEMPLATE_CMD_API_S_VER_13 + * BEACON_TEMPLATE_CMD_API_S_VER_13, + * BEACON_TEMPLATE_CMD_API_S_VER_14 */ struct iwl_beacon_notif { - struct iwl_mvm_tx_resp beacon_notify_hdr; + struct iwl_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; } __packed; @@ -827,7 +836,7 @@ struct iwl_beacon_notif { * @gp2: last beacon time in gp2 */ struct iwl_extended_beacon_notif_v5 { - struct iwl_mvm_tx_resp beacon_notify_hdr; + struct iwl_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; __le32 gp2; @@ -857,7 +866,7 @@ enum iwl_dump_control { }; /** - * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command + * struct iwl_tx_path_flush_cmd_v1 -- queue/FIFO flush command * @queues_ctl: bitmap of queues to flush * @flush_ctl: control flags * @reserved: reserved @@ -884,6 +893,7 @@ struct iwl_tx_path_flush_cmd { /** * struct iwl_flush_queue_info - virtual flush queue info + * @tid: the tid to flush * @queue_num: virtual queue id * @read_before_flush: read pointer before flush * @read_after_flush: read pointer after flush @@ -897,6 +907,7 @@ struct iwl_flush_queue_info { /** * struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response + * @sta_id: the station for which the queue was flushed * @num_flushed_queues: number of queues in queues array * @queues: 
all flushed queues */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 80fda056e46a..6594216f873c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, } /* - * alloc_sgtable - allocates scallerlist table in the given size, - * fills it with pages and returns it + * alloc_sgtable - allocates (chained) scatterlist in the given size, + * fills it with pages and returns it * @size: the size (in bytes) of the table -*/ -static struct scatterlist *alloc_sgtable(int size) + */ +static struct scatterlist *alloc_sgtable(ssize_t size) { - int alloc_size, nents, i; - struct page *new_page; - struct scatterlist *iter; - struct scatterlist *table; + struct scatterlist *result = NULL, *prev; + int nents, i, n_prev; nents = DIV_ROUND_UP(size, PAGE_SIZE); - table = kcalloc(nents, sizeof(*table), GFP_KERNEL); - if (!table) - return NULL; - sg_init_table(table, nents); - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) { - /* release all previous allocated pages in the table */ - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = sg_page(iter); - if (new_page) - __free_page(new_page); - } - kfree(table); + +#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result)) + /* + * We need an additional entry for table chaining, + * this ensures the loop can finish i.e. we can + * fit at least two entries per page (obviously, + * many more really fit.) + */ + BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2); + + while (nents > 0) { + struct scatterlist *new, *iter; + int n_fill, n_alloc; + + if (nents <= N_ENTRIES_PER_PAGE) { + /* last needed table */ + n_fill = nents; + n_alloc = nents; + nents = 0; + } else { + /* fill a page with entries */ + n_alloc = N_ENTRIES_PER_PAGE; + /* reserve one for chaining */ + n_fill = n_alloc - 1; + nents -= n_fill; + } + + new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL); + if (!new) { + if (result) + _devcd_free_sgtable(result); return NULL; } - alloc_size = min_t(int, size, PAGE_SIZE); - size -= PAGE_SIZE; - sg_set_page(iter, new_page, alloc_size, 0); + sg_init_table(new, n_alloc); + + if (!result) + result = new; + else + sg_chain(prev, n_prev, new); + prev = new; + n_prev = n_alloc; + + for_each_sg(new, iter, n_fill, i) { + struct page *new_page = alloc_page(GFP_KERNEL); + + if (!new_page) { + _devcd_free_sgtable(result); + return NULL; + } + + sg_set_page(iter, new_page, PAGE_SIZE, 0); + } } - return table; + + return result; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, @@ -1026,17 +1056,12 @@ static int iwl_dump_ini_prph_mac_iter_common(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; - u32 prph_val; int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = size; - for (i = 0; i < le32_to_cpu(size); i += 4) { - prph_val = iwl_read_prph(fwrt->trans, addr + i); - if (iwl_trans_is_hw_error_value(prph_val)) - return -EBUSY; - *val++ = cpu_to_le32(prph_val); - } + for (i = 0; i < le32_to_cpu(size); i += 4) + *val++ = cpu_to_le32(iwl_read_prph(fwrt->trans, addr + i)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } @@ -1173,17 +1198,13 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt, le32_to_cpu(reg->dev_addr.offset); int i; - /* we shouldn't get here if the trans doesn't have 
read_config32 */ - if (WARN_ON_ONCE(!trans->ops->read_config32)) - return -EOPNOTSUPP; - range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { int ret; u32 tmp; - ret = trans->ops->read_config32(trans, addr + i, &tmp); + ret = iwl_trans_read_config32(trans, addr + i, &tmp); if (ret < 0) return ret; @@ -1727,10 +1748,12 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, /** * mask_apply_and_normalize - applies mask on val and normalize the result * - * The normalization is based on the first set bit in the mask - * * @val: value * @mask: mask to apply and to normalize with + * + * The normalization is based on the first set bit in the mask + * + * Returns: the extracted value */ static u32 mask_apply_and_normalize(u32 val, u32 mask) { @@ -2199,15 +2222,16 @@ struct iwl_dump_ini_mem_ops { }; /** - * iwl_dump_ini_mem - * - * Creates a dump tlv and copy a memory region into it. - * Returns the size of the current dump tlv or 0 if failed + * iwl_dump_ini_mem - dump memory region * * @fwrt: fw runtime struct * @list: list to add the dump tlv to * @reg_data: memory region * @ops: memory dump operations + * + * Creates a dump tlv and copy a memory region into it. + * + * Returns: the size of the current dump tlv or 0 if failed */ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, struct iwl_dump_ini_region_data *reg_data, @@ -2426,9 +2450,12 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)node->tlv.data; + BUILD_BUG_ON(sizeof(cfg_name->cfg_name) != + sizeof(debug_info->debug_cfg_name)); + cfg_name->image_type = debug_info->image_type; cfg_name->cfg_name_len = - cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME); + cpu_to_le32(sizeof(cfg_name->cfg_name)); memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name, sizeof(cfg_name->cfg_name)); cfg_name++; @@ -2872,7 +2899,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type)); - schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay)); + queue_delayed_work(system_unbound_wq, &wk_data->wk, + usecs_to_jiffies(delay)); return 0; } @@ -3074,11 +3102,10 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) struct iwl_fw_dbg_params params = {0}; struct iwl_fwrt_dump_data *dump_data = &fwrt->dump.wks[wk_idx].dump_data; - u32 policy; - u32 time_point; if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; + /* also checks 'desc' for pre-ini mode, since that shadows in union */ if (!dump_data->trig) { IWL_ERR(fwrt, "dump trigger data is not set\n"); goto out; @@ -3106,13 +3133,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); - policy = le32_to_cpu(dump_data->trig->apply_policy); - time_point = le32_to_cpu(dump_data->trig->time_point); + if (iwl_trans_dbg_ini_valid(fwrt->trans)) { + u32 policy = le32_to_cpu(dump_data->trig->apply_policy); + u32 time_point = le32_to_cpu(dump_data->trig->time_point); - if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) { - IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n"); - iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0); + if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) { + IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n"); + iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0); + } } + if 
(fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) iwl_force_nmi(fwrt->trans); @@ -3174,7 +3204,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, if (sync) iwl_fw_dbg_collect_sync(fwrt, idx); else - schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); + queue_delayed_work(system_unbound_wq, + &fwrt->dump.wks[idx].wk, + usecs_to_jiffies(delay)); return 0; } @@ -3346,7 +3378,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, { int ret __maybe_unused = 0; - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) + if (!iwl_trans_fw_running(fwrt->trans)) return; if (fw_has_capa(&fwrt->fw->ucode_capa, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index eb38c686b5cb..87998374f459 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2019, 2021-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2019, 2021-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -287,7 +287,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, trans->dbg.umac_error_event_table = umac_error_event_table; } -static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync) +static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) { enum iwl_fw_ini_time_point tp_id; @@ -303,11 +303,9 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync) tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT; } - _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync); + iwl_dbg_tlv_time_point_sync(fwrt, tp_id, NULL); } -void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); - static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt, struct iwl_lmac_alive *lmac, struct iwl_umac_alive *umac) @@ -329,18 +327,19 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data); +bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id); void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt); void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt); -#define IWL_FW_CHECK_FAILED(_obj, _fmt, ...) \ - IWL_ERR_LIMIT(_obj, _fmt, __VA_ARGS__) +#define IWL_FW_CHECK_FAILED(_obj, ...) \ + IWL_ERR_LIMIT(_obj, __VA_ARGS__) #define IWL_FW_CHECK(_obj, _cond, _fmt, ...) 
\ ({ \ bool __cond = (_cond); \ \ if (unlikely(__cond)) \ - IWL_FW_CHECK_FAILED(_obj, _fmt, __VA_ARGS__); \ + IWL_FW_CHECK_FAILED(_obj, _fmt, ##__VA_ARGS__); \ \ unlikely(__cond); \ }) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 751a125a1566..f0c813d675f4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -123,6 +123,24 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) +static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_fw_runtime *fwrt, + char *buf, size_t count) +{ + if (count == 0) + return 0; + + if (!iwl_trans_fw_running(fwrt->trans)) + return count; + + iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL); + + iwl_fw_dbg_collect(fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); + + return count; +} + +FWRT_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 16); + static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count) { @@ -230,8 +248,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, .data = { NULL, }, }; - if (fwrt->ops && fwrt->ops->fw_running && - !fwrt->ops->fw_running(fwrt->ops_ctx)) + if (!iwl_trans_fw_running(fwrt->trans)) return -EIO; if (count < header_size + 1 || count > 1024 * 4) @@ -283,6 +300,26 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt, FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20); +static ssize_t iwl_dbgfs_fw_ver_read(struct iwl_fw_runtime *fwrt, + size_t size, char *buf) +{ + char *pos = buf; + char *endpos = buf + size; + + pos += scnprintf(pos, endpos - pos, "FW id: %s\n", + fwrt->fw->fw_version); + pos += scnprintf(pos, endpos - pos, "FW: %s\n", + fwrt->fw->human_readable); + pos += scnprintf(pos, endpos - pos, "Device: %s\n", + fwrt->trans->name); + pos += scnprintf(pos, endpos - pos, "Bus: %s\n", + fwrt->dev->bus->name); + + return pos - buf; +} + +FWRT_DEBUGFS_READ_FILE_OPS(fw_ver, 1024); + struct iwl_dbgfs_fw_info_priv { struct iwl_fw_runtime *fwrt; }; @@ -404,5 +441,7 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200); + FWRT_DEBUGFS_ADD_FILE(fw_dbg_collect, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400); + FWRT_DEBUGFS_ADD_FILE(fw_ver, dbgfs_dir, 0400); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index 8f107ceec407..c7b261c8ec96 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -530,3 +530,31 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt) } } 
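The fw_ver read handler above relies on the kernel's scnprintf() contract: it never writes past the buffer and returns the number of bytes actually written, so the cursor can be advanced unconditionally. A userspace sketch of the same idiom; scnprintf_u() is a stand-in reimplementation, not a kernel API, and the strings are placeholders:

#include <stdarg.h>
#include <stdio.h>

/* userspace stand-in for the kernel's scnprintf(): returns bytes written */
static int scnprintf_u(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (!size)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return n < (int)size ? n : (int)(size - 1);
}

/* same cursor pattern as iwl_dbgfs_fw_ver_read() */
static int fill_info(char *buf, size_t size)
{
	char *pos = buf, *endpos = buf + size;

	pos += scnprintf_u(pos, endpos - pos, "FW id: %s\n", "example-77.ucode");
	pos += scnprintf_u(pos, endpos - pos, "Device: %s\n", "example device");
	return pos - buf;
}

int main(void)
{
	char buf[64];
	int len = fill_info(buf, sizeof(buf));

	fwrite(buf, 1, len, stdout);
	return 0;
}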
IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs); + +bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id) +{ + struct error_table_start { + /* cf. struct iwl_error_event_table */ + u32 valid; + __le32 err_id; + } err_info = {}; + int ret; + + if (err_id) + *err_id = 0; + + if (!base) + return false; + + ret = iwl_trans_read_mem_bytes(trans, base, + &err_info, sizeof(err_info)); + + if (ret) + return true; + + if (err_info.valid && err_id) + *err_id = le32_to_cpu(err_info.err_id); + + return !!err_info.valid; +} +IWL_EXPORT_SYMBOL(iwl_fwrt_read_err_table); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 06d6f7f66430..3af275133da0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2014, 2018-2022 Intel Corporation + * Copyright (C) 2014, 2018-2024 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -16,7 +16,7 @@ /** * enum iwl_fw_error_dump_type - types of data in the dump file * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0 - * @IWL_FW_ERROR_DUMP_RXF: + * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as * &struct iwl_fw_error_dump_txcmd packets * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info @@ -24,21 +24,24 @@ * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several * sections like this in a single file. + * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers * @IWL_FW_ERROR_DUMP_MEM: chunk of memory * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. * Structured as &struct iwl_fw_error_dump_trigger_desc. * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as * &struct iwl_fw_error_dump_rb - * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were + * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were * paged to the DRAM. * @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers. + * @IWL_FW_ERROR_DUMP_INTERNAL_TXF: internal TX FIFO data * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and * for that reason is not in use in any other place in the Linux Wi-Fi * stack. * @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem, * which we get from the fw after ALIVE. The content is structured as * &struct iwl_fw_error_dump_smem_cfg. 
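iwl_fwrt_read_err_table() above conservatively reports an error as present when the memory read itself fails, and fills err_id only when the table header is marked valid. A standalone sketch of that contract with a fake memory reader; read_mem() is a stand-in for iwl_trans_read_mem_bytes() and the fake table contents are invented for the demo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct error_table_start {  /* mirrors the head of the FW error table */
	uint32_t valid;
	uint32_t err_id;        /* little-endian on the device */
};

/* stand-in for iwl_trans_read_mem_bytes(): pretend device memory */
static int read_mem(uint32_t base, void *buf, size_t len)
{
	static const struct error_table_start fake = { .valid = 1, .err_id = 0x84 };

	if (!base)
		return -1;
	memcpy(buf, &fake, len < sizeof(fake) ? len : sizeof(fake));
	return 0;
}

/* same contract as iwl_fwrt_read_err_table(): a failed read is
 * reported as "error present" */
static bool read_err_table(uint32_t base, uint32_t *err_id)
{
	struct error_table_start err_info = { 0 };

	if (err_id)
		*err_id = 0;
	if (!base)
		return false;
	if (read_mem(base, &err_info, sizeof(err_info)))
		return true;
	if (err_info.valid && err_id)
		*err_id = err_info.err_id; /* le32_to_cpu() in the kernel */
	return !!err_info.valid;
}

int main(void)
{
	uint32_t id;

	if (read_err_table(0x800000, &id))
		printf("FW error table valid, err_id=0x%x\n", id);
	return 0;
}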
+ * @IWL_FW_ERROR_DUMP_D3_DEBUG_DATA: D3 debug data */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -59,8 +62,6 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */ IWL_FW_ERROR_DUMP_MEM_CFG = 16, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17, - - IWL_FW_ERROR_DUMP_MAX, }; /** @@ -168,7 +169,7 @@ struct iwl_fw_error_dump_info { * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer * @fw_mon_base_ptr: base pointer of the data * @fw_mon_cycle_cnt: number of wraparounds - * @fw_mon_base_high_ptr: used in AX210 devices, the base adderss is 64 bit + * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit * so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr hold * MSB 32 bits * @reserved: for future use @@ -247,7 +248,7 @@ struct iwl_fw_error_dump_mem { #define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24)) /** - * struct iwl_fw_error_dump_data - data for one type + * struct iwl_fw_ini_error_dump_data - data for one type * @type: &enum iwl_fw_ini_region_type * @sub_type: sub type id * @sub_type_ver: sub type version @@ -277,7 +278,7 @@ struct iwl_fw_ini_dump_entry { } __packed; /** - * struct iwl_fw_error_dump_file - header of dump file + * struct iwl_fw_ini_dump_file_hdr - header of dump file * @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER * @file_len: the length of all the file including the header */ @@ -442,7 +443,7 @@ struct iwl_fw_ini_err_table_dump { * struct iwl_fw_error_dump_rb - content of an Receive Buffer * @index: the index of the Receive Buffer in the Rx queue * @rxq: the RB's Rx queue - * @reserved: + * @reserved: reserved * @data: the content of the Receive Buffer */ struct iwl_fw_error_dump_rb { @@ -488,7 +489,7 @@ struct iwl_fw_ini_special_device_memory { * struct iwl_fw_error_dump_paging - content of the UMAC's image page * block on DRAM * @index: the index of the page block - * @reserved: + * @reserved: reserved * @data: the content of the page block */ struct iwl_fw_error_dump_paging { @@ -511,6 +512,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) /** * enum iwl_fw_dbg_trigger - triggers available * + * @FW_DBG_TRIGGER_INVALID: invalid trigger value * @FW_DBG_TRIGGER_USER: trigger log collection by user * This should not be defined as a trigger to the driver, but a value the * driver should set to indicate that the trigger was initiated by the @@ -530,14 +532,15 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related * events. * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. - * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a - * threshold. - * @FW_DBG_TDLS: trigger log collection upon TDLS related events. + * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency + * goes above a threshold. + * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events. * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when * the firmware sends a tx reply. * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts * @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure * in the driver. + * @FW_DBG_TRIGGER_MAX: beyond triggers, number for sizing arrays etc. 
*/ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index bfc39bd5bbc6..9860903ecd3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2008-2014, 2018-2023 Intel Corporation + * Copyright (C) 2008-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -104,6 +104,7 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_CURRENT_PC = 68, IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0, + IWL_UCODE_TLV_FW_NUM_LINKS = IWL_UCODE_TLV_CONST_BASE + 1, IWL_UCODE_TLV_FW_NUM_BEACONS = IWL_UCODE_TLV_CONST_BASE + 2, IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0, @@ -216,6 +217,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * ADD_MODIFY_STA_KEY_API_S_VER_2. * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 + * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL: support for adaptive dwell in scanning * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field * indicating low latency direction. @@ -239,14 +241,21 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of * STA_CONTEXT_DOT11AX_API_S + * @IWL_UCODE_TLV_API_FTM_RTT_ACCURACY: version 7 of the range response API + * is supported by FW, this indicates the RTT confidence value * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar * version tables. * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of - * SCAN_CONFIG_DB_CMD_API_S. + * SCAN_CONFIG_DB_CMD_API_S. + * @IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP: support for setting adaptive dwell + * number of APs in the 5 GHz band + * @IWL_UCODE_TLV_API_BAND_IN_RX_DATA: FW reports band number in RX notification * @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: Firmware offloaded the station disable tx * logic. * @IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR: Firmware supports clearing the debug * internal buffer + * @IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD: Firmware doesn't need the host to + * configure the smart fifo * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -287,6 +296,7 @@ enum iwl_ucode_tlv_api { /* API Set 2 */ IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = (__force iwl_ucode_tlv_api_t)66, IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = (__force iwl_ucode_tlv_api_t)67, + IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD = (__force iwl_ucode_tlv_api_t)68, NUM_IWL_UCODE_TLV_API /* @@ -375,7 +385,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * to report the CSI information with (certain) RX frames * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both * initiator and responder - * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload + * @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA: supports (de)activating UNII-4 + * for US/CA/WW from BIOS * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in * reset flow @@ -383,6 +394,14 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * channels even when these are not enabled. 
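Capability bits like the ones documented above are stored in the multi-word _capa[] bitmap of struct iwl_ucode_capabilities, which is why indices such as 103 or 124 work naturally. A minimal sketch of the bitmap test; set_capa()/has_capa() are simplified analogues of the driver's fw_has_capa(), and the bit value is just an example:

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define NUM_CAPA 128

/* same shape as the _capa[] bitmap in struct iwl_ucode_capabilities */
struct ucode_capa {
	unsigned long _capa[BITS_TO_LONGS(NUM_CAPA)];
};

static void set_capa(struct ucode_capa *c, unsigned bit)
{
	c->_capa[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

/* analogue of fw_has_capa() for capability indices beyond one word */
static bool has_capa(const struct ucode_capa *c, unsigned bit)
{
	return c->_capa[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
}

int main(void)
{
	struct ucode_capa c = { { 0 } };

	set_capa(&c, 103); /* e.g. IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT */
	printf("SPP A-MSDU: %s\n", has_capa(&c, 103) ? "yes" : "no");
	return 0;
}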
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection * complete to FW. + * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload + * protected) A-MSDU. + * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement. + * @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise + * passive channels + * @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA: supports (de)activating 5G9 + * for CA from BIOS. + * @IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT: supports %TAS_UHB_ALLOWED_CANADA * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -460,7 +479,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)93, /* set 3 */ - IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, + IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA = (__force iwl_ucode_tlv_capa_t)96, /* * @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels @@ -468,6 +487,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98, IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100, + IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = (__force iwl_ucode_tlv_capa_t)103, IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104, IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105, IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106, @@ -480,7 +500,10 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114, IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116, IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117, - + IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121, + IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122, + IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA = (__force iwl_ucode_tlv_capa_t)123, + IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT = (__force iwl_ucode_tlv_capa_t)124, NUM_IWL_UCODE_TLV_CAPA /* * This construction make both sparse (which cannot increment the previous @@ -566,6 +589,7 @@ enum iwl_fw_dbg_reg_operator { * struct iwl_fw_dbg_reg_op - an operation on a register * * @op: &enum iwl_fw_dbg_reg_operator + * @reserved: reserved * @addr: offset of the register * @val: value */ @@ -612,6 +636,7 @@ struct iwl_fw_dbg_mem_seg_tlv { * @version: version of the TLV - currently 0 * @monitor_mode: &enum iwl_fw_dbg_monitor_mode * @size_power: buffer size will be 2^(size_power + 11) + * @reserved: reserved * @base_reg: addr of the base addr register (PRPH) * @end_reg: addr of the end addr register (PRPH) * @write_ptr_reg: the addr of the reg of the write pointer @@ -722,6 +747,8 @@ enum iwl_fw_dbg_trigger_vif_type { * @trig_dis_ms: the time, in milliseconds, after an occurrence of this * trigger in which another occurrence should be ignored. * @flags: &enum iwl_fw_dbg_trigger_flags + * @reserved: reserved (for alignment) + * @data: trigger data */ struct iwl_fw_dbg_trigger_tlv { __le32 id; @@ -762,7 +789,7 @@ struct iwl_fw_dbg_trigger_missed_bcon { /** * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW. 
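[Editorial aside] Capability indices now run past 124 (IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT above), so a single 64-bit word cannot hold them; iwlwifi keeps them in an unsigned long bitmap sized by the NUM_* terminator. A self-contained sketch of that storage scheme, with hypothetical names:

#include <linux/bitops.h>

#define NUM_EX_CAPA 125	/* one past the highest capability bit in use */

struct ex_capabilities {
	unsigned long _capa[BITS_TO_LONGS(NUM_EX_CAPA)];
};

static inline void ex_set_capa(struct ex_capabilities *c, unsigned int bit)
{
	if (bit < NUM_EX_CAPA)
		__set_bit(bit, c->_capa);
}

static inline bool ex_has_capa(const struct ex_capabilities *c,
			       unsigned int bit)
{
	return bit < NUM_EX_CAPA && test_bit(bit, c->_capa);
}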
- * cmds: the list of commands to trigger the collection on + * @cmds: the list of commands to trigger the collection on */ struct iwl_fw_dbg_trigger_cmd { struct cmd { @@ -772,7 +799,7 @@ struct iwl_fw_dbg_trigger_cmd { } __packed; /** - * iwl_fw_dbg_trigger_stats - configures trigger for statistics + * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics * @stop_offset: the offset of the value to be monitored * @stop_threshold: the threshold above which to collect * @start_offset: the offset of the value to be monitored diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c index b7deca05a953..c2f4fc83a22c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2019 - 2021 Intel Corporation + * Copyright(c) 2024 Intel Corporation */ #include <fw/api/commands.h> #include "img.h" @@ -75,6 +76,7 @@ static const struct { { "NMI_INTERRUPT_ACTION_PT", 0x7C }, { "NMI_INTERRUPT_UNKNOWN", 0x84 }, { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "NMI_INTERRUPT_PREG", 0x88 }, { "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING }, { "ADVANCED_SYSASSERT", 0 }, }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 96bda80632f3..f9de139561a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -51,6 +51,7 @@ struct iwl_ucode_capabilities { u32 error_log_addr; u32 error_log_size; u32 num_stations; + u32 num_links; u32 num_beacons; unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 135bd48bfe9f..de87e0e3e072 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2021, 2024 Intel Corporation */ #include "iwl-drv.h" #include "runtime.h" @@ -39,10 +39,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); +/* Assumes the appropriate lock is held by the caller */ void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt) { iwl_fw_suspend_timestamp(fwrt); - iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL); + iwl_dbg_tlv_time_point_sync(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, + NULL); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend); @@ -135,7 +137,9 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt) struct iwl_trans_rxq_dma_data data; cmd->data[i].q_num = i + 1; - iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data); + ret = iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data); + if (ret) + goto out; cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); cmd->data[i].urbd_stts_wrptr = @@ -149,6 +153,7 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt) ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); +out: kfree(cmd); if 
(ret) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 945bc4160cc9..a7b7cae874a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -249,7 +249,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt, }; int blk_idx; - /* loop for for all paging blocks + CSS block */ + /* loop for all paging blocks + CSS block */ for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys; __le32 phy_addr; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index 650e4bde9c17..1195e708caa9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2020-2023 Intel Corporation + * Copyright(c) 2020-2024 Intel Corporation */ #include "iwl-drv.h" @@ -12,6 +12,8 @@ #include "fw/api/alive.h" #include "fw/uefi.h" +#define IWL_PNVM_REDUCED_CAP_BIT BIT(25) + struct iwl_pnvm_section { __le32 offset; const u8 data[]; @@ -173,6 +175,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; + u32 rf_type; len -= sizeof(*tlv); tlv = (const void *)data; @@ -201,6 +204,16 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); + trans->reduced_cap_sku = false; + rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id); + if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) && + rf_type == IWL_CFG_RF_TYPE_FM) + trans->reduced_cap_sku = true; + + IWL_DEBUG_FW(trans, + "Reduced SKU device %d\n", + trans->reduced_cap_sku); + + if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) && trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) && trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { @@ -239,7 +252,7 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len) } new_len = pnvm->size; - *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL); + *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL); release_firmware(pnvm); if (!*data) @@ -255,21 +268,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len) struct pnvm_sku_package *package; u8 *image = NULL; - /* First attempt to get the PNVM from BIOS */ - package = iwl_uefi_get_pnvm(trans_p, len); - if (!IS_ERR_OR_NULL(package)) { - if (*len >= sizeof(*package)) { - /* we need only the data */ - *len -= sizeof(*package); - image = kmemdup(package->data, *len, GFP_KERNEL); + /* Get PNVM from BIOS for non-Intel SKU */ + if (trans_p->sku_id[2]) { + package = iwl_uefi_get_pnvm(trans_p, len); + if (!IS_ERR_OR_NULL(package)) { + if (*len >= sizeof(*package)) { + /* we need only the data */ + *len -= sizeof(*package); + image = kvmemdup(package->data, + *len, GFP_KERNEL); + } + /* + * free package regardless of whether kvmemdup + * succeeded + */ + kfree(package); + if (image) + return image; } - /* free package regardless of whether kmemdup succeeded */ - kfree(package); - if (image) - return image; } - /* If it's not available, try from the filesystem */ + /* If it's not available, or for Intel SKU, try from the filesystem */ if (iwl_pnvm_get_from_fs(trans_p, &image, len)) return NULL; return image; @@ -314,7 +333,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans, set: iwl_trans_set_pnvm(trans, capa); 
free: - kfree(data); + kvfree(data); kfree(pnvm_data); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c new file mode 100644 index 000000000000..ea435ee94312 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2023 Intel Corporation + */ +#include <linux/dmi.h> +#include "iwl-drv.h" +#include "iwl-debug.h" +#include "regulatory.h" +#include "fw/runtime.h" +#include "fw/uefi.h" + +#define GET_BIOS_TABLE(__name, ...) \ +do { \ + int ret = -ENOENT; \ + if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \ + ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \ + if (ret < 0) \ + ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \ + return ret; \ +} while (0) + +#define IWL_BIOS_TABLE_LOADER(__name) \ +int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \ +{GET_BIOS_TABLE(__name, fwrt); } \ +IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name) + +#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \ +int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \ + data_type * data) \ +{GET_BIOS_TABLE(__name, fwrt, data); } \ +IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name) + +IWL_BIOS_TABLE_LOADER(wrds_table); +IWL_BIOS_TABLE_LOADER(ewrd_table); +IWL_BIOS_TABLE_LOADER(wgds_table); +IWL_BIOS_TABLE_LOADER(ppag_table); +IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data); +IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64); +IWL_BIOS_TABLE_LOADER_DATA(mcc, char); +IWL_BIOS_TABLE_LOADER_DATA(eckv, u32); +IWL_BIOS_TABLE_LOADER_DATA(wbem, u32); +IWL_BIOS_TABLE_LOADER_DATA(dsbr, u32); + + +static const struct dmi_system_id dmi_ppag_approved_list[] = { + { .ident = "HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + }, + }, + { .ident = "SAMSUNG", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), + }, + }, + { .ident = "MSFT", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + }, + }, + { .ident = "ASUS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + }, + }, + { .ident = "GOOGLE-HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + }, + }, + { .ident = "GOOGLE-ASUS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."), + }, + }, + { .ident = "GOOGLE-SAMSUNG", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + }, + }, + { .ident = "RAZER", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + }, + }, + { .ident = "Honor", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HONOR"), + }, + }, + { .ident = "WIKO", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "WIKO"), + }, + }, + {} +}; + +static const struct dmi_system_id dmi_tas_approved_list[] = { + { .ident = "HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + }, + }, + { .ident = "SAMSUNG", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), + }, + }, + { .ident = "LENOVO", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + { .ident = "MSFT", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + }, + }, + { .ident = "Acer", + .matches = 
{ + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + }, + }, + { .ident = "ASUS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + }, + }, + { .ident = "GOOGLE-HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + }, + }, + { .ident = "MSI", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), + }, + }, + { .ident = "Honor", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HONOR"), + }, + }, + /* keep last */ + {} +}; + +bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) +{ + /* + * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on + * earlier firmware versions. Unfortunately, we don't have a + * TLV API flag to rely on, so rely on the major version which + * is in the first byte of ucode_ver. This was implemented + * initially on version 38 and then backported to 17. It was + * also backported to 29, but only for 7265D devices. The + * intention was to have it in 36 as well, but not all 8000 + * family got this feature enabled. The 8000 family is the + * only one using version 36, so skip this version entirely. + */ + return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && + fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || + (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && + ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + CSR_HW_REV_TYPE_7265D)); +} +IWL_EXPORT_SYMBOL(iwl_sar_geo_support); + +int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset *table, + u32 n_bands, u32 n_profiles) +{ + int i, j; + + if (!fwrt->geo_enabled) + return -ENODATA; + + if (!iwl_sar_geo_support(fwrt)) + return -EOPNOTSUPP; + + for (i = 0; i < n_profiles; i++) { + for (j = 0; j < n_bands; j++) { + struct iwl_per_chain_offset *chain = + &table[i * n_bands + j]; + + chain->max_tx_power = + cpu_to_le16(fwrt->geo_profiles[i].bands[j].max); + chain->chain_a = + fwrt->geo_profiles[i].bands[j].chains[0]; + chain->chain_b = + fwrt->geo_profiles[i].bands[j].chains[1]; + IWL_DEBUG_RADIO(fwrt, + "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", + i, j, + fwrt->geo_profiles[i].bands[j].chains[0], + fwrt->geo_profiles[i].bands[j].chains[1], + fwrt->geo_profiles[i].bands[j].max); + } + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table); + +static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, + __le16 *per_chain, u32 n_subbands, + int prof_a, int prof_b) +{ + int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b }; + int i, j; + + for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) { + struct iwl_sar_profile *prof; + + /* don't allow SAR to be disabled (profile 0 means disable) */ + if (profs[i] == 0) + return -EPERM; + + /* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */ + if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM) + return -EINVAL; + + /* profiles go from 1 to 4, so decrement to access the array */ + prof = &fwrt->sar_profiles[profs[i] - 1]; + + /* if the profile is disabled, do nothing */ + if (!prof->enabled) { + IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n", + profs[i]); + /* + * if one of the profiles is disabled, we + * ignore all of them and return 1 to + * differentiate disabled from other failures. 
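[Editorial aside] The comment above defines a tri-state return that callers must handle: a negative errno for real failures, 0 when per_chain[] was filled, and 1 when a chosen BIOS profile is disabled (deliberately not an error). A hedged caller sketch; the reduce-power helper at the end is hypothetical:

static int ex_send_sar_limits(struct iwl_fw_runtime *fwrt, __le16 *per_chain,
			      u32 n_tables, u32 n_subbands,
			      int prof_a, int prof_b)
{
	int ret = iwl_sar_fill_profile(fwrt, per_chain, n_tables,
				       n_subbands, prof_a, prof_b);

	if (ret < 0)	/* profile 0 selected, or index out of range */
		return ret;
	if (ret == 1)	/* SAR disabled in BIOS: silently skip the command */
		return 0;

	return ex_send_reduce_power_cmd(fwrt, per_chain); /* hypothetical */
}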
+ */ + return 1; + } + + IWL_DEBUG_INFO(fwrt, + "SAR EWRD: chain %d profile index %d\n", + i, profs[i]); + IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); + for (j = 0; j < n_subbands; j++) { + per_chain[i * n_subbands + j] = + cpu_to_le16(prof->chains[i].subbands[j]); + IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", + j, prof->chains[i].subbands[j]); + } + } + + return 0; +} + +int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt, + __le16 *per_chain, u32 n_tables, u32 n_subbands, + int prof_a, int prof_b) +{ + int i, ret = 0; + + for (i = 0; i < n_tables; i++) { + ret = iwl_sar_fill_table(fwrt, + &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS], + n_subbands, prof_a, prof_b); + if (ret) + break; + } + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_sar_fill_profile); + +static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain, + int subband) +{ + s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband]; + + if ((subband == 0 && + (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) || + (subband != 0 && + (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) { + IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val); + return false; + } + return true; +} + +int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, + union iwl_ppag_table_cmd *cmd, int *cmd_size) +{ + u8 cmd_ver; + int i, j, num_sub_bands; + s8 *gain; + bool send_ppag_always; + + /* many firmware images for JF lie about this */ + if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) == + CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) + return -EOPNOTSUPP; + + if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { + IWL_DEBUG_RADIO(fwrt, + "PPAG capability not supported by FW, command not sent.\n"); + return -EINVAL; + } + + cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD), 1); + /* + * Starting from ver 4, the driver needs to send the PPAG CMD regardless + * of whether PPAG is enabled/disabled or valid/invalid. + */ + send_ppag_always = cmd_ver > 3; + + /* Don't send PPAG if it is disabled */ + if (!send_ppag_always && !fwrt->ppag_flags) { + IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); + return -EINVAL; + } + + /* The 'flags' field is the same in v1 and in v2 so we can just + * use v1 to access it. 
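[Editorial aside] The comment this hunk ends on ('flags' is the same in v1 and v2) works because both union members put @flags first, so the oldest layout can always access it. A minimal sketch of that versioned-command pattern; the layouts are illustrative, not the real union iwl_ppag_table_cmd:

struct ex_ppag_cmd_v1 {
	__le32 flags;
	s8 gain[2][9];		/* v1: fewer sub-bands */
} __packed;

struct ex_ppag_cmd_v2 {
	__le32 flags;		/* same offset as in v1 */
	s8 gain[2][11];		/* v2: wider payload */
} __packed;

union ex_ppag_cmd {
	struct ex_ppag_cmd_v1 v1;
	struct ex_ppag_cmd_v2 v2;
};

/* valid for every version because @flags is at offset 0 in all layouts */
static inline u32 ex_ppag_flags(const union ex_ppag_cmd *cmd)
{
	return le32_to_cpu(cmd->v1.flags);
}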
+ */ + cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags); + + IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); + if (cmd_ver == 1) { + num_sub_bands = IWL_NUM_SUB_BANDS_V1; + gain = cmd->v1.gain[0]; + *cmd_size = sizeof(cmd->v1); + if (fwrt->ppag_ver >= 1) { + /* in this case FW supports revision 0 */ + IWL_DEBUG_RADIO(fwrt, + "PPAG table rev is %d, send truncated table\n", + fwrt->ppag_ver); + } + } else if (cmd_ver >= 2 && cmd_ver <= 6) { + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + gain = cmd->v2.gain[0]; + *cmd_size = sizeof(cmd->v2); + if (fwrt->ppag_ver == 0) { + /* in this case FW supports revisions 1,2 or 3 */ + IWL_DEBUG_RADIO(fwrt, + "PPAG table rev is 0, send padded table\n"); + } + } else { + IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); + return -EINVAL; + } + + /* ppag mode */ + IWL_DEBUG_RADIO(fwrt, + "PPAG MODE bits were read from bios: %d\n", + le32_to_cpu(cmd->v1.flags)); + + if (cmd_ver == 5) + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK); + else if (cmd_ver < 5) + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK); + + if ((cmd_ver == 1 && + !fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) || + (cmd_ver == 2 && fwrt->ppag_ver >= 2)) { + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); + IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); + } else { + IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); + } + + IWL_DEBUG_RADIO(fwrt, + "PPAG MODE bits going to be sent: %d\n", + le32_to_cpu(cmd->v1.flags)); + + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + for (j = 0; j < num_sub_bands; j++) { + if (!send_ppag_always && + !iwl_ppag_value_valid(fwrt, i, j)) + return -EINVAL; + + gain[i * num_sub_bands + j] = + fwrt->ppag_chains[i].subbands[j]; + IWL_DEBUG_RADIO(fwrt, + "PPAG table: chain[%d] band[%d]: gain = %d\n", + i, j, gain[i * num_sub_bands + j]); + } + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_fill_ppag_table); + +bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt) +{ + if (!dmi_check_system(dmi_ppag_approved_list)) { + IWL_DEBUG_RADIO(fwrt, + "System vendor '%s' is not in the approved list, disabling PPAG.\n", + dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>"); + fwrt->ppag_flags = 0; + return false; + } + + return true; +} +IWL_EXPORT_SYMBOL(iwl_is_ppag_approved); + +bool iwl_is_tas_approved(void) +{ + return dmi_check_system(dmi_tas_approved_list); +} +IWL_EXPORT_SYMBOL(iwl_is_tas_approved); + +struct iwl_tas_selection_data +iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev) +{ + struct iwl_tas_selection_data tas_selection_out = {}; + u8 override_iec = u32_get_bits(tas_selection_in, + IWL_WTAS_OVERRIDE_IEC_MSK); + u8 canada_tas_uhb = u32_get_bits(tas_selection_in, + IWL_WTAS_CANADA_UHB_MSK); + u8 enabled_iec = u32_get_bits(tas_selection_in, + IWL_WTAS_ENABLE_IEC_MSK); + u8 usa_tas_uhb = u32_get_bits(tas_selection_in, + IWL_WTAS_USA_UHB_MSK); + + if (tbl_rev > 0) { + tas_selection_out.usa_tas_uhb_allowed = usa_tas_uhb; + tas_selection_out.override_tas_iec = override_iec; + tas_selection_out.enable_tas_iec = enabled_iec; + } + + if (tbl_rev > 1) + tas_selection_out.canada_tas_uhb_allowed = canada_tas_uhb; + + return tas_selection_out; +} +IWL_EXPORT_SYMBOL(iwl_parse_tas_selection); + +static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) +{ + int ret; + u32 val; + __le32 config_bitmap = 0; + + switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) { + case IWL_CFG_RF_TYPE_HR1: + case IWL_CFG_RF_TYPE_HR2: + case IWL_CFG_RF_TYPE_JF1: + case IWL_CFG_RF_TYPE_JF2: + ret = 
iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2, + &val); + + if (!ret && val == DSM_VALUE_INDONESIA_ENABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + break; + default: + break; + } + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val); + if (!ret) { + if (val == DSM_VALUE_SRD_PASSIVE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); + else if (val == DSM_VALUE_SRD_DISABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); + } + + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) { + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG, + &val); + /* + * Enable the China 2022 regulatory support if the BIOS + * object does not exist or if it is enabled in BIOS. + */ + if (ret < 0 || val & DSM_MASK_CHINA_22_REG) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK); + } + + return config_bitmap; +} + +static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver) +{ + size_t cmd_size; + + switch (cmd_ver) { + case 12: + case 11: + cmd_size = sizeof(struct iwl_lari_config_change_cmd); + break; + case 10: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10); + break; + case 9: + case 8: + case 7: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); + break; + case 6: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); + break; + case 5: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); + break; + case 4: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); + break; + case 3: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); + break; + case 2: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); + break; + default: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); + break; + } + return cmd_size; +} + +int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, + struct iwl_lari_config_change_cmd *cmd, + size_t *cmd_size) +{ + int ret; + u32 value; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), 1); + + memset(cmd, 0, sizeof(*cmd)); + *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver); + + cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value); + if (!ret) + cmd->oem_11ax_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value); + if (!ret) { + value &= DSM_UNII4_ALLOW_BITMAP; + + /* Since version 9, bits 4 and 5 are supported + * regardless of this capability. + */ + if (cmd_ver < 9 && + !fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA)) + value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | + DSM_VALUE_UNII4_CANADA_EN_MSK); + + cmd->oem_unii4_allow_bitmap = cpu_to_le32(value); + } + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value); + if (!ret) { + if (cmd_ver < 8) + value &= ~ACTIVATE_5G2_IN_WW_MASK; + + /* Since version 12, bits 5 and 6 are supported + * regardless of this capability. 
+ */ + if (cmd_ver < 12 && + !fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA)) + value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11; + + cmd->chan_state_active_bitmap = cpu_to_le32(value); + } + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value); + if (!ret) + cmd->oem_uhb_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value); + if (!ret) + cmd->force_disable_channels_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD, + &value); + if (!ret) + cmd->edt_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_wbem(fwrt, &value); + if (!ret) + cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value); + if (!ret) + cmd->oem_11be_allow_bitmap = cpu_to_le32(value); + + if (cmd->config_bitmap || + cmd->oem_uhb_allow_bitmap || + cmd->oem_11ax_allow_bitmap || + cmd->oem_unii4_allow_bitmap || + cmd->chan_state_active_bitmap || + cmd->force_disable_channels_bitmap || + cmd->edt_bitmap || + cmd->oem_320mhz_allow_bitmap || + cmd->oem_11be_allow_bitmap) { + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", + le32_to_cpu(cmd->config_bitmap), + le32_to_cpu(cmd->oem_11ax_allow_bitmap)); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", + le32_to_cpu(cmd->oem_unii4_allow_bitmap), + le32_to_cpu(cmd->chan_state_active_bitmap), + cmd_ver); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", + le32_to_cpu(cmd->oem_uhb_allow_bitmap), + le32_to_cpu(cmd->force_disable_channels_bitmap)); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n", + le32_to_cpu(cmd->edt_bitmap), + le32_to_cpu(cmd->oem_320mhz_allow_bitmap)); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n", + le32_to_cpu(cmd->oem_11be_allow_bitmap)); + } else { + return 1; + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_fill_lari_config); + +int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, + u32 *value) +{ + GET_BIOS_TABLE(dsm, fwrt, func, value); +} +IWL_EXPORT_SYMBOL(iwl_bios_get_dsm); + +bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc) +{ + /* Some kind of regulatory mess means we need to currently disallow + * puncturing in the US and Canada unless enabled in BIOS. 
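[Editorial aside] iwl_fill_lari_config() above returns 1 when every bitmap turned out empty and 0 when a command is ready, with *cmd_size already trimmed to what the running firmware version can parse. A sketch of an op-mode call site; the host-command plumbing shown is an assumption based on how other commands in this patch are sent, not code from this patch:

static int ex_send_lari_config(struct iwl_fw_runtime *fwrt)
{
	struct iwl_lari_config_change_cmd cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE),
	};
	size_t cmd_size;

	if (iwl_fill_lari_config(fwrt, &cmd, &cmd_size))
		return 0;	/* nothing enabled in BIOS: nothing to send */

	hcmd.data[0] = &cmd;
	hcmd.len[0] = cmd_size;	/* only bytes this FW version understands */

	return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}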
+ */ + switch (mcc) { + case IWL_MCC_US: + return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK; + case IWL_MCC_CANADA: + return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK; + default: + return true; + } +} +IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h new file mode 100644 index 000000000000..b355d7bef14c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2023-2024 Intel Corporation + */ + +#ifndef __fw_regulatory_h__ +#define __fw_regulatory_h__ + +#include "fw/img.h" +#include "fw/api/commands.h" +#include "fw/api/power.h" +#include "fw/api/phy.h" +#include "fw/api/config.h" +#include "fw/api/nvm-reg.h" +#include "fw/img.h" +#include "iwl-trans.h" + +#define BIOS_SAR_MAX_PROFILE_NUM 4 +/* + * Each SAR profile has (up to, depends on the table revision) 4 chains: + * chain A, chain B, chain A when in CDB, chain B when in CDB + */ +#define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4 +#define BIOS_SAR_NUM_CHAINS 2 +#define BIOS_SAR_MAX_SUB_BANDS_NUM 11 + +#define BIOS_GEO_NUM_CHAINS 2 +#define BIOS_GEO_MAX_NUM_BANDS 3 +#define BIOS_GEO_MAX_PROFILE_NUM 8 +#define BIOS_GEO_MIN_PROFILE_NUM 3 + +#define IWL_SAR_ENABLE_MSK BIT(0) + +/* PPAG gain value bounds in 1/8 dBm */ +#define IWL_PPAG_MIN_LB -16 +#define IWL_PPAG_MAX_LB 24 +#define IWL_PPAG_MIN_HB -16 +#define IWL_PPAG_MAX_HB 40 + +#define IWL_PPAG_ETSI_CHINA_MASK 3 +#define IWL_PPAG_REV3_MASK 0x7FF + +#define IWL_WTAS_ENABLED_MSK BIT(0) +#define IWL_WTAS_OVERRIDE_IEC_MSK BIT(1) +#define IWL_WTAS_ENABLE_IEC_MSK BIT(2) +#define IWL_WTAS_CANADA_UHB_MSK BIT(15) +#define IWL_WTAS_USA_UHB_MSK BIT(16) + +struct iwl_tas_selection_data { + u8 override_tas_iec:1, + enable_tas_iec:1, + usa_tas_uhb_allowed:1, + canada_tas_uhb_allowed:1; +}; + +#define BIOS_MCC_CHINA 0x434e + +/* + * The profile for revision 2 is a superset of revision 1, which is in + * turn a superset of revision 0. So we can store all revisions + * inside revision 2, which is what we represent here. 
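[Editorial aside] A short usage sketch for the helper above, pairing it with iwl_uefi_get_puncturing() from the uefi.c part of this patch; the op-mode context (where the mcc value comes from) is assumed:

static bool ex_puncturing_allowed(struct iwl_fw_runtime *fwrt, u16 mcc)
{
	/* BIOS opt-in bits; 0 if the variable is missing or invalid */
	u32 bios_bits = iwl_uefi_get_puncturing(fwrt);

	/* outside the US and Canada this is unconditionally true */
	return iwl_puncturing_is_allowed_in_bios(bios_bits, mcc);
}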
+ */ + +/* + * struct iwl_sar_profile_chain - per-chain values of a SAR profile + * @subbands: the SAR value for each subband + */ +struct iwl_sar_profile_chain { + u8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM]; +}; + +/* + * struct iwl_sar_profile - SAR profile from SAR tables + * @enabled: whether the profile is enabled or not + * @chains: per-chain SAR values + */ +struct iwl_sar_profile { + bool enabled; + struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE]; +}; + +/* Same thing as with SAR, all revisions fit in revision 2 */ + +/* + * struct iwl_geo_profile_band - per-band geo SAR offsets + * @max: the max tx power allowed for the band + * @chains: SAR offsets values for each chain + */ +struct iwl_geo_profile_band { + u8 max; + u8 chains[BIOS_GEO_NUM_CHAINS]; +}; + +/* + * struct iwl_geo_profile - geo profile + * @bands: per-band table of the SAR offsets + */ +struct iwl_geo_profile { + struct iwl_geo_profile_band bands[BIOS_GEO_MAX_NUM_BANDS]; +}; + +/* Same thing as with SAR, all revisions fit in revision 2 */ +struct iwl_ppag_chain { + s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM]; +}; + +struct iwl_tas_data { + u8 block_list_size; + u16 block_list_array[IWL_WTAS_BLACK_LIST_MAX]; + u8 table_source; + u8 table_revision; + u32 tas_selection; +}; + +/* For DSM revision 0 and 4 */ +enum iwl_dsm_funcs { + DSM_FUNC_QUERY = 0, + DSM_FUNC_DISABLE_SRD = 1, + DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, + DSM_FUNC_ENABLE_6E = 3, + DSM_FUNC_REGULATORY_CONFIG = 4, + DSM_FUNC_11AX_ENABLEMENT = 6, + DSM_FUNC_ENABLE_UNII4_CHAN = 7, + DSM_FUNC_ACTIVATE_CHANNEL = 8, + DSM_FUNC_FORCE_DISABLE_CHANNELS = 9, + DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10, + DSM_FUNC_RFI_CONFIG = 11, + DSM_FUNC_ENABLE_11BE = 12, + DSM_FUNC_NUM_FUNCS = 13, +}; + +enum iwl_dsm_values_srd { + DSM_VALUE_SRD_ACTIVE, + DSM_VALUE_SRD_PASSIVE, + DSM_VALUE_SRD_DISABLE, + DSM_VALUE_SRD_MAX +}; + +enum iwl_dsm_values_indonesia { + DSM_VALUE_INDONESIA_DISABLE, + DSM_VALUE_INDONESIA_ENABLE, + DSM_VALUE_INDONESIA_RESERVED, + DSM_VALUE_INDONESIA_MAX +}; + +enum iwl_dsm_unii4_bitmap { + DSM_VALUE_UNII4_US_OVERRIDE_MSK = BIT(0), + DSM_VALUE_UNII4_US_EN_MSK = BIT(1), + DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK = BIT(2), + DSM_VALUE_UNII4_ETSI_EN_MSK = BIT(3), + DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK = BIT(4), + DSM_VALUE_UNII4_CANADA_EN_MSK = BIT(5), +}; + +#define DSM_UNII4_ALLOW_BITMAP (DSM_VALUE_UNII4_US_OVERRIDE_MSK |\ + DSM_VALUE_UNII4_US_EN_MSK |\ + DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK |\ + DSM_VALUE_UNII4_ETSI_EN_MSK |\ + DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |\ + DSM_VALUE_UNII4_CANADA_EN_MSK) + +enum iwl_dsm_values_rfi { + DSM_VALUE_RFI_DLVR_DISABLE = BIT(0), + DSM_VALUE_RFI_DDR_DISABLE = BIT(1), +}; + +#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\ + DSM_VALUE_RFI_DDR_DISABLE) + +enum iwl_dsm_masks_reg { + DSM_MASK_CHINA_22_REG = BIT(2) +}; + +struct iwl_fw_runtime; + +bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); + +int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt, + struct iwl_per_chain_offset *table, + u32 n_bands, u32 n_profiles); + +int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt, + __le16 *per_chain, u32 n_tables, u32 n_subbands, + int prof_a, int prof_b); + +int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, + union iwl_ppag_table_cmd *cmd, + int *cmd_size); + +bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt); + +bool iwl_is_tas_approved(void); + +struct iwl_tas_selection_data +iwl_parse_tas_selection(const u32 tas_selection, const u8 tbl_rev); + +int iwl_bios_get_wrds_table(struct iwl_fw_runtime 
*fwrt); + +int iwl_bios_get_ewrd_table(struct iwl_fw_runtime *fwrt); + +int iwl_bios_get_wgds_table(struct iwl_fw_runtime *fwrt); + +int iwl_bios_get_ppag_table(struct iwl_fw_runtime *fwrt); + +int iwl_bios_get_tas_table(struct iwl_fw_runtime *fwrt, + struct iwl_tas_data *data); + +int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt, + u64 *dflt_pwr_limit); + +int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc); +int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk); +int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); + +int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, + struct iwl_lari_config_change_cmd *cmd, + size_t *cmd_size); + +int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, + u32 *value); + +static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes, + const u8 ppag_ver) +{ + return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK : + IWL_PPAG_REV3_MASK); +} + +bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc); + +#define IWL_DSBR_FW_MODIFIED_URM_MASK BIT(8) +#define IWL_DSBR_PERMANENT_URM_MASK BIT(9) + +int iwl_bios_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value); + +static inline void iwl_bios_setup_step(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt) +{ + u32 dsbr; + + if (!trans->trans_cfg->integrated) + return; + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + return; + + if (iwl_bios_get_dsbr(fwrt, &dsbr)) + dsbr = 0; + + trans->dsbr_urm_fw_dependent = !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK); + trans->dsbr_urm_permanent = !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK); +} +#endif /* __fw_regulatory_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 357727774db9..048877fa7c71 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #ifndef __iwl_fw_runtime_h__ #define __iwl_fw_runtime_h__ @@ -12,13 +12,13 @@ #include "fw/api/debug.h" #include "fw/api/paging.h" #include "fw/api/power.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "fw/acpi.h" +#include "fw/regulatory.h" struct iwl_fw_runtime_ops { void (*dump_start)(void *ctx); void (*dump_end)(void *ctx); - bool (*fw_running)(void *ctx); int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); bool (*d3_debug_enable)(void *ctx); }; @@ -45,6 +45,10 @@ struct iwl_fwrt_shared_mem_cfg { * struct iwl_fwrt_dump_data - dump data * @trig: trigger the worker was scheduled upon * @fw_pkt: packet received from FW + * + * Note that the decision which part of the union is used + * is based on iwl_trans_dbg_ini_valid(): the 'trig' part + * is used if it is %true, the 'desc' part otherwise. 
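[Editorial aside] The note added to struct iwl_fwrt_dump_data describes a tagged union whose tag lives outside the struct, in iwl_trans_dbg_ini_valid(). A hedged consumer sketch; the prints are illustrative only:

static void ex_dump_data_kind(struct iwl_fw_runtime *fwrt,
			      struct iwl_fwrt_dump_data *dd)
{
	if (iwl_trans_dbg_ini_valid(fwrt->trans))
		pr_debug("ini dump, trig=%p\n", dd->trig);	/* 'trig' side */
	else
		pr_debug("legacy dump, desc=%p\n", dd->desc);	/* 'desc' side */
}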
*/ struct iwl_fwrt_dump_data { union { @@ -53,6 +57,7 @@ struct iwl_fwrt_dump_data { struct iwl_rx_packet *fw_pkt; }; struct { + /* must be first to be same as 'trig' */ const struct iwl_fw_dump_desc *desc; bool monitor_only; }; @@ -98,8 +103,12 @@ struct iwl_txf_iter_data { * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data - * @uats_enabled: VLP or AFC AP is enabled * @uats_table: AP type table + * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock: + * 0: Unlocked, 1 and 2: Locked. + * Only read the UEFI variables if locked. + * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables + * @geo_profiles: geographic profiles as read from WGDS BIOS table */ struct iwl_fw_runtime { struct iwl_trans *trans; @@ -158,24 +167,21 @@ struct iwl_fw_runtime { #ifdef CONFIG_IWLWIFI_DEBUGFS bool tpc_enabled; #endif /* CONFIG_IWLWIFI_DEBUGFS */ -#ifdef CONFIG_ACPI - struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; + struct iwl_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM]; u8 sar_chain_a_profile; u8 sar_chain_b_profile; - struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3]; + u8 reduced_power_flags; + struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM]; u32 geo_rev; u32 geo_num_profiles; bool geo_enabled; struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; u32 ppag_flags; - u32 ppag_ver; - bool ppag_table_valid; + u8 ppag_ver; struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; - u8 reduced_power_flags; - bool uats_enabled; - struct iwl_uats_table_cmd uats_table; -#endif + struct iwl_mcc_allowed_ap_type_cmd uats_table; + u8 uefi_tables_lock_status; }; void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index 2964c5fb11e9..434eed4130b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2021-2023 Intel Corporation + * Copyright(c) 2021-2024 Intel Corporation */ #include "iwl-drv.h" @@ -13,9 +13,12 @@ #include <linux/efi.h> #include "fw/runtime.h" -#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \ - 0xb2, 0xec, 0xf5, 0xa3, \ - 0x59, 0x4f, 0x4a, 0xea) +#define IWL_EFI_WIFI_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \ + 0xb2, 0xec, 0xf5, 0xa3, \ + 0x59, 0x4f, 0x4a, 0xea) +#define IWL_EFI_WIFI_BT_GUID EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, \ + 0x8d, 0x03, 0x77, 0x2e, \ + 0xcc, 0x3d, 0xa5, 0x31) struct iwl_uefi_pnvm_mem_desc { __le32 addr; @@ -61,7 +64,7 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) *len = 0; - data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID, + data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_WIFI_GUID, &package_size); if (IS_ERR(data)) { IWL_DEBUG_FW(trans, @@ -76,6 +79,54 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) return data; } +static void * +iwl_uefi_get_verified_variable_guid(struct iwl_trans *trans, + efi_guid_t *guid, + efi_char16_t *uefi_var_name, + char *var_name, + unsigned int expected_size, + unsigned long *size) +{ + void *var; + unsigned long var_size; + + var = iwl_uefi_get_variable(uefi_var_name, guid, &var_size); + + if (IS_ERR(var)) { + IWL_DEBUG_RADIO(trans, + "%s UEFI variable not found 0x%lx\n", var_name, + PTR_ERR(var)); + return var; + } + + 
if (var_size < expected_size) { + IWL_DEBUG_RADIO(trans, + "Invalid %s UEFI variable len (%lu)\n", + var_name, var_size); + kfree(var); + return ERR_PTR(-EINVAL); + } + + IWL_DEBUG_RADIO(trans, "%s from UEFI with size %lu\n", var_name, + var_size); + + if (size) + *size = var_size; + return var; +} + +static void * +iwl_uefi_get_verified_variable(struct iwl_trans *trans, + efi_char16_t *uefi_var_name, + char *var_name, + unsigned int expected_size, + unsigned long *size) +{ + return iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_GUID, + uefi_var_name, var_name, + expected_size, size); +} + int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, u32 tlv_len, struct iwl_pnvm_image *pnvm_data) { @@ -230,26 +281,13 @@ u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) unsigned long package_size; u8 *data; - package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME, - &IWL_EFI_VAR_GUID, &package_size); - - if (IS_ERR(package)) { - IWL_DEBUG_FW(trans, - "Reduced Power UEFI variable not found 0x%lx (len %lu)\n", - PTR_ERR(package), package_size); + package = iwl_uefi_get_verified_variable(trans, + IWL_UEFI_REDUCED_POWER_NAME, + "Reduced Power", + sizeof(*package), + &package_size); + if (IS_ERR(package)) return ERR_CAST(package); - } - - if (package_size < sizeof(*package)) { - IWL_DEBUG_FW(trans, - "Invalid Reduced Power UEFI variable len (%lu)\n", - package_size); - kfree(package); - return ERR_PTR(-EINVAL); - } - - IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n", - package_size); IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n", package->rev, package->total_size, package->n_skus); @@ -283,32 +321,16 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat void iwl_uefi_get_step_table(struct iwl_trans *trans) { struct uefi_cnv_common_step_data *data; - unsigned long package_size; int ret; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return; - data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID, - &package_size); - - if (IS_ERR(data)) { - IWL_DEBUG_FW(trans, - "STEP UEFI variable not found 0x%lx\n", - PTR_ERR(data)); + data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID, + IWL_UEFI_STEP_NAME, + "STEP", sizeof(*data), NULL); + if (IS_ERR(data)) return; - } - - if (package_size < sizeof(*data)) { - IWL_DEBUG_FW(trans, - "Invalid STEP table UEFI variable len (%lu)\n", - package_size); - kfree(data); - return; - } - - IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n", - package_size); ret = iwl_uefi_step_parse(data, trans); if (ret < 0) @@ -318,7 +340,6 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans) } IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table); -#ifdef CONFIG_ACPI static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data, struct iwl_fw_runtime *fwrt) { @@ -355,31 +376,15 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { struct uefi_cnv_wlan_sgom_data *data; - unsigned long package_size; int ret; if (!fwrt->geo_enabled) return; - data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID, - &package_size); - if (IS_ERR(data)) { - IWL_DEBUG_FW(trans, - "SGOM UEFI variable not found 0x%lx\n", - PTR_ERR(data)); + data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_SGOM_NAME, + "SGOM", sizeof(*data), NULL); + if (IS_ERR(data)) return; - } - - if (package_size < sizeof(*data)) { - IWL_DEBUG_FW(trans, - "Invalid SGOM table UEFI variable len (%lu)\n", - package_size); - 
kfree(data); - return; - } - - IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n", - package_size); ret = iwl_uefi_sgom_parse(data, fwrt); if (ret < 0) @@ -404,28 +409,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { struct uefi_cnv_wlan_uats_data *data; - unsigned long package_size; int ret; - data = iwl_uefi_get_variable(IWL_UEFI_UATS_NAME, &IWL_EFI_VAR_GUID, - &package_size); - if (IS_ERR(data)) { - IWL_DEBUG_FW(trans, - "UATS UEFI variable not found 0x%lx\n", - PTR_ERR(data)); - return -EINVAL; - } - - if (package_size < sizeof(*data)) { - IWL_DEBUG_FW(trans, - "Invalid UATS table UEFI variable len (%lu)\n", - package_size); - kfree(data); + data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME, + "UATS", sizeof(*data), NULL); + if (IS_ERR(data)) return -EINVAL; - } - - IWL_DEBUG_FW(trans, "Read UATS from UEFI with size %lu\n", - package_size); ret = iwl_uefi_uats_parse(data, fwrt); if (ret < 0) { @@ -438,4 +427,381 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans, return 0; } IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table); -#endif /* CONFIG_ACPI */ + +static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt, + struct uefi_sar_profile *uefi_sar_prof, + u8 prof_index, bool enabled) +{ + memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof, + sizeof(struct uefi_sar_profile)); + + fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK; +} + +int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_var_wrds *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME, + "WRDS", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_WRDS_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n", + data->revision); + goto out; + } + + /* The profile from WRDS is officially profile 1, but goes + * into sar_profiles[0] (because we don't have a profile 0). + */ + iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode); +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_var_ewrd *data; + int i, ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME, + "EWRD", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_EWRD_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n", + data->revision); + goto out; + } + + if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) { + ret = -EINVAL; + goto out; + } + + for (i = 0; i < data->num_profiles; i++) + /* The EWRD profiles officially go from 2 to 4, but we + * save them in sar_profiles[1-3] (because we don't + * have profile 0). So in the array we start from 1. 
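[Editorial aside] The WRDS comment above (and the EWRD one that follows) describe a 1-based BIOS profile numbering stored in a 0-based array: WRDS supplies profile 1, EWRD supplies profiles 2-4. A hypothetical helper that makes the mapping explicit:

static struct iwl_sar_profile *
ex_sar_profile(struct iwl_fw_runtime *fwrt, int bios_profile_num)
{
	/* BIOS numbers profiles 1..BIOS_SAR_MAX_PROFILE_NUM; the driver
	 * stores profile N at sar_profiles[N - 1] */
	if (bios_profile_num < 1 ||
	    bios_profile_num > BIOS_SAR_MAX_PROFILE_NUM)
		return NULL;

	return &fwrt->sar_profiles[bios_profile_num - 1];
}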
+ */ + iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1, + data->mode); + +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_var_wgds *data; + int i, ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME, + "WGDS", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_WGDS_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n", + data->revision); + goto out; + } + + if (data->num_profiles < BIOS_GEO_MIN_PROFILE_NUM || + data->num_profiles > BIOS_GEO_MAX_PROFILE_NUM) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Invalid number of profiles in WGDS: %d\n", + data->num_profiles); + goto out; + } + + fwrt->geo_rev = data->revision; + for (i = 0; i < data->num_profiles; i++) + memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i], + sizeof(struct iwl_geo_profile)); + + fwrt->geo_num_profiles = data->num_profiles; + fwrt->geo_enabled = true; +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_var_ppag *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME, + "PPAG", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision < IWL_UEFI_MIN_PPAG_REV || + data->revision > IWL_UEFI_MAX_PPAG_REV) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n", + data->revision); + goto out; + } + + fwrt->ppag_ver = data->revision; + fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes, + fwrt->ppag_ver); + + BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains)); + memcpy(&fwrt->ppag_chains, &data->ppag_chains, + sizeof(data->ppag_chains)); +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt, + struct iwl_tas_data *tas_data) +{ + struct uefi_cnv_var_wtas *uefi_tas; + int ret, enabled; + + uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME, + "WTAS", sizeof(*uefi_tas), NULL); + if (IS_ERR(uefi_tas)) + return -EINVAL; + + if (uefi_tas->revision < IWL_UEFI_MIN_WTAS_REVISION || + uefi_tas->revision > IWL_UEFI_MAX_WTAS_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n", + uefi_tas->revision); + goto out; + } + + IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n", + uefi_tas->tas_selection); + + enabled = uefi_tas->tas_selection & IWL_WTAS_ENABLED_MSK; + tas_data->table_source = BIOS_SOURCE_UEFI; + tas_data->table_revision = uefi_tas->revision; + tas_data->tas_selection = uefi_tas->tas_selection; + + IWL_DEBUG_RADIO(fwrt, "TAS %s enabled\n", + enabled ? 
"is" : "not"); + + IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", + uefi_tas->revision); + if (uefi_tas->black_list_size > IWL_WTAS_BLACK_LIST_MAX) { + IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %d\n", + uefi_tas->black_list_size); + ret = -EINVAL; + goto out; + } + + tas_data->block_list_size = uefi_tas->black_list_size; + IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size); + + for (u8 i = 0; i < uefi_tas->black_list_size; i++) { + tas_data->block_list_array[i] = uefi_tas->black_list[i]; + IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", + uefi_tas->black_list[i]); + } + ret = enabled; +out: + kfree(uefi_tas); + return ret; +} + +int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt, + u64 *dflt_pwr_limit) +{ + struct uefi_cnv_var_splc *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_SPLC_NAME, + "SPLC", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_SPLC_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI SPLC revision:%d\n", + data->revision); + goto out; + } + *dflt_pwr_limit = data->default_pwr_limit; +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc) +{ + struct uefi_cnv_var_wrdd *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDD_NAME, + "WRDD", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_WRDD_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n", + data->revision); + goto out; + } + + if (data->mcc != BIOS_MCC_CHINA) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n"); + goto out; + } + + mcc[0] = (data->mcc >> 8) & 0xff; + mcc[1] = data->mcc & 0xff; + mcc[2] = '\0'; +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) +{ + struct uefi_cnv_var_eckv *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME, + "ECKV", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_ECKV_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n", + data->revision); + goto out; + } + *extl_clk = data->ext_clock_valid; +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) +{ + struct uefi_cnv_wlan_wbem_data *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WBEM_NAME, + "WBEM", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_WBEM_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WBEM revision:%d\n", + data->revision); + goto out; + } + *value = data->wbem_320mhz_per_mcc & IWL_UEFI_WBEM_REV0_MASK; + IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from UEFI\n"); +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, + u32 *value) +{ + struct uefi_cnv_var_general_cfg *data; + int ret = -EINVAL; + + /* Not supported function index */ + if (func >= DSM_FUNC_NUM_FUNCS || func == 5) + return -EOPNOTSUPP; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_DSM_NAME, + "DSM", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_DSM_REVISION) { + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSM revision:%d\n", + data->revision); + goto out; + } + + 
if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) { + IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n"); + goto out; + } + + *value = data->functions[func]; + ret = 0; +out: + kfree(data); + return ret; +} + +int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_var_puncturing_data *data; + /* default to "not enabled" if there is any issue reading the + * UEFI variable or if its revision is not supported + */ + int puncturing = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, + IWL_UEFI_PUNCTURING_NAME, + "UefiCnvWlanPuncturing", + sizeof(*data), NULL); + if (IS_ERR(data)) + return puncturing; + + if (data->revision != IWL_UEFI_PUNCTURING_REVISION) { + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PUNCTURING rev:%d\n", + data->revision); + } else { + puncturing = data->puncturing & IWL_UEFI_PUNCTURING_REV0_MASK; + IWL_DEBUG_RADIO(fwrt, "Loaded puncturing bits from UEFI: %d\n", + puncturing); + } + + kfree(data); + return puncturing; +} +IWL_EXPORT_SYMBOL(iwl_uefi_get_puncturing); + +int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value) +{ + struct uefi_cnv_wlan_dsbr_data *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable_guid(fwrt->trans, + &IWL_EFI_WIFI_BT_GUID, + IWL_UEFI_DSBR_NAME, "DSBR", + sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_DSBR_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSBR revision:%d\n", + data->revision); + goto out; + } + *value = data->config; + IWL_DEBUG_RADIO(fwrt, "Loaded DSBR config from UEFI value: 0x%x\n", + *value); +out: + kfree(data); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index bf61a8df1225..0c8943a8bd01 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -1,19 +1,49 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright(c) 2021-2023 Intel Corporation + * Copyright(c) 2021-2024 Intel Corporation */ #ifndef __iwl_fw_uefi__ #define __iwl_fw_uefi__ +#include "fw/regulatory.h" + #define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm" #define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower" #define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping" #define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP" #define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS" +#define IWL_UEFI_WRDS_NAME L"UefiCnvWlanWRDS" +#define IWL_UEFI_EWRD_NAME L"UefiCnvWlanEWRD" +#define IWL_UEFI_WGDS_NAME L"UefiCnvWlanWGDS" +#define IWL_UEFI_PPAG_NAME L"UefiCnvWlanPPAG" +#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS" +#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC" +#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD" +#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV" +#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg" +#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM" +#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing" +#define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR" + #define IWL_SGOM_MAP_SIZE 339 #define IWL_UATS_MAP_SIZE 339 +#define IWL_UEFI_WRDS_REVISION 2 +#define IWL_UEFI_EWRD_REVISION 2 +#define IWL_UEFI_WGDS_REVISION 3 +#define IWL_UEFI_MIN_PPAG_REV 1 +#define IWL_UEFI_MAX_PPAG_REV 3 +#define IWL_UEFI_MIN_WTAS_REVISION 1 +#define IWL_UEFI_MAX_WTAS_REVISION 2 +#define IWL_UEFI_SPLC_REVISION 0 +#define IWL_UEFI_WRDD_REVISION 0 +#define IWL_UEFI_ECKV_REVISION 0 +#define IWL_UEFI_WBEM_REVISION 0 +#define IWL_UEFI_DSM_REVISION 4 +#define IWL_UEFI_PUNCTURING_REVISION 0 +#define IWL_UEFI_DSBR_REVISION 1 + 
struct pnvm_sku_package { u8 rev; u32 total_size; @@ -42,6 +72,165 @@ struct uefi_cnv_common_step_data { } __packed; /* + * struct uefi_sar_profile - a SAR profile as defined in UEFI + * + * @chains: a per-chain table of SAR values + */ +struct uefi_sar_profile { + struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE]; +} __packed; + +/* + * struct uefi_cnv_var_wrds - WRDS table as defined in UEFI + * + * @revision: the revision of the table + * @mode: is WRDS enabled/disabled + * @sar_profile: sar profile #1 + */ +struct uefi_cnv_var_wrds { + u8 revision; + u32 mode; + struct uefi_sar_profile sar_profile; +} __packed; + +/* + * struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI + * @revision: the revision of the table + * @mode: is WRDS enabled/disabled + * @num_profiles: how many additional profiles we have in this table (0-3) + * @sar_profiles: the additional SAR profiles (#2-#4) + */ +struct uefi_cnv_var_ewrd { + u8 revision; + u32 mode; + u32 num_profiles; + struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1]; +} __packed; + +/* + * struct uefi_cnv_var_wgds - WGDS table as defined in UEFI + * @revision: the revision of the table + * @num_profiles: the number of geo profiles we have in the table. + * The first 3 are mandatory; the table can hold up to 8 in total. + * @geo_profiles: a per-profile table of the offsets to add to SAR values. + */ +struct uefi_cnv_var_wgds { + u8 revision; + u8 num_profiles; + struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM]; +} __packed; + +/* + * struct uefi_cnv_var_ppag - PPAG table as defined in UEFI + * @revision: the revision of the table + * @ppag_modes: values from &enum iwl_ppag_flags + * @ppag_chains: the PPAG values per chain and band + */ +struct uefi_cnv_var_ppag { + u8 revision; + u32 ppag_modes; + struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; +} __packed; + +/* struct uefi_cnv_var_wtas - WTAS table as defined in UEFI + * @revision: the revision of the table + * @tas_selection: different options of TAS enablement. 
+ * @black_list_size: the number of defined entries in the black list + * @black_list: a list of countries that are not allowed to use the TAS feature + */ +struct uefi_cnv_var_wtas { + u8 revision; + u32 tas_selection; + u8 black_list_size; + u16 black_list[IWL_WTAS_BLACK_LIST_MAX]; +} __packed; + +/* struct uefi_cnv_var_splc - SPLC table as defined in UEFI + * @revision: the revision of the table + * @default_pwr_limit: The default maximum power per device + */ +struct uefi_cnv_var_splc { + u8 revision; + u32 default_pwr_limit; +} __packed; + +/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI + * @revision: the revision of the table + * @mcc: country identifier as defined in ISO/IEC 3166-1 Alpha 2 code + */ +struct uefi_cnv_var_wrdd { + u8 revision; + u32 mcc; +} __packed; + +/* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI + * @revision: the revision of the table + * @ext_clock_valid: indicates if external 32KHz clock is valid + */ +struct uefi_cnv_var_eckv { + u8 revision; + u32 ext_clock_valid; +} __packed; + +#define UEFI_MAX_DSM_FUNCS 32 + +/* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI + * @revision: the revision of the table + * @functions: payload of the different DSM functions + */ +struct uefi_cnv_var_general_cfg { + u8 revision; + u32 functions[UEFI_MAX_DSM_FUNCS]; +} __packed; + +#define IWL_UEFI_WBEM_REV0_MASK (BIT(0) | BIT(1)) +/* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined + * in UEFI + * @revision: the revision of the table + * @wbem_320mhz_per_mcc: enablement of 320MHz bandwidth per MCC + * bit 0 - if set, 320MHz is enabled for Japan + * bit 1 - if set, 320MHz is enabled for South Korea + * bits 2-31, Reserved + */ +struct uefi_cnv_wlan_wbem_data { + u8 revision; + u32 wbem_320mhz_per_mcc; +} __packed; + +enum iwl_uefi_cnv_puncturing_flags { + IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK = BIT(0), + IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK = BIT(1), +}; + +#define IWL_UEFI_PUNCTURING_REV0_MASK (IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK | \ + IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK) +/** + * struct uefi_cnv_var_puncturing_data - controlling channel + * puncturing for a few countries. + * @revision: the revision of the table + * @puncturing: enablement of channel puncturing per mcc + * see &enum iwl_uefi_cnv_puncturing_flags. + */ +struct uefi_cnv_var_puncturing_data { + u8 revision; + u32 puncturing; +} __packed; + +/** + * struct uefi_cnv_wlan_dsbr_data - BIOS STEP configuration information + * @revision: the revision of the table + * @config: STEP configuration flags: + * bit 8, switch to URM depending on FW setting + * bit 9, switch to URM + * + * Platform information for STEP configuration/workarounds. + */ +struct uefi_cnv_wlan_dsbr_data { + u8 revision; + u32 config; +} __packed; + +/* * This is known to be broken on v4.19 and to work on v5.4. Until we * figure out why this is the case and how to make it work, simply * disable the feature in old kernels. 
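The rev-0 WBEM and puncturing words defined above are plain bitmasks, so once a getter has applied the corresponding IWL_UEFI_*_REV0_MASK, a consumer only needs mask-and-test logic. A short sketch of how the puncturing word might be tested per country (this helper is illustrative, not part of this patch):

/* illustrative only: test the rev-0 puncturing word for one country */
static bool iwl_puncturing_enabled(u32 puncturing, bool usa)
{
	u32 mask = usa ? IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK
		       : IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK;

	return puncturing & mask;
}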
@@ -55,6 +244,24 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, void iwl_uefi_get_step_table(struct iwl_trans *trans); int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, u32 tlv_len, struct iwl_pnvm_image *pnvm_data); +int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt, + struct iwl_tas_data *data); +int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt, + u64 *dflt_pwr_limit); +int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc); +int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk); +int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); +int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, + u32 *value); +void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value); #else /* CONFIG_EFI */ static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) { @@ -85,13 +292,61 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, { return 0; } -#endif /* CONFIG_EFI */ -#if defined(CONFIG_EFI) && defined(CONFIG_ACPI) -void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); -int iwl_uefi_get_uats_table(struct iwl_trans *trans, - struct iwl_fw_runtime *fwrt); -#else +static inline int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt, + struct iwl_tas_data *data) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt, + u64 *dflt_pwr_limit) +{ + *dflt_pwr_limit = 0; + return 0; +} + +static inline int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) +{ + return -ENOENT; +} + +static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, + enum iwl_dsm_funcs func, u32 *value) +{ + return -ENOENT; +} + static inline void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { @@ -104,5 +359,16 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans, return 0; } -#endif +static inline +int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt) +{ + return 0; +} + +static inline +int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value) +{ + return -ENOENT; +} +#endif /* CONFIG_EFI */ #endif /* __iwl_fw_uefi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index ae6f1cd4d660..2b6a80142aba 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2005-2014, 2018-2021 
Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ @@ -11,7 +11,9 @@ #include <linux/netdevice.h> #include <linux/ieee80211.h> #include <linux/nl80211.h> +#include <linux/mod_devicetable.h> #include "iwl-csr.h" +#include "iwl-drv.h" enum iwl_device_family { IWL_DEVICE_FAMILY_UNDEFINED, @@ -36,6 +38,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_AX210, IWL_DEVICE_FAMILY_BZ, IWL_DEVICE_FAMILY_SC, + IWL_DEVICE_FAMILY_DR, }; /* @@ -100,6 +103,10 @@ enum iwl_nvm_type { #define ANT_ABC (ANT_A | ANT_B | ANT_C) +#define IWL_FW_AND_PNVM(pfx, api) \ + MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode"); \ + MODULE_FIRMWARE(pfx ".pnvm") + static inline u8 num_of_ant(u8 mask) { return !!((mask) & ANT_A) + @@ -239,7 +246,7 @@ enum iwl_cfg_trans_ltr_delay { }; /** - * struct iwl_cfg_trans - information needed to start the trans + * struct iwl_cfg_trans_params - information needed to start the trans * * These values are specific to the device ID and do not change when * multiple configs are used for a single device ID. They values are @@ -256,6 +263,7 @@ enum iwl_cfg_trans_ltr_delay { * @mq_rx_supported: multi-queue rx support * @integrated: discrete or integrated * @low_latency_xtal: use the low latency xtal if supported + * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay. * @imr_enabled: use the IMR if supported. */ @@ -418,6 +426,11 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_BZ 0x46 #define IWL_CFG_MAC_TYPE_GL 0x47 #define IWL_CFG_MAC_TYPE_SC 0x48 +#define IWL_CFG_MAC_TYPE_SC2 0x49 +#define IWL_CFG_MAC_TYPE_SC2F 0x4A +#define IWL_CFG_MAC_TYPE_BZ_W 0x4B +#define IWL_CFG_MAC_TYPE_BR 0x4C +#define IWL_CFG_MAC_TYPE_DR 0x4D #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 @@ -426,10 +439,9 @@ struct iwl_cfg { #define IWL_CFG_RF_TYPE_HR2 0x10A #define IWL_CFG_RF_TYPE_HR1 0x10C #define IWL_CFG_RF_TYPE_GF 0x10D -#define IWL_CFG_RF_TYPE_MR 0x110 -#define IWL_CFG_RF_TYPE_MS 0x111 #define IWL_CFG_RF_TYPE_FM 0x112 #define IWL_CFG_RF_TYPE_WH 0x113 +#define IWL_CFG_RF_TYPE_PE 0x114 #define IWL_CFG_RF_ID_TH 0x1 #define IWL_CFG_RF_ID_TH1 0x1 @@ -442,6 +454,9 @@ struct iwl_cfg { #define IWL_CFG_NO_160 0x1 #define IWL_CFG_160 0x0 +#define IWL_CFG_NO_320 0x1 +#define IWL_CFG_320 0x0 + #define IWL_CFG_CORES_BT 0x0 #define IWL_CFG_CORES_BT_GNSS 0x5 @@ -471,6 +486,16 @@ struct iwl_dev_info { const char *name; }; +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +extern const struct iwl_dev_info iwl_dev_info_table[]; +extern const unsigned int iwl_dev_info_table_size; +const struct iwl_dev_info * +iwl_pci_find_dev_info(u16 device, u16 subsystem_device, + u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step); +extern const struct pci_device_id iwl_hw_card_ids[]; +#endif + /* * This list declares the config structures for all devices. 
*/ @@ -487,7 +512,10 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg; extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_br_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; @@ -526,7 +554,15 @@ extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; extern const char iwl_bz_name[]; +extern const char iwl_fm_name[]; +extern const char iwl_wh_name[]; +extern const char iwl_gl_name[]; +extern const char iwl_mtp_name[]; extern const char iwl_sc_name[]; +extern const char iwl_sc2_name[]; +extern const char iwl_sc2f_name[]; +extern const char iwl_dr_name[]; +extern const char iwl_br_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; @@ -625,13 +661,16 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long; extern const struct iwl_cfg iwl_cfg_ma; extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; -extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0; extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; extern const struct iwl_cfg iwl_cfg_bz; extern const struct iwl_cfg iwl_cfg_gl; extern const struct iwl_cfg iwl_cfg_sc; +extern const struct iwl_cfg iwl_cfg_sc2; +extern const struct iwl_cfg iwl_cfg_sc2f; +extern const struct iwl_cfg iwl_cfg_dr; +extern const struct iwl_cfg iwl_cfg_br; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index 1379dc2d231b..cd25a1b9f2ff 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018, 2020-2023 Intel Corporation + * Copyright (C) 2018, 2020-2024 Intel Corporation */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ @@ -56,6 +56,8 @@ enum iwl_prph_scratch_mtr_format { * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size + * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE + * upon reset. 
*/ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1), @@ -71,9 +73,20 @@ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20, + IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29), }; -/* +/** + * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags + * @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting + * @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode + */ +enum iwl_prph_scratch_ext_flags { + IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4), + IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5), +}; + +/** * struct iwl_prph_scratch_version - version structure * @mac_id: SKU and revision id * @version: prph scratch information version id @@ -87,17 +100,18 @@ struct iwl_prph_scratch_version { __le16 reserved; } __packed; /* PERIPH_SCRATCH_VERSION_S */ -/* +/** * struct iwl_prph_scratch_control - control structure * @control_flags: context information flags see &enum iwl_prph_scratch_flags - * @reserved: reserved + * @control_flags_ext: context information for extended flags, + * see &enum iwl_prph_scratch_ext_flags */ struct iwl_prph_scratch_control { __le32 control_flags; - __le32 reserved; + __le32 control_flags_ext; } __packed; /* PERIPH_SCRATCH_CONTROL_S */ -/* +/** * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch * @pnvm_base_addr: PNVM start address * @pnvm_size: the size of the PNVM image in bytes @@ -117,7 +131,8 @@ struct iwl_prph_scratch_pnvm_cfg { struct iwl_prph_scrath_mem_desc_addr_array { __le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX]; } __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */ -/* + +/** * struct iwl_prph_scratch_hwm_cfg - hwm config * @hwm_base_addr: hwm start address * @hwm_size: hwm size in DWs @@ -129,7 +144,7 @@ struct iwl_prph_scratch_hwm_cfg { __le32 debug_token_config; } __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ -/* +/** * struct iwl_prph_scratch_rbd_cfg - RBDs configuration * @free_rbd_addr: default queue free RB CB base address * @reserved: reserved @@ -139,10 +154,11 @@ struct iwl_prph_scratch_rbd_cfg { __le32 reserved; } __packed; /* PERIPH_SCRATCH_RBD_CFG_S */ -/* +/** * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table * @base_addr: reduce power table address * @size: the size of the entire power table image + * @reserved: (reserved) */ struct iwl_prph_scratch_uefi_cfg { __le64 base_addr; @@ -150,7 +166,7 @@ struct iwl_prph_scratch_uefi_cfg { __le32 reserved; } __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */ -/* +/** * struct iwl_prph_scratch_step_cfg - prph scratch step configuration * @mbx_addr_0: [0:7] revision, * [8:15] cnvi_to_cnvr length, @@ -164,13 +180,14 @@ struct iwl_prph_scratch_step_cfg { __le32 mbx_addr_1; } __packed; -/* +/** * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config * @version: version information of context info and HW * @control: control flags of FH configurations * @pnvm_cfg: ror configuration * @hwm_cfg: hwm configuration * @rbd_cfg: default RX queue configuration + * @reduce_power_cfg: UEFI power reduction table * @step_cfg: step configuration */ struct iwl_prph_scratch_ctrl_cfg { @@ -183,7 +200,7 @@ struct iwl_prph_scratch_ctrl_cfg { struct iwl_prph_scratch_step_cfg step_cfg; } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ -/* +/** * struct iwl_prph_scratch - peripheral scratch mapping * @ctrl_cfg: control and configuration of prph scratch * @dram: firmware images addresses in DRAM @@ -199,7 +216,7 @@ struct iwl_prph_scratch { struct iwl_context_info_dram 
dram; } __packed; /* PERIPH_SCRATCH_S */ -/* +/** * struct iwl_prph_info - peripheral information * @boot_stage_mirror: reflects the value in the Boot Stage CSR register * @ipc_status_mirror: reflects the value in the IPC Status CSR register @@ -213,7 +230,7 @@ struct iwl_prph_info { __le32 reserved; } __packed; /* PERIPH_INFO_S */ -/* +/** * struct iwl_context_info_gen3 - device INIT configuration * @version: version of the context information * @size: size of context information in DWs diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h index 1a1321db137c..dfd44fabf237 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h @@ -1,12 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2022 Intel Corporation + * Copyright (C) 2018-2020, 2022, 2024 Intel Corporation */ #ifndef __iwl_context_info_file_h__ #define __iwl_context_info_file_h__ -/* maximmum number of DRAM map entries supported by FW */ +/* maximum number of DRAM map entries supported by FW */ #define IWL_MAX_DRAM_ENTRY 64 #define CSR_CTXT_INFO_BA 0x40 @@ -53,11 +53,12 @@ enum iwl_context_info_flags { IWL_CTXT_INFO_RB_SIZE_32K = 0xe, }; -/* +/** * struct iwl_context_info_version - version structure * @mac_id: SKU and revision id * @version: context information version id * @size: the size of the context information in DWs + * @reserved: (reserved) */ struct iwl_context_info_version { __le16 mac_id; @@ -66,16 +67,17 @@ struct iwl_context_info_version { __le16 reserved; } __packed; -/* +/** * struct iwl_context_info_control - version structure * @control_flags: context information flags see &enum iwl_context_info_flags + * @reserved: (reserved) */ struct iwl_context_info_control { __le32 control_flags; __le32 reserved; } __packed; -/* +/** * struct iwl_context_info_dram - images DRAM map * each entry in the map represents a DRAM chunk of up to 32 KB * @umac_img: UMAC image DRAM map @@ -88,7 +90,7 @@ struct iwl_context_info_dram { __le64 virtual_img[IWL_MAX_DRAM_ENTRY]; } __packed; -/* +/** * struct iwl_context_info_rbd_cfg - RBDs configuration * @free_rbd_addr: default queue free RB CB base address * @used_rbd_addr: default queue used RB CB base address @@ -100,10 +102,11 @@ struct iwl_context_info_rbd_cfg { __le64 status_wr_ptr; } __packed; -/* +/** * struct iwl_context_info_hcmd_cfg - command queue configuration * @cmd_queue_addr: address of command queue * @cmd_queue_size: number of entries + * @reserved: (reserved) */ struct iwl_context_info_hcmd_cfg { __le64 cmd_queue_addr; @@ -111,10 +114,11 @@ struct iwl_context_info_hcmd_cfg { u8 reserved[7]; } __packed; -/* +/** * struct iwl_context_info_dump_cfg - Core Dump configuration * @core_dump_addr: core dump (debug DRAM address) start address * @core_dump_size: size, in DWs + * @reserved: (reserved) */ struct iwl_context_info_dump_cfg { __le64 core_dump_addr; @@ -122,10 +126,11 @@ struct iwl_context_info_dump_cfg { __le32 reserved; } __packed; -/* +/** * struct iwl_context_info_pnvm_cfg - platform NVM data configuration * @platform_nvm_addr: Platform NVM data start address * @platform_nvm_size: size in DWs + * @reserved: (reserved) */ struct iwl_context_info_pnvm_cfg { __le64 platform_nvm_addr; @@ -133,11 +138,12 @@ struct iwl_context_info_pnvm_cfg { __le32 reserved; } __packed; -/* +/** * struct iwl_context_info_early_dbg_cfg - early debug configuration 
for * dumping DRAM addresses * @early_debug_addr: early debug start address * @early_debug_size: size in DWs + * @reserved: (reserved) */ struct iwl_context_info_early_dbg_cfg { __le64 early_debug_addr; @@ -145,16 +151,20 @@ struct iwl_context_info_early_dbg_cfg { __le32 reserved; } __packed; -/* +/** * struct iwl_context_info - device INIT configuration * @version: version information of context info and HW * @control: control flags of FH configurations + * @reserved0: (reserved) * @rbd_cfg: default RX queue configuration * @hcmd_cfg: command queue configuration + * @reserved1: (reserved) * @dump_cfg: core dump data * @edbg_cfg: early debug configuration * @pnvm_cfg: platform nvm configuration + * @reserved2: (reserved) * @dram: firmware image addresses in DRAM + * @reserved3: (reserved) */ struct iwl_context_info { struct iwl_context_info_version version; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 4511d7fb2279..be9e464c9b7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -167,13 +167,15 @@ #define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12) #define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14) -#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) -#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ -#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ -#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ -#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000) -#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ +#define CSR_HW_IF_CONFIG_REG_HAP_WAKE 0x00080000 +/* NOTE: EEPROM_OWN_SEM is no longer defined for new HW */ +#define CSR_HW_IF_CONFIG_REG_EEPROM_OWN_SEM 0x00200000 +#define CSR_HW_IF_CONFIG_REG_PCI_OWN_SET 0x00400000 +#define CSR_HW_IF_CONFIG_REG_IAMT_UP 0x01000000 +#define CSR_HW_IF_CONFIG_REG_ME_OWN 0x02000000 +#define CSR_HW_IF_CONFIG_REG_WAKE_ME 0x08000000 +#define CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN 0x10000000 +#define CSR_HW_IF_CONFIG_REG_PERSISTENCE 0x40000000 #define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) @@ -304,9 +306,7 @@ #define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28) #define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29) -/** - * hw_rev values - */ +/* hw_rev values */ enum { SILICON_A_STEP = 0, SILICON_B_STEP, @@ -353,7 +353,6 @@ enum { #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) #define CSR_HW_RF_ID_TYPE_GF (0x0010D000) #define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000) -#define CSR_HW_RF_ID_TYPE_MS (0x00111000) #define CSR_HW_RF_ID_TYPE_FM (0x00112000) #define CSR_HW_RF_ID_TYPE_WP (0x00113000) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 72075720969c..08d990ba8a79 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -64,21 +64,22 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = { [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,}, }; -static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv, - struct list_head *list) +/* add a new TLV node, returning it so 
it can be modified */ +static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv, + struct list_head *list) { u32 len = le32_to_cpu(tlv->length); struct iwl_dbg_tlv_node *node; - node = kzalloc(sizeof(*node) + len, GFP_KERNEL); + node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL); if (!node) - return -ENOMEM; + return NULL; memcpy(&node->tlv, tlv, sizeof(node->tlv)); memcpy(node->tlv.data, tlv->data, len); list_add_tail(&node->list, list); - return 0; + return &node->tlv; } static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv) @@ -103,10 +104,18 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans, if (le32_to_cpu(tlv->length) != sizeof(*debug_info)) return -EINVAL; + /* we use this as a string, ensure input was NUL terminated */ + if (strnlen(debug_info->debug_cfg_name, + sizeof(debug_info->debug_cfg_name)) == + sizeof(debug_info->debug_cfg_name)) + return -EINVAL; + IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n", debug_info->debug_cfg_name); - return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list); + if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list)) + return -ENOMEM; + return 0; } static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, @@ -175,7 +184,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans, return -EINVAL; } - return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list); + if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list)) + return -ENOMEM; + return 0; } static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, @@ -212,12 +223,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, return -EINVAL; } - if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG && - !trans->ops->read_config32) { - IWL_ERR(trans, "WRT: Unsupported region type %u\n", type); - return -EOPNOTSUPP; - } - if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) { trans->dbg.imr_data.sram_addr = le32_to_cpu(reg->internal_buffer.base_addr); @@ -246,11 +251,9 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data; - struct iwl_fw_ini_trigger_tlv *dup_trig; u32 tp = le32_to_cpu(trig->time_point); u32 rf = le32_to_cpu(trig->reset_fw); - struct iwl_ucode_tlv *dup = NULL; - int ret; + struct iwl_ucode_tlv *new_tlv; if (le32_to_cpu(tlv->length) < sizeof(*trig)) return -EINVAL; @@ -267,20 +270,18 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, "WRT: time point %u for trigger TLV with reset_fw %u\n", tp, rf); trans->dbg.last_tp_resetfw = 0xFF; + + new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); + if (!new_tlv) + return -ENOMEM; + if (!le32_to_cpu(trig->occurrences)) { - dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length), - GFP_KERNEL); - if (!dup) - return -ENOMEM; - dup_trig = (void *)dup->data; - dup_trig->occurrences = cpu_to_le32(-1); - tlv = dup; - } + struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data; - ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); - kfree(dup); + new_trig->occurrences = cpu_to_le32(-1); + } - return ret; + return 0; } static int iwl_dbg_tlv_config_set(struct iwl_trans *trans, @@ -304,7 +305,9 @@ static int iwl_dbg_tlv_config_set(struct iwl_trans *trans, return -EINVAL; } - return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list); + if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list)) + return -ENOMEM; + return 0; } static int (*dbg_tlv_alloc[])(struct iwl_trans 
*trans, @@ -1148,7 +1151,9 @@ iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt, if (!match) { IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n", le32_to_cpu(trig->time_point)); - return iwl_dbg_tlv_add(trig_tlv, trig_list); + if (!iwl_dbg_tlv_add(trig_tlv, trig_list)) + return -ENOMEM; + return 0; } return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match); @@ -1234,38 +1239,27 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, } } - fwrt->trans->dbg.restart_required = FALSE; - IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n", - tp, dump_data.trig->reset_fw); - IWL_DEBUG_FW(fwrt, - "WRT: restart_required %d, last_tp_resetfw %d\n", - fwrt->trans->dbg.restart_required, - fwrt->trans->dbg.last_tp_resetfw); + fwrt->trans->dbg.restart_required = false; if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { - fwrt->trans->dbg.restart_required = TRUE; + fwrt->trans->dbg.restart_required = true; } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT && fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { - fwrt->trans->dbg.restart_required = FALSE; + fwrt->trans->dbg.restart_required = false; fwrt->trans->dbg.last_tp_resetfw = 0xFF; - IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n"); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { - IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n"); - fwrt->trans->dbg.restart_required = TRUE; + fwrt->trans->dbg.restart_required = true; } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { - IWL_DEBUG_FW(fwrt, - "WRT: stop only and no reload firmware\n"); - fwrt->trans->dbg.restart_required = FALSE; + fwrt->trans->dbg.restart_required = false; fwrt->trans->dbg.last_tp_resetfw = le32_to_cpu(dump_data.trig->reset_fw); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_NOTHING) { - IWL_DEBUG_FW(fwrt, - "WRT: nothing need to be done after debug collection\n"); + /* nothing */ } else { IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", le32_to_cpu(dump_data.trig->reset_fw)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index 1b9f16a31b54..bf52c2edaad1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2021 Intel Corporation + * Copyright(c) 2018 - 2021, 2024 Intel Corporation * * Portions of this file are derived from the ipw3945 project. *****************************************************************************/ @@ -209,6 +209,7 @@ do { \ #define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) #define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a) #define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_DEV_POWER(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_POWER, f, ## a) #define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) #define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a) #define IWL_DEBUG_WOWLAN(p, f, a...) 
IWL_DEBUG(p, IWL_DL_WOWLAN, f, ## a) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h index 2c280a2fe3df..0d4a0896a2c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h @@ -3,7 +3,7 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019, 2023 Intel Corporation + * Copyright(c) 2018 - 2019, 2023-2024 Intel Corporation *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ) @@ -28,7 +28,7 @@ TRACE_EVENT(iwlwifi_dev_tx_tb, TP_fast_assign( DEV_ASSIGN; __entry->phys = phys; - if (iwl_trace_data(skb)) + if (__get_dynamic_array_len(data)) memcpy(__get_dynamic_array(data), data_src, data_len); ), TP_printk("[%s] TX frame data", __get_str(dev)) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index e656bf6bc003..ead72c3d33bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -4,7 +4,7 @@ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018, 2023 Intel Corporation + * Copyright(c) 2018, 2023-2024 Intel Corporation *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ) @@ -88,8 +88,8 @@ TRACE_EVENT(iwlwifi_dev_tx, * for the possible padding). */ __dynamic_array(u8, buf0, buf0_len) - __dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ? - 0 : skb->len - hdr_len) + __dynamic_array(u8, buf1, hdr_len > 0 && !iwl_trace_data(skb) ? 
+ skb->len - hdr_len : 0) ), TP_fast_assign( DEV_ASSIGN; @@ -99,7 +99,7 @@ TRACE_EVENT(iwlwifi_dev_tx, __entry->framelen += skb->len - hdr_len; memcpy(__get_dynamic_array(tfd), tfd, tfdlen); memcpy(__get_dynamic_array(buf0), buf0, buf0_len); - if (hdr_len > 0 && !iwl_trace_data(skb)) + if (__get_dynamic_array_len(buf1)) skb_copy_bits(skb, hdr_len, __get_dynamic_array(buf1), skb->len - hdr_len); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h index 1d6c292cf545..0db1fa5477af 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h @@ -57,7 +57,7 @@ TRACE_EVENT(iwlwifi_dbg, ), TP_fast_assign( __entry->level = level; - __assign_str(function, function); + __assign_str(function); __assign_vstr(msg, vaf->fmt, vaf->va); ), TP_printk("%s", __get_str(msg)) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index c3e09f4fefeb..76166e1b10e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -87,7 +87,7 @@ static inline void trace_ ## name(proto) {} #endif #define DEV_ENTRY __string(dev, dev_name(dev)) -#define DEV_ASSIGN __assign_str(dev, dev_name(dev)) +#define DEV_ASSIGN __assign_str(dev) #include "iwl-devtrace-io.h" #include "iwl-devtrace-ucode.h" diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index abf8001bdac1..352b6e73e08f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -22,6 +22,7 @@ #include "iwl-modparams.h" #include "fw/api/alive.h" #include "fw/api/mac.h" +#include "fw/api/mac-cfg.h" /****************************************************************************** * @@ -137,8 +138,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) memset(&drv->fw, 0, sizeof(drv->fw)); } -static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, - struct fw_sec *sec) +static int iwl_alloc_fw_desc(struct fw_desc *desc, struct fw_sec *sec) { void *data; @@ -187,16 +187,11 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) case IWL_CFG_RF_TYPE_HR1: case IWL_CFG_RF_TYPE_HR2: rf = "hr"; + rf_step = 'b'; break; case IWL_CFG_RF_TYPE_GF: rf = "gf"; break; - case IWL_CFG_RF_TYPE_MR: - rf = "mr"; - break; - case IWL_CFG_RF_TYPE_MS: - rf = "ms"; - break; case IWL_CFG_RF_TYPE_FM: rf = "fm"; break; @@ -323,17 +318,6 @@ struct iwl_firmware_pieces { size_t n_mem_tlv; }; -/* - * These functions are just to extract uCode section data from the pieces - * structure. - */ -static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type, - int sec) -{ - return &pieces->img[type].sec[sec]; -} - static void alloc_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) @@ -394,22 +378,18 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces, /* * Gets uCode section from tlv. 
*/ -static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, - const void *data, enum iwl_ucode_type type, - int size) +static int iwl_store_ucode_sec(struct fw_img_parsing *img, + const void *data, int size) { - struct fw_img_parsing *img; struct fw_sec *sec; const struct fw_sec_parsing *sec_parse; size_t alloc_size; - if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) - return -1; + if (WARN_ON(!img || !data)) + return -EINVAL; sec_parse = (const struct fw_sec_parsing *)data; - img = &pieces->img[type]; - alloc_size = sizeof(*img->sec) * (img->sec_counter + 1); sec = krealloc(img->sec, alloc_size, GFP_KERNEL); if (!sec) @@ -905,18 +885,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_SEC_RT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, - tlv_len); + iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR], + tlv_data, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_INIT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, - tlv_len); + iwl_store_ucode_sec(&pieces->img[IWL_UCODE_INIT], + tlv_data, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_WOWLAN: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, - tlv_len); + iwl_store_ucode_sec(&pieces->img[IWL_UCODE_WOWLAN], + tlv_data, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_DEF_CALIB: @@ -937,18 +917,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, FW_PHY_CFG_RX_CHAIN_POS; break; case IWL_UCODE_TLV_SECURE_SEC_RT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, - tlv_len); + iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR], + tlv_data, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_INIT: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, - tlv_len); + iwl_store_ucode_sec(&pieces->img[IWL_UCODE_INIT], + tlv_data, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: - iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, - tlv_len); + iwl_store_ucode_sec(&pieces->img[IWL_UCODE_WOWLAN], + tlv_data, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_NUM_OF_CPU: @@ -987,16 +967,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); - if (major >= 35) - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), - "%u.%08x.%u %s", major, minor, - local_comp, iwl_reduced_fw_name(drv)); - else - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), - "%u.%u.%u %s", major, minor, - local_comp, iwl_reduced_fw_name(drv)); + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), + "%u.%08x.%u %s", major, minor, + local_comp, iwl_reduced_fw_name(drv)); break; } case IWL_UCODE_TLV_FW_DBG_DEST: { @@ -1121,9 +1095,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, } case IWL_UCODE_TLV_SEC_RT_USNIFFER: *usniffer_images = true; - iwl_store_ucode_sec(pieces, tlv_data, - IWL_UCODE_REGULAR_USNIFFER, - tlv_len); + iwl_store_ucode_sec(&pieces->img[IWL_UCODE_REGULAR_USNIFFER], + tlv_data, tlv_len); break; case IWL_UCODE_TLV_PAGING: if (tlv_len != sizeof(u32)) @@ -1208,7 +1181,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(*fseq_ver)) goto invalid_tlv_len; - IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", + IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n", fseq_ver->version); } break; @@ -1216,7 +1189,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(u32)) 
goto invalid_tlv_len; if (le32_to_cpup((const __le32 *)tlv_data) > - IWL_MVM_STATION_COUNT_MAX) { + IWL_STATION_COUNT_MAX) { IWL_ERR(drv, "%d is an invalid number of station\n", le32_to_cpup((const __le32 *)tlv_data)); @@ -1225,6 +1198,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, capa->num_stations = le32_to_cpup((const __le32 *)tlv_data); break; + case IWL_UCODE_TLV_FW_NUM_LINKS: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + if (le32_to_cpup((const __le32 *)tlv_data) > + IWL_FW_MAX_LINK_ID + 1) { + IWL_ERR(drv, + "%d is an invalid number of links\n", + le32_to_cpup((const __le32 *)tlv_data)); + goto tlv_error; + } + capa->num_links = + le32_to_cpup((const __le32 *)tlv_data); + break; case IWL_UCODE_TLV_FW_NUM_BEACONS: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; @@ -1348,26 +1334,31 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } -static int iwl_alloc_ucode(struct iwl_drv *drv, - struct iwl_firmware_pieces *pieces, - enum iwl_ucode_type type) +static int iwl_alloc_ucode_mem(struct fw_img *out, struct fw_img_parsing *img) { - int i; struct fw_desc *sec; - sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL); + sec = kcalloc(img->sec_counter, sizeof(*sec), GFP_KERNEL); if (!sec) return -ENOMEM; - drv->fw.img[type].sec = sec; - drv->fw.img[type].num_sec = pieces->img[type].sec_counter; - for (i = 0; i < pieces->img[type].sec_counter; i++) - if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i))) + out->sec = sec; + out->num_sec = img->sec_counter; + + for (int i = 0; i < out->num_sec; i++) + if (iwl_alloc_fw_desc(&sec[i], &img->sec[i])) return -ENOMEM; return 0; } +static int iwl_alloc_ucode(struct iwl_drv *drv, + struct iwl_firmware_pieces *pieces, + enum iwl_ucode_type type) +{ + return iwl_alloc_ucode_mem(&drv->fw.img[type], &pieces->img[type]); +} + static int validate_sec_sizes(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, const struct iwl_cfg *cfg) @@ -1440,18 +1431,21 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); - if (op_mode) + if (!IS_ERR(op_mode)) return op_mode; if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status)) break; - IWL_ERR(drv, "retry init count %d\n", retry); - #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; #endif + + if (PTR_ERR(op_mode) != -ETIMEDOUT) + break; + + IWL_ERR(drv, "retry init count %d\n", retry); } return NULL; @@ -1493,7 +1487,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; int i; - bool load_module = false; bool usniffer_images = false; bool failure = true; @@ -1501,7 +1494,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; - fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX; + fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX; fw->ucode_capa.num_beacons = 1; /* dump all fw memory areas by default */ fw->dbg.dump_mask = 0xffffffff; @@ -1741,19 +1734,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto out_unbind; } } else { - load_module = true; + request_module_nowait("%s", op->name); } mutex_unlock(&iwlwifi_opmode_table_mtx); complete(&drv->request_firmware_complete); - /* - * Load 
the module last so we don't block anything - * else from proceeding if the module fails to load - * or hangs loading. - */ - if (load_module) - request_module("%s", op->name); failure = false; goto free; @@ -1838,8 +1824,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_drv); - iwl_dbg_tlv_free(drv->trans); #endif + iwl_dbg_tlv_free(drv->trans); kfree(drv); err: return ERR_PTR(ret); @@ -1865,7 +1851,7 @@ void iwl_drv_stop(struct iwl_drv *drv) mutex_unlock(&iwlwifi_opmode_table_mtx); #ifdef CONFIG_IWLWIFI_DEBUGFS - drv->trans->ops->debugfs_cleanup(drv->trans); + iwl_trans_debugfs_cleanup(drv->trans); debugfs_remove_recursive(drv->dbgfs_drv); #endif @@ -1963,6 +1949,7 @@ module_init(iwl_drv_init); static void __exit iwl_drv_exit(void) { iwl_pci_unregister_driver(); + iwl_trans_free_restart_list(); #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(iwl_dbgfs_root); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 3d1a27ba35c6..854957bdf79d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -6,6 +6,7 @@ #ifndef __iwl_drv_h__ #define __iwl_drv_h__ #include <linux/export.h> +#include <kunit/visibility.h> /* for all modules */ #define DRV_NAME "iwlwifi" @@ -84,11 +85,19 @@ void iwl_drv_stop(struct iwl_drv *drv); * everything is built-in, then we can avoid that. */ #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR -#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI) +#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, "IWLWIFI") #else #define IWL_EXPORT_SYMBOL(sym) #endif +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym) +#define VISIBLE_IF_IWLWIFI_KUNIT +#else +#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) +#define VISIBLE_IF_IWLWIFI_KUNIT static +#endif + /* max retry for init flow */ #define IWL_MAX_INIT_RETRY 2 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c deleted file mode 100644 index 5f386bb1a353..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ /dev/null @@ -1,394 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -/* - * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation - */ -#include <linux/types.h> -#include <linux/slab.h> -#include <linux/export.h> - -#include "iwl-drv.h" -#include "iwl-debug.h" -#include "iwl-eeprom-read.h" -#include "iwl-io.h" -#include "iwl-prph.h" -#include "iwl-csr.h" - -/* - * EEPROM access time values: - * - * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. - * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). - * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. - * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. - */ -#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ - -/* - * The device's EEPROM semaphore prevents conflicts between driver and uCode - * when accessing the EEPROM; each access is a series of pulses to/from the - * EEPROM chip, not a single event, so even reads could conflict if they - * weren't arbitrated by the semaphore. 
- */ -#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ -#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - - -static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) -{ - u16 count; - int ret; - - for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) { - /* Request semaphore */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - - /* See if we got it */ - ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - IWL_EEPROM_SEM_TIMEOUT); - if (ret >= 0) { - IWL_DEBUG_EEPROM(trans->dev, - "Acquired semaphore after %d tries.\n", - count+1); - return ret; - } - } - - return ret; -} - -static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) -{ - iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); -} - -static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp) -{ - u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; - - IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp); - - switch (gp) { - case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: - if (!nvm_is_otp) { - IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", - gp); - return -ENOENT; - } - return 0; - case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: - case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - if (nvm_is_otp) { - IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); - return -ENOENT; - } - return 0; - case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: - default: - IWL_ERR(trans, - "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n", - nvm_is_otp ? "OTP" : "EEPROM", gp); - return -ENOENT; - } -} - -/****************************************************************************** - * - * OTP related functions - * -******************************************************************************/ - -static void iwl_set_otp_access_absolute(struct iwl_trans *trans) -{ - iwl_read32(trans, CSR_OTP_GP_REG); - - iwl_clear_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_OTP_ACCESS_MODE); -} - -static int iwl_nvm_is_otp(struct iwl_trans *trans) -{ - u32 otpgp; - - /* OTP only valid for CP/PP and after */ - switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { - case CSR_HW_REV_TYPE_NONE: - IWL_ERR(trans, "Unknown hardware type\n"); - return -EIO; - case CSR_HW_REV_TYPE_5300: - case CSR_HW_REV_TYPE_5350: - case CSR_HW_REV_TYPE_5100: - case CSR_HW_REV_TYPE_5150: - return 0; - default: - otpgp = iwl_read32(trans, CSR_OTP_GP_REG); - if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) - return 1; - return 0; - } -} - -static int iwl_init_otp_access(struct iwl_trans *trans) -{ - int ret; - - ret = iwl_finish_nic_init(trans); - if (ret) - return ret; - - iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - udelay(5); - iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - - /* - * CSR auto clock gate disable bit - - * this is only applicable for HW with OTP shadow RAM - */ - if (trans->trans_cfg->base_params->shadow_ram_support) - iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - - return 0; -} - -static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, - __le16 *eeprom_data) -{ - int ret = 0; - u32 r; - u32 otpgp; - - iwl_write32(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - ret = iwl_poll_bit(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IWL_EEPROM_ACCESS_TIMEOUT); - if 
(ret < 0) { - IWL_ERR(trans, "Time out reading OTP[%d]\n", addr); - return ret; - } - r = iwl_read32(trans, CSR_EEPROM_REG); - /* check for ECC errors: */ - otpgp = iwl_read32(trans, CSR_OTP_GP_REG); - if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { - /* stop in this case */ - /* set the uncorrectable OTP ECC bit for acknowledgment */ - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n"); - return -EINVAL; - } - if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { - /* continue in this case */ - /* set the correctable OTP ECC bit for acknowledgment */ - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); - IWL_ERR(trans, "Correctable OTP ECC error, continue read\n"); - } - *eeprom_data = cpu_to_le16(r >> 16); - return 0; -} - -/* - * iwl_is_otp_empty: check for empty OTP - */ -static bool iwl_is_otp_empty(struct iwl_trans *trans) -{ - u16 next_link_addr = 0; - __le16 link_value; - bool is_empty = false; - - /* locate the beginning of OTP link list */ - if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) { - if (!link_value) { - IWL_ERR(trans, "OTP is empty\n"); - is_empty = true; - } - } else { - IWL_ERR(trans, "Unable to read first block of OTP list.\n"); - is_empty = true; - } - - return is_empty; -} - - -/* - * iwl_find_otp_image: find EEPROM image in OTP - * finding the OTP block that contains the EEPROM image. - * the last valid block on the link list (the block _before_ the last block) - * is the block we should read and used to configure the device. - * If all the available OTP blocks are full, the last block will be the block - * we should read and used to configure the device. - * only perform this operation if shadow RAM is disabled - */ -static int iwl_find_otp_image(struct iwl_trans *trans, - u16 *validblockaddr) -{ - u16 next_link_addr = 0, valid_addr; - __le16 link_value = 0; - int usedblocks = 0; - - /* set addressing mode to absolute to traverse the link list */ - iwl_set_otp_access_absolute(trans); - - /* checking for empty OTP or error */ - if (iwl_is_otp_empty(trans)) - return -EINVAL; - - /* - * start traverse link list - * until reach the max number of OTP blocks - * different devices have different number of OTP blocks - */ - do { - /* save current valid block address - * check for more block on the link list - */ - valid_addr = next_link_addr; - next_link_addr = le16_to_cpu(link_value) * sizeof(u16); - IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", - usedblocks, next_link_addr); - if (iwl_read_otp_word(trans, next_link_addr, &link_value)) - return -EINVAL; - if (!link_value) { - /* - * reach the end of link list, return success and - * set address point to the starting address - * of the image - */ - *validblockaddr = valid_addr; - /* skip first 2 bytes (link list pointer) */ - *validblockaddr += 2; - return 0; - } - /* more in the link list, continue */ - usedblocks++; - } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items); - - /* OTP has no valid blocks */ - IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); - return -EINVAL; -} - -/* - * iwl_read_eeprom - read EEPROM contents - * - * Load the EEPROM contents from adapter and return it - * and its size. - * - * NOTE: This routine uses the non-debug IO access functions. 
- */ -int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) -{ - __le16 *e; - u32 gp = iwl_read32(trans, CSR_EEPROM_GP); - int sz; - int ret; - u16 addr; - u16 validblockaddr = 0; - u16 cache_addr = 0; - int nvm_is_otp; - - if (!eeprom || !eeprom_size) - return -EINVAL; - - nvm_is_otp = iwl_nvm_is_otp(trans); - if (nvm_is_otp < 0) - return nvm_is_otp; - - sz = trans->trans_cfg->base_params->eeprom_size; - IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); - - e = kmalloc(sz, GFP_KERNEL); - if (!e) - return -ENOMEM; - - ret = iwl_eeprom_verify_signature(trans, nvm_is_otp); - if (ret < 0) { - IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); - goto err_free; - } - - /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = iwl_eeprom_acquire_semaphore(trans); - if (ret < 0) { - IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); - goto err_free; - } - - if (nvm_is_otp) { - ret = iwl_init_otp_access(trans); - if (ret) { - IWL_ERR(trans, "Failed to initialize OTP access.\n"); - goto err_unlock; - } - - iwl_write32(trans, CSR_EEPROM_GP, - iwl_read32(trans, CSR_EEPROM_GP) & - ~CSR_EEPROM_GP_IF_OWNER_MSK); - - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | - CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - /* traversing the linked list if no shadow ram supported */ - if (!trans->trans_cfg->base_params->shadow_ram_support) { - ret = iwl_find_otp_image(trans, &validblockaddr); - if (ret) - goto err_unlock; - } - for (addr = validblockaddr; addr < validblockaddr + sz; - addr += sizeof(u16)) { - __le16 eeprom_data; - - ret = iwl_read_otp_word(trans, addr, &eeprom_data); - if (ret) - goto err_unlock; - e[cache_addr / 2] = eeprom_data; - cache_addr += sizeof(u16); - } - } else { - /* eeprom is an array of 16bit values */ - for (addr = 0; addr < sz; addr += sizeof(u16)) { - u32 r; - - iwl_write32(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - - ret = iwl_poll_bit(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IWL_EEPROM_ACCESS_TIMEOUT); - if (ret < 0) { - IWL_ERR(trans, - "Time out reading EEPROM[%d]\n", addr); - goto err_unlock; - } - r = iwl_read32(trans, CSR_EEPROM_REG); - e[addr / 2] = cpu_to_le16(r >> 16); - } - } - - IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", - nvm_is_otp ? 
"OTP" : "EEPROM"); - - iwl_eeprom_release_semaphore(trans); - - *eeprom_size = sz; - *eeprom = (u8 *)e; - return 0; - - err_unlock: - iwl_eeprom_release_semaphore(trans); - err_free: - kfree(e); - - return ret; -} -IWL_EXPORT_SYMBOL(iwl_read_eeprom); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h deleted file mode 100644 index 63b8e6c6659b..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* - * Copyright (C) 2005-2014 Intel Corporation - */ -#ifndef __iwl_eeprom_h__ -#define __iwl_eeprom_h__ - -#include "iwl-trans.h" - -int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); - -#endif /* __iwl_eeprom_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index e0400ba2ab74..5c8f1868db64 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fh_h__ @@ -15,7 +15,7 @@ /* Flow Handler Definitions */ /****************************/ -/** +/* * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) * Addresses are offsets from device's PCI hardware base address. */ @@ -24,7 +24,7 @@ #define FH_MEM_LOWER_BOUND_GEN2 (0xa06000) #define FH_MEM_UPPER_BOUND_GEN2 (0xa08000) -/** +/* * Keep-Warm (KW) buffer base address. * * Driver must allocate a 4KByte buffer that is for keeping the @@ -44,7 +44,7 @@ #define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) -/** +/* * TFD Circular Buffers Base (CBBC) addresses * * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident @@ -143,7 +143,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, */ #define TFH_SRV_DMA_CHNL0_BC (0x1F70) -/** +/* * Rx SRAM Control and Status Registers (RSCSR) * * These registers provide handshake between driver and device for the Rx queue @@ -216,21 +216,21 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) -/** +/* * Physical base address of 8-byte Rx Status buffer. * Bit fields: * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. */ #define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) -/** +/* * Physical base address of Rx Buffer Descriptor Circular Buffer. * Bit fields: * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. */ #define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) -/** +/* * Rx write pointer (index, really!). * Bit fields: * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. 
@@ -242,7 +242,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c) #define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG -/** +/* * Rx Config/Status Registers (RCSR) * Rx Config Reg for channel 0 (only channel used) * @@ -300,7 +300,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) -/** +/* * Rx Shared Status Registers (RSSR) * * After stopping Rx DMA channel (writing 0 to @@ -356,7 +356,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define RFH_RBDBUF_RBD0_LSB 0xA08300 #define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) -/** +/* * RFH Status Register * * Bit fields: @@ -440,7 +440,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) #define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) -/** +/* * Transmit DMA Channel Control/Status Registers (TCSR) * * Device has one configuration register for each of 8 Tx DMA/FIFO channels @@ -501,7 +501,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) -/** +/* * Tx Shared Status Registers (TSSR) * * After stopping Tx DMA channel (writing 0 to @@ -518,7 +518,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) -/** +/* * Bit fields for TSSR(Tx Shared Status & Control) error status register: * 31: Indicates an address error when accessed to internal memory * uCode/driver must write "1" in order to clear this flag @@ -570,18 +570,19 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers - * @closed_rb_num [0:11] - Indicates the index of the RB which was closed - * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed - * @finished_rb_num [0:11] - Indicates the index of the current RB + * @closed_rb_num: [0:11] Indicates the index of the RB which was closed + * @closed_fr_num: [0:11] Indicates the index of the RX Frame which was closed + * @finished_rb_num: [0:11] Indicates the index of the current RB * in which the last frame was written to - * @finished_fr_num [0:11] - Indicates the index of the RX Frame + * @finished_fr_num: [0:11] Indicates the index of the RX Frame * which was transferred + * @__spare: reserved */ struct iwl_rb_status { __le16 closed_rb_num; __le16 closed_fr_num; __le16 finished_rb_num; - __le16 finished_fr_nam; + __le16 finished_fr_num; __le32 __spare; } __packed; @@ -633,7 +634,7 @@ enum iwl_tfd_tb_hi_n_len { }; /** - * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor + * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor * * This structure contains dma address and length of transmission address * @@ -647,19 +648,19 @@ struct iwl_tfd_tb { } __packed; /** - * struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor + * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor * * This structure contains dma address and length of transmission address * - * @tb_len length of the tx buffer - * @addr 64 
bits dma address + * @tb_len: length of the tx buffer + * @addr: 64 bits dma address */ struct iwl_tfh_tb { __le16 tb_len; __le64 addr; } __packed; -/** +/* * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. * Both driver and device share these circular buffers, each of which must be * contiguous 256 TFDs. @@ -698,10 +699,11 @@ struct iwl_tfd { /** * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD) - * @ num_tbs 0-4 number of active tbs - * 5 -15 reserved - * @ tbs[25] transmit frame buffer descriptors - * @ __pad padding + * @num_tbs: + * 0-4 number of active tbs + * 5-15 reserved + * @tbs: transmit frame buffer descriptors + * @__pad: padding */ struct iwl_tfh_tfd { __le16 num_tbs; @@ -715,13 +717,15 @@ struct iwl_tfh_tfd { /* Fixed (non-configurable) rx data from phy */ /** - * struct iwlagn_schedq_bc_tbl scheduler byte count table + * struct iwlagn_scd_bc_tbl - scheduler byte count table * base physical address provided by SCD_DRAM_BASE_ADDR * For devices up to 22000: - * @tfd_offset 0-12 - tx command byte count + * @tfd_offset: + * For devices up to 22000: + * 0-12 - tx command byte count * 12-16 - station index - * For 22000: - * @tfd_offset 0-12 - tx command byte count + * For 22000: + * 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved */ @@ -730,7 +734,7 @@ struct iwlagn_scd_bc_tbl { } __packed; /** - * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3 + * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3 * For AX210 and on: * @tfd_offset: 0-12 - tx command byte count * 12-13 - number of 64 byte chunks diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index c60f9466c5fd..0653ca8b974a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2022 Intel Corporation + * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #include <linux/delay.h> @@ -460,7 +460,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans) */ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ | CSR_GP_CNTRL_REG_FLAG_MAC_INIT); poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; } else { @@ -526,5 +526,5 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, if (interrupts_enabled) iwl_trans_interrupts(trans, true); - iwl_trans_fw_error(trans, false); + iwl_trans_fw_error(trans, IWL_ERR_TYPE_NMI_FORCED); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 1cf26ab4f488..21eabfc3ffc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022, 2024 Intel Corporation */ #ifndef __iwl_modparams_h__ #define __iwl_modparams_h__ @@ -106,4 +106,23 @@ static inline bool iwl_enable_tx_ampdu(void) return true; } +/* Verify amsdu_size module parameter and convert it to a rxb size */ +static inline enum iwl_amsdu_size +iwl_amsdu_size_to_rxb_size(void) +{ + switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_8K: + return 
IWL_AMSDU_8K; + case IWL_AMSDU_12K: + return IWL_AMSDU_12K; + default: + pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, + iwlwifi_mod_params.amsdu_size); + fallthrough; + case IWL_AMSDU_DEF: + case IWL_AMSDU_4K: + return IWL_AMSDU_4K; + } +} + #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 2f6774ec37b2..9f7e013252fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -38,16 +38,13 @@ enum nvm_offsets { N_HW_ADDRS = 3, NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, - /* NVM calibration section offset (in words) definitions */ - NVM_CALIB_SECTION = 0x2B8, - XTAL_CALIB = 0x316 - NVM_CALIB_SECTION, - /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_SDP = 0, }; enum ext_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ + MAC_ADDRESS_OVERRIDE_EXT_NVM = 1, /* NVM SW-Section offset (in words) definitions */ @@ -144,8 +141,10 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = { /** * enum iwl_nvm_channel_flags - channel flags in NVM * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo - * @NVM_CHANNEL_IBSS: usable as an IBSS channel - * @NVM_CHANNEL_ACTIVE: active scanning allowed + * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated + * when %IWL_NVM_SBANDS_FLAGS_LAR enabled. + * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS + * when %IWL_NVM_SBANDS_FLAGS_LAR enabled. * @NVM_CHANNEL_RADAR: radar detection required * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS @@ -156,6 +155,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = { * @NVM_CHANNEL_80MHZ: 80 MHz channel okay * @NVM_CHANNEL_160MHZ: 160 MHz channel okay * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?) + * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP + * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP */ enum iwl_nvm_channel_flags { NVM_CHANNEL_VALID = BIT(0), @@ -170,6 +171,8 @@ enum iwl_nvm_channel_flags { NVM_CHANNEL_80MHZ = BIT(10), NVM_CHANNEL_160MHZ = BIT(11), NVM_CHANNEL_DC_HIGH = BIT(12), + NVM_CHANNEL_VLP = BIT(13), + NVM_CHANNEL_AFC = BIT(14), }; /** @@ -309,7 +312,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, /* Note: already can print up to 101 characters, 110 is the limit! */ IWL_DEBUG_DEV(dev, level, - "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n", + "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", chan, flags, CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), @@ -322,7 +325,9 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), - CHECK_AND_PRINT_I(DC_HIGH)); + CHECK_AND_PRINT_I(DC_HIGH), + CHECK_AND_PRINT_I(VLP), + CHECK_AND_PRINT_I(AFC)); #undef CHECK_AND_PRINT_I } @@ -366,6 +371,14 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band, (flags & IEEE80211_CHAN_NO_IR)) flags |= IEEE80211_CHAN_IR_CONCURRENT; + /* Set the AP type for the UHB case. 
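The UHB AP-type mapping implemented just below can be condensed into a standalone predicate. In this sketch the bit values are stand-ins for the real NVM_CHANNEL_* and IEEE80211_CHAN_* definitions; only the logic is mirrored:

#include <stdint.h>

/* stand-in bits; real values are NVM_CHANNEL_* / IEEE80211_CHAN_* */
#define NVM_VLP			(1u << 13)
#define NVM_AFC			(1u << 14)
#define CHAN_ALLOW_VLP_AP	(1u << 0)
#define CHAN_NO_VLP_CLIENT	(1u << 1)
#define CHAN_NO_AFC_CLIENT	(1u << 2)

/* VLP and AFC are opt-in: an absent bit forbids the client connection */
static uint32_t uhb_ap_type_flags(uint32_t nvm_flags)
{
	uint32_t flags = 0;

	if (nvm_flags & NVM_VLP)
		flags |= CHAN_ALLOW_VLP_AP;
	else
		flags |= CHAN_NO_VLP_CLIENT;

	if (!(nvm_flags & NVM_AFC))
		flags |= CHAN_NO_AFC_CLIENT;

	return flags;
}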
*/ + if (nvm_flags & NVM_CHANNEL_VLP) + flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; + else + flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT; + if (!(nvm_flags & NVM_CHANNEL_AFC)) + flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT; + return flags; } @@ -380,11 +393,14 @@ static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx) return NL80211_BAND_2GHZ; } -static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, +static int iwl_init_channel_map(struct iwl_trans *trans, + const struct iwl_fw *fw, struct iwl_nvm_data *data, const void * const nvm_ch_flags, u32 sbands_flags, bool v4) { + const struct iwl_cfg *cfg = trans->cfg; + struct device *dev = trans->dev; int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; @@ -466,11 +482,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, else channel->flags = 0; - /* TODO: Don't put limitations on UHB devices as we still don't - * have NVM for them - */ - if (cfg->uhb_supported) - channel->flags = 0; + if (fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS)) + channel->flags |= IEEE80211_CHAN_CAN_MONITOR; + iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags); IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n", @@ -585,7 +600,8 @@ static const u8 iwl_vendor_caps[] = { static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { { - .types_mask = BIT(NL80211_IFTYPE_STATION), + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT), .he_cap = { .has_he = true, .he_cap_elem = { @@ -695,10 +711,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI, .phy_cap_info[5] = + FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK, + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US) | IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | - IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | - IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT, + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP, .phy_cap_info[6] = IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP, @@ -732,19 +749,22 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { /* * PPE thresholds for NSS = 2, and RU index bitmap set * to 0xc. + * Note: just for stating what we want, not present in + * the transmitted data due to not including + * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT. 
*/ .eht_ppe_thres = {0xc1, 0x0e, 0xe0 } }, }, { - .types_mask = BIT(NL80211_IFTYPE_AP), + .types_mask = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = - IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL, @@ -799,7 +819,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI, .phy_cap_info[5] = - IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT, + FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK, + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US), }, /* For all MCS and bandwidth, set 2 NSS for both Tx and @@ -827,6 +848,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { /* * PPE thresholds for NSS = 2, and RU index bitmap set * to 0xc. + * Note: just for stating what we want, not present in + * the transmitted data due to not including + * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT. */ .eht_ppe_thres = {0xc1, 0x0e, 0xe0 } }, @@ -887,11 +911,13 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) { - bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP); + bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO)); bool no_320; - no_320 = !trans->trans_cfg->integrated && - trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB; + no_320 = (!trans->trans_cfg->integrated && + trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) || + trans->reduced_cap_sku; if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be) iftype_data->eht_cap.has_eht = false; @@ -1003,8 +1029,6 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_GF: - case IWL_CFG_RF_TYPE_MR: - case IWL_CFG_RF_TYPE_MS: case IWL_CFG_RF_TYPE_FM: case IWL_CFG_RF_TYPE_WH: iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= @@ -1056,6 +1080,26 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; } + + if (trans->step_urm) { + iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs11_max_nss = 0; + iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0; + } + + if (trans->no_160) + iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= + ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + + if (trans->reduced_cap_sku) { + memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0, + sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320)); + iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0; + iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &= + ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= + ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; + } } static void iwl_init_he_hw_capab(struct iwl_trans *trans, @@ -1136,12 +1180,11 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_fw *fw) { struct device *dev = trans->dev; - const struct iwl_cfg *cfg = trans->cfg; int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; - n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, + n_channels = iwl_init_channel_map(trans, fw, 
data, nvm_ch_flags, sbands_flags, v4); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; @@ -1532,9 +1575,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, ®ulatory[NVM_CHANNELS_SDP] : &nvm_sw[NVM_CHANNELS]; - /* in family 8000 Xtal calibration values moved to OTP */ - data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); - data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); lar_enabled = true; } else { u16 lar_offset = data->nvm_version < 0xE39 ? @@ -1582,11 +1622,15 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, flags &= ~NL80211_RRF_NO_HT40PLUS; if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) flags &= ~NL80211_RRF_NO_HT40MINUS; - } else if (nvm_flags & NVM_CHANNEL_40MHZ) { + } else if (ch_idx < NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS && + nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~NL80211_RRF_NO_HT40PLUS; else flags &= ~NL80211_RRF_NO_HT40MINUS; + } else if (nvm_flags & NVM_CHANNEL_40MHZ) { + flags &= ~NL80211_RRF_NO_HT40PLUS; + flags &= ~NL80211_RRF_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) @@ -1617,6 +1661,16 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, flags &= ~NL80211_RRF_NO_IR; } } + + /* Set the AP type for the UHB case. */ + if (nvm_flags & NVM_CHANNEL_VLP) + flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP; + else + flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT; + + if (!(nvm_flags & NVM_CHANNEL_AFC)) + flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT; + /* * reg_capa is per regulatory domain so apply it for every channel */ @@ -1679,7 +1733,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, const u16 *nvm_chan; struct ieee80211_regdomain *regd, *copy_rd; struct ieee80211_reg_rule *rule; - enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; @@ -1723,8 +1776,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, reg_capa = iwl_get_reg_capa(cap, resp_ver); for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { + enum nl80211_band band = + iwl_nl80211_band_from_channel_idx(ch_idx); + ch_flags = (u16)__le32_to_cpup(channels + ch_idx); - band = iwl_nl80211_band_from_channel_idx(ch_idx); center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band); new_rule = false; @@ -2097,7 +2152,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); - if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) + if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM) nvm->sku_cap_11be_enable = true; /* Initialize PHY sku data */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 651ed25b683b..0c6c3fb8c6dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2023 Intel Corporation + * Copyright (C) 2005-2015, 2018-2024 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ #define __iwl_nvm_parse_h__ #include <net/cfg80211.h> -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "mei/iwl-mei.h" /** @@ -38,7 +38,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, u8 tx_chains, u8 rx_chains); /** - * iwl_parse_mcc_info - parse MCC (mobile 
country code) info coming from FW + * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW * * This function parses the regulatory channel data received as a * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c new file mode 100644 index 000000000000..b3c25acd3691 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation + * Copyright (C) 2015 Intel Mobile Communications GmbH + */ +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/export.h> +#include "iwl-drv.h" +#include "iwl-modparams.h" +#include "iwl-nvm-utils.h" + +int iwl_init_sband_channels(struct iwl_nvm_data *data, + struct ieee80211_supported_band *sband, + int n_channels, enum nl80211_band band) +{ + struct ieee80211_channel *chan = &data->channels[0]; + int n = 0, idx = 0; + + while (idx < n_channels && chan->band != band) + chan = &data->channels[++idx]; + + sband->channels = &data->channels[idx]; + + while (idx < n_channels && chan->band == band) { + chan = &data->channels[++idx]; + n++; + } + + sband->n_channels = n; + + return n; +} +IWL_EXPORT_SYMBOL(iwl_init_sband_channels); + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ + +void iwl_init_ht_hw_capab(struct iwl_trans *trans, + struct iwl_nvm_data *data, + struct ieee80211_sta_ht_cap *ht_info, + enum nl80211_band band, + u8 tx_chains, u8 rx_chains) +{ + const struct iwl_cfg *cfg = trans->cfg; + int max_bit_rate = 0; + + tx_chains = hweight8(tx_chains); + if (cfg->rx_with_siso_diversity) + rx_chains = 1; + else + rx_chains = hweight8(rx_chains); + + if (!(data->sku_cap_11n_enable) || + (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) || + !cfg->ht_params) { + ht_info->ht_supported = false; + return; + } + + if (data->sku_cap_mimo_disabled) + rx_chains = 1; + + ht_info->ht_supported = true; + ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; + + if (cfg->ht_params->stbc) { + ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + if (tx_chains > 1) + ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; + } + + if (cfg->ht_params->ldpc) + ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; + + if (trans->trans_cfg->mq_rx_supported || + iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; + + ht_info->mcs.rx_mask[0] = 0xFF; + ht_info->mcs.rx_mask[1] = 0x00; + ht_info->mcs.rx_mask[2] = 0x00; + + if (rx_chains >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + if (cfg->ht_params->ht_greenfield_support) + ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + + max_bit_rate = MAX_BIT_RATE_20_MHZ; + + if (cfg->ht_params->ht40_bands & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains != rx_chains) { + ht_info->mcs.tx_params 
|= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} +IWL_EXPORT_SYMBOL(iwl_init_ht_hw_capab); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h index 34a178a2eb5d..ac0a29a1c31f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h @@ -58,23 +58,6 @@ struct iwl_nvm_data { struct ieee80211_channel channels[]; }; -/** - * iwl_parse_eeprom_data - parse EEPROM data and return values - * - * @trans: ransport we're parsing for, for debug only - * @cfg: device configuration for parsing and overrides - * @eeprom: the EEPROM data - * @eeprom_size: length of the EEPROM data - * - * This function parses all EEPROM values we need and then - * returns a (newly allocated) struct containing all the - * relevant values for driver use. The struct must be freed - * later with iwl_free_nvm_data(). - */ -struct iwl_nvm_data * -iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const u8 *eeprom, size_t eeprom_size); - int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, int n_channels, enum nl80211_band band); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index 3dc618a7c70f..34eca1a568ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -45,6 +45,55 @@ struct iwl_cfg; */ /** + * enum iwl_fw_error_type - FW error types/sources + * @IWL_ERR_TYPE_IRQ: "normal" FW error through an IRQ + * @IWL_ERR_TYPE_NMI_FORCED: NMI was forced by driver + * @IWL_ERR_TYPE_RESET_HS_TIMEOUT: reset handshake timed out, + * any debug collection must happen synchronously as + * the device will be shut down + * @IWL_ERR_TYPE_CMD_QUEUE_FULL: command queue was full + */ +enum iwl_fw_error_type { + IWL_ERR_TYPE_IRQ, + IWL_ERR_TYPE_NMI_FORCED, + IWL_ERR_TYPE_RESET_HS_TIMEOUT, + IWL_ERR_TYPE_CMD_QUEUE_FULL, +}; + +/** + * enum iwl_fw_error_context - error dump context + * @IWL_ERR_CONTEXT_WORKER: regular from worker context, + * opmode must acquire locks and must also check + * for @IWL_ERR_CONTEXT_ABORT after acquiring locks + * @IWL_ERR_CONTEXT_FROM_OPMODE: context is in a call + * originating from the opmode, e.g. while resetting + * or stopping the device, so opmode must not acquire + * any locks + * @IWL_ERR_CONTEXT_ABORT: after lock acquisition, indicates + * that the dump already happened via another callback + * (currently only while stopping the device) via the + * @IWL_ERR_CONTEXT_FROM_OPMODE context, and this call + * must be aborted + */ +enum iwl_fw_error_context { + IWL_ERR_CONTEXT_WORKER, + IWL_ERR_CONTEXT_FROM_OPMODE, + IWL_ERR_CONTEXT_ABORT, +}; + +/** + * struct iwl_fw_error_dump_mode - error dump mode for callback + * @type: The reason for the dump, per &enum iwl_fw_error_type. + * @context: The context for the dump, may also indicate this + * call needs to be skipped. This MUST be checked before + * and after acquiring any locks in the op-mode! 
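The double check demanded by the @context documentation matters because the restart worker hands the op-mode a pointer into trans->restart.mode, which iwl_trans_stop_device() can switch to IWL_ERR_CONTEXT_ABORT while the worker is blocked on the op-mode lock (see iwl_trans_restart_wk() and iwl_trans_stop_device() later in this patch). A sketch of the pattern a dump_error() implementation is expected to follow; my_opmode and my_collect_dump() are hypothetical, not the real mvm code:

#include <linux/mutex.h>

struct my_opmode {			/* hypothetical op-mode state */
	struct mutex mutex;
};

/* hypothetical collector; the real work lives in the op-mode dbg code */
static void my_collect_dump(struct my_opmode *om,
			    enum iwl_fw_error_type type);

static void my_dump_error(struct my_opmode *om,
			  struct iwl_fw_error_dump_mode *mode)
{
	if (mode->context == IWL_ERR_CONTEXT_ABORT)
		return;			/* dump already collected */

	if (mode->context == IWL_ERR_CONTEXT_WORKER)
		mutex_lock(&om->mutex);	/* FROM_OPMODE: caller holds it */

	/* stop_device() may have set ABORT while we slept on the lock */
	if (mode->context != IWL_ERR_CONTEXT_ABORT)
		my_collect_dump(om, mode->type);

	if (mode->context == IWL_ERR_CONTEXT_WORKER)
		mutex_unlock(&om->mutex);
}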
+ */ +struct iwl_fw_error_dump_mode { + enum iwl_fw_error_type type; + enum iwl_fw_error_context context; +}; + +/** * struct iwl_op_mode_ops - op_mode specific operations * * The op_mode exports its ops so that external components can start it and @@ -68,21 +117,28 @@ struct iwl_cfg; * Must be atomic and called with BH disabled. * @queue_not_full: notifies that a HW queue is not full any more. * Must be atomic and called with BH disabled. - * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that + * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that * the radio is killed. Return %true if the device should be stopped by * the transport immediately after the call. May sleep. + * Note that this must not return %true for newer devices using gen2 PCIe + * transport. * @free_skb: allows the transport layer to free skbs that haven't been * reclaimed by the op_mode. This can happen when the driver is freed and * there are Tx packets pending in the transport layer. * Must be atomic - * @nic_error: error notification. Must be atomic and must be called with BH - * disabled, unless the sync parameter is true. - * @cmd_queue_full: Called when the command queue gets full. Must be atomic and - * called with BH disabled. + * @nic_error: error notification. Must be atomic, the op mode should handle + * the error (e.g. abort notification waiters) and print the error if + * applicable + * @dump_error: NIC error dump collection (can sleep, synchronous) + * @sw_reset: (maybe) initiate a software reset, return %true if started * @nic_config: configure NIC, called before firmware is started. * May sleep * @wimax_active: invoked when WiMax becomes active. May sleep * @time_point: called when transport layer wants to collect debug data + * @device_powered_off: called upon resume from hibernation but not only. + * Op_mode needs to reset its internal state because the device did not + * survive the system state transition. The firmware is no longer running, + * etc... 
*/ struct iwl_op_mode_ops { struct iwl_op_mode *(*start)(struct iwl_trans *trans, @@ -98,13 +154,18 @@ struct iwl_op_mode_ops { void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); - void (*nic_error)(struct iwl_op_mode *op_mode, bool sync); - void (*cmd_queue_full)(struct iwl_op_mode *op_mode); + void (*nic_error)(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type); + void (*dump_error)(struct iwl_op_mode *op_mode, + struct iwl_fw_error_dump_mode *mode); + bool (*sw_reset)(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type); void (*nic_config)(struct iwl_op_mode *op_mode); void (*wimax_active)(struct iwl_op_mode *op_mode); void (*time_point)(struct iwl_op_mode *op_mode, enum iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data); + void (*device_powered_off)(struct iwl_op_mode *op_mode); }; int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops); @@ -170,20 +231,26 @@ static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, op_mode->ops->free_skb(op_mode, skb); } -static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync) +static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type) { - op_mode->ops->nic_error(op_mode, sync); + op_mode->ops->nic_error(op_mode, type); } -static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) +static inline void iwl_op_mode_dump_error(struct iwl_op_mode *op_mode, + struct iwl_fw_error_dump_mode *mode) { - op_mode->ops->cmd_queue_full(op_mode); + might_sleep(); + + if (op_mode->ops->dump_error) + op_mode->ops->dump_error(op_mode, mode); } static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode) { might_sleep(); - op_mode->ops->nic_config(op_mode); + if (op_mode->ops->nic_config) + op_mode->ops->nic_config(op_mode); } static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode) @@ -201,4 +268,11 @@ static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode, op_mode->ops->time_point(op_mode, tp_id, tp_data); } +static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode) +{ + if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off) + return; + op_mode->ops->device_powered_off(op_mode); +} + #endif /* __iwl_op_mode_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index dd32c287b983..23b2009fbb28 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -96,7 +96,7 @@ #define DTSC_PTAT_AVG (0x00a10650) -/** +/* * Tx Scheduler * * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs @@ -169,7 +169,7 @@ */ #define SCD_MEM_LOWER_BOUND (0x0000) -/** +/* * Max Tx window size is the max number of contiguous TFDs that the scheduler * can keep track of at one time when creating block-ack chains of frames. * Note that "64" matches the number of ack bits in a block-ack packet. 
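To make the "64" constraint above concrete: a frame may only be in flight if its sequence number falls inside the 64-entry window the scheduler tracks, which is exactly what one bit per frame in a compressed BlockAck bitmap can acknowledge. A minimal check, assuming standard 12-bit 802.11 sequence numbers; seq_in_scd_window() is an illustrative name:

#include <stdint.h>
#include <stdbool.h>

#define SCD_WIN_SIZE	64u	/* one bit per frame in a compressed BlockAck */
#define SEQ_MASK	0xFFFu	/* 802.11 sequence numbers are 12 bits */

/* true if ssn is still ack-able in a window starting at win_start */
static bool seq_in_scd_window(uint16_t win_start, uint16_t ssn)
{
	return ((uint16_t)(ssn - win_start) & SEQ_MASK) < SCD_WIN_SIZE;
}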
@@ -368,12 +368,26 @@ enum { WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, }; -#define CNVI_AUX_MISC_CHIP 0xA200B0 +#define CNVI_AUX_MISC_CHIP 0xA200B0 +#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24) +#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff) +#define CNVI_AUX_MISC_CHIP_PROD_TYPE_GL 0x910 +#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930 +#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_I 0x900 +#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_W 0x901 + #define CNVR_AUX_MISC_CHIP 0xA2B800 #define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890 #define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938 #define CNVI_SCU_SEQ_DATA_DW9 0xA27488 +#define CNVI_SCU_REG_FOR_ECO_1 0xA26EF8 +#define CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN BIT(4) +#define CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT BIT(5) + +#define CNVI_PMU_STEP_FLOW 0xA2D588 +#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2) + #define PREG_AUX_BUS_WPROT_0 0xA04CC0 /* device family 9000 WPROT register */ @@ -446,12 +460,9 @@ enum { #define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501 #define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532 #define REG_CRF_ID_TYPE_GF 0x410 -#define REG_CRF_ID_TYPE_GF_TC 0xF08 -#define REG_CRF_ID_TYPE_MR 0x810 #define REG_CRF_ID_TYPE_FM 0x910 -#define REG_CRF_ID_TYPE_FMI 0x930 -#define REG_CRF_ID_TYPE_FMR 0x900 #define REG_CRF_ID_TYPE_WHP 0xA10 +#define REG_CRF_ID_TYPE_PE 0xA30 #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index f95098c21c7d..47854a36413e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -2,27 +2,215 @@ /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021, 2023 Intel Corporation + * Copyright (C) 2019-2021, 2023-2024 Intel Corporation */ #include <linux/kernel.h> #include <linux/bsearch.h> +#include <linux/list.h> #include "fw/api/tx.h" #include "iwl-trans.h" #include "iwl-drv.h" #include "iwl-fh.h" -#include "queue/tx.h" #include <linux/dmapool.h> #include "fw/api/commands.h" +#include "pcie/internal.h" +#include "iwl-context-info-gen3.h" + +struct iwl_trans_dev_restart_data { + struct list_head list; + unsigned int restart_count; + time64_t last_error; + char name[]; +}; + +static LIST_HEAD(restart_data_list); +static DEFINE_SPINLOCK(restart_data_lock); + +static struct iwl_trans_dev_restart_data * +iwl_trans_get_restart_data(struct device *dev) +{ + struct iwl_trans_dev_restart_data *tmp, *data = NULL; + const char *name = dev_name(dev); + + spin_lock(&restart_data_lock); + list_for_each_entry(tmp, &restart_data_list, list) { + if (strcmp(tmp->name, name)) + continue; + data = tmp; + break; + } + spin_unlock(&restart_data_lock); + + if (data) + return data; + + data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC); + if (!data) + return NULL; + + strcpy(data->name, name); + spin_lock(&restart_data_lock); + list_add_tail(&data->list, &restart_data_list); + spin_unlock(&restart_data_lock); + + return data; +} + +static void iwl_trans_inc_restart_count(struct device *dev) +{ + struct iwl_trans_dev_restart_data *data; + + data = iwl_trans_get_restart_data(dev); + if (data) { + data->last_error = ktime_get_boottime_seconds(); + data->restart_count++; + } +} + +void iwl_trans_free_restart_list(void) +{ + struct iwl_trans_dev_restart_data *tmp; + + while ((tmp = list_first_entry_or_null(&restart_data_list, + 
typeof(*tmp), list))) { + list_del(&tmp->list); + kfree(tmp); + } +} + +struct iwl_trans_reprobe { + struct device *dev; + struct work_struct work; +}; + +static void iwl_trans_reprobe_wk(struct work_struct *wk) +{ + struct iwl_trans_reprobe *reprobe; + + reprobe = container_of(wk, typeof(*reprobe), work); + + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); + put_device(reprobe->dev); + kfree(reprobe); + module_put(THIS_MODULE); +} + +#define IWL_TRANS_RESET_OK_TIME 180 /* seconds */ + +static enum iwl_reset_mode +iwl_trans_determine_restart_mode(struct iwl_trans *trans) +{ + struct iwl_trans_dev_restart_data *data; + enum iwl_reset_mode at_least = 0; + unsigned int index; + static const enum iwl_reset_mode escalation_list[] = { + IWL_RESET_MODE_SW_RESET, + IWL_RESET_MODE_REPROBE, + IWL_RESET_MODE_REPROBE, + IWL_RESET_MODE_FUNC_RESET, + /* FIXME: add TOP reset */ + IWL_RESET_MODE_PROD_RESET, + /* FIXME: add TOP reset */ + IWL_RESET_MODE_PROD_RESET, + /* FIXME: add TOP reset */ + IWL_RESET_MODE_PROD_RESET, + }; + + if (trans->restart.during_reset) + at_least = IWL_RESET_MODE_REPROBE; + + data = iwl_trans_get_restart_data(trans->dev); + if (!data) + return at_least; + + if (ktime_get_boottime_seconds() - data->last_error >= + IWL_TRANS_RESET_OK_TIME) + data->restart_count = 0; + + index = data->restart_count; + if (index >= ARRAY_SIZE(escalation_list)) + index = ARRAY_SIZE(escalation_list) - 1; + + return max(at_least, escalation_list[index]); +} + +#define IWL_TRANS_RESET_DELAY (HZ * 60) + +static void iwl_trans_restart_wk(struct work_struct *wk) +{ + struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk); + struct iwl_trans_reprobe *reprobe; + enum iwl_reset_mode mode; + + if (!trans->op_mode) + return; + + /* might have been scheduled before marked as dead, re-check */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return; + + iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode); + + /* + * If the opmode stopped the device while we were trying to dump and + * reset, then we'll have done the dump already (synchronized by the + * opmode lock that it will acquire in iwl_op_mode_dump_error()) and + * managed that via trans->restart.mode. + * Additionally, make sure that in such a case we won't attempt to do + * any resets now, since it's no longer requested. + */ + if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) + return; + + if (!iwlwifi_mod_params.fw_restart) + return; + + mode = iwl_trans_determine_restart_mode(trans); + + iwl_trans_inc_restart_count(trans->dev); + + switch (mode) { + case IWL_RESET_MODE_SW_RESET: + IWL_ERR(trans, "Device error - SW reset\n"); + iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type); + break; + case IWL_RESET_MODE_REPROBE: + IWL_ERR(trans, "Device error - reprobe!\n"); + + /* + * get a module reference to avoid doing this while unloading + * anyway and to avoid scheduling a work with code that's + * being removed. 
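The escalation policy of iwl_trans_determine_restart_mode() above can be condensed into one standalone function for illustration. The driver splits the bookkeeping between iwl_trans_inc_restart_count() and the mode selection; names and types here are simplified stand-ins:

#include <time.h>

#define ARRAY_LEN(a)	(sizeof(a) / sizeof((a)[0]))
#define RESET_OK_TIME	180	/* seconds of quiet that clear the count */

enum reset_mode { SW_RESET, REPROBE, FUNC_RESET, PROD_RESET };

static enum reset_mode next_reset_mode(unsigned int *count, time_t *last,
				       time_t now)
{
	static const enum reset_mode ladder[] = {
		SW_RESET, REPROBE, REPROBE, FUNC_RESET,
		PROD_RESET, PROD_RESET, PROD_RESET,
	};
	unsigned int idx = *count;

	if (now - *last >= RESET_OK_TIME) {
		*count = 0;	/* last error is old news: restart the ladder */
		idx = 0;
	}

	if (idx >= ARRAY_LEN(ladder))
		idx = ARRAY_LEN(ladder) - 1;

	*last = now;		/* mirrors iwl_trans_inc_restart_count() */
	(*count)++;
	return ladder[idx];
}

Each error arriving within RESET_OK_TIME of the previous one moves a step down the ladder toward a full product reset; a quiet period starts over from a plain SW reset.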
+ */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(trans, "Module is being unloaded - abort\n"); + return; + } + + reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL); + if (!reprobe) { + module_put(THIS_MODULE); + return; + } + reprobe->dev = get_device(trans->dev); + INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk); + schedule_work(&reprobe->work); + break; + default: + iwl_trans_pcie_reset(trans, mode); + break; + } +} struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP - static struct lock_class_key __key; + static struct lock_class_key __sync_cmd_key; #endif trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL); @@ -33,25 +221,13 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, #ifdef CONFIG_LOCKDEP lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", - &__key, 0); + &__sync_cmd_key, 0); #endif trans->dev = dev; - trans->ops = ops; trans->num_rx_queues = 1; - WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); - - if (trans->trans_cfg->gen2) { - trans->txqs.tfd.addr_size = 64; - trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); - } else { - trans->txqs.tfd.addr_size = 36; - trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfd); - } - trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); + INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk); return trans; } @@ -78,31 +254,6 @@ int iwl_trans_init(struct iwl_trans *trans) if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align)) return -EINVAL; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - trans->txqs.bc_tbl_size = - sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; - else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - trans->txqs.bc_tbl_size = - sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; - else - trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); - /* - * For gen2 devices, we use a single allocation for each byte-count - * table, but they're pretty small (1k) so use a DMA pool that we - * allocate here. - */ - if (trans->trans_cfg->gen2) { - trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev, - trans->txqs.bc_tbl_size, - 256, 0); - if (!trans->txqs.bc_pool) - return -ENOMEM; - } - - /* Some things must not change even if the config does */ - WARN_ON(trans->txqs.tfd.addr_size != - (trans->trans_cfg->gen2 ? 
64 : 36)); - snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = @@ -112,12 +263,6 @@ int iwl_trans_init(struct iwl_trans *trans) if (!trans->dev_cmd_pool) return -ENOMEM; - trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); - if (!trans->txqs.tso_hdr_page) { - kmem_cache_destroy(trans->dev_cmd_pool); - return -ENOMEM; - } - /* Initialize the wait queue for commands */ init_waitqueue_head(&trans->wait_command_queue); @@ -126,20 +271,7 @@ int iwl_trans_init(struct iwl_trans *trans) void iwl_trans_free(struct iwl_trans *trans) { - int i; - - if (trans->txqs.tso_hdr_page) { - for_each_possible_cpu(i) { - struct iwl_tso_hdr_page *p = - per_cpu_ptr(trans->txqs.tso_hdr_page, i); - - if (p && p->page) - __free_page(p->page); - } - - free_percpu(trans->txqs.tso_hdr_page); - } - + cancel_work_sync(&trans->restart.wk); kmem_cache_destroy(trans->dev_cmd_pool); } @@ -167,10 +299,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) return -EIO; - } if (!(cmd->flags & CMD_ASYNC)) lock_map_acquire_read(&trans->sync_cmd_lockdep_map); @@ -180,7 +311,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) cmd->id = DEF_ID(cmd->id); } - ret = iwl_trans_txq_send_hcmd(trans, cmd); + ret = iwl_trans_pcie_send_hcmd(trans, cmd); if (!(cmd->flags & CMD_ASYNC)) lock_map_release(&trans->sync_cmd_lockdep_map); @@ -247,3 +378,409 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans) return 0; } IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted); + +void iwl_trans_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg) +{ + trans->op_mode = trans_cfg->op_mode; + + iwl_trans_pcie_configure(trans, trans_cfg); + WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); +} +IWL_EXPORT_SYMBOL(iwl_trans_configure); + +int iwl_trans_start_hw(struct iwl_trans *trans) +{ + might_sleep(); + + return iwl_trans_pcie_start_hw(trans); +} +IWL_EXPORT_SYMBOL(iwl_trans_start_hw); + +void iwl_trans_op_mode_leave(struct iwl_trans *trans) +{ + might_sleep(); + + iwl_trans_pcie_op_mode_leave(trans); + + cancel_work_sync(&trans->restart.wk); + + trans->op_mode = NULL; + + trans->state = IWL_TRANS_NO_FW; +} +IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave); + +void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) +{ + iwl_trans_pcie_write8(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_write8); + +void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) +{ + iwl_trans_pcie_write32(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_write32); + +u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) +{ + return iwl_trans_pcie_read32(trans, ofs); +} +IWL_EXPORT_SYMBOL(iwl_trans_read32); + +u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) +{ + return iwl_trans_pcie_read_prph(trans, ofs); +} +IWL_EXPORT_SYMBOL(iwl_trans_read_prph); + +void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) +{ + return iwl_trans_pcie_write_prph(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_write_prph); + +int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + return iwl_trans_pcie_read_mem(trans, addr, buf, dwords); +} 
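Since iwl_trans_read_mem() above works in dwords, callers wanting a single register-sized value typically go through a small wrapper, in the style of the driver's iwl_trans_read_mem32() helper. A sketch; read_mem32_sketch() is an illustrative name and the error return is ignored here for brevity, where real callers check it:

/* read one 32-bit word of device SRAM via the dword-based accessor */
static inline u32 read_mem32_sketch(struct iwl_trans *trans, u32 addr)
{
	u32 value = 0;

	iwl_trans_read_mem(trans, addr, &value, 1);
	return value;
}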
+IWL_EXPORT_SYMBOL(iwl_trans_read_mem); + +int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords) +{ + return iwl_trans_pcie_write_mem(trans, addr, buf, dwords); +} +IWL_EXPORT_SYMBOL(iwl_trans_write_mem); + +void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) +{ + if (state) + set_bit(STATUS_TPOWER_PMI, &trans->status); + else + clear_bit(STATUS_TPOWER_PMI, &trans->status); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_pmi); + +int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership) +{ + return iwl_trans_pcie_sw_reset(trans, retake_ownership); +} +IWL_EXPORT_SYMBOL(iwl_trans_sw_reset); + +struct iwl_trans_dump_data * +iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx) +{ + return iwl_trans_pcie_dump_data(trans, dump_mask, + sanitize_ops, sanitize_ctx); +} +IWL_EXPORT_SYMBOL(iwl_trans_dump_data); + +int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset) +{ + might_sleep(); + + return iwl_trans_pcie_d3_suspend(trans, test, reset); +} +IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend); + +int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, + bool test, bool reset) +{ + might_sleep(); + + return iwl_trans_pcie_d3_resume(trans, status, test, reset); +} +IWL_EXPORT_SYMBOL(iwl_trans_d3_resume); + +void iwl_trans_interrupts(struct iwl_trans *trans, bool enable) +{ + iwl_trans_pci_interrupts(trans, enable); +} +IWL_EXPORT_SYMBOL(iwl_trans_interrupts); + +void iwl_trans_sync_nmi(struct iwl_trans *trans) +{ + iwl_trans_pcie_sync_nmi(trans); +} +IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi); + +int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr, + u64 src_addr, u32 byte_cnt) +{ + return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt); +} +IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem); + +void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value) +{ + iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask); + +int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val) +{ + return iwl_trans_pcie_read_config32(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_read_config32); + +bool _iwl_trans_grab_nic_access(struct iwl_trans *trans) +{ + return iwl_trans_pcie_grab_nic_access(trans); +} +IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access); + +void __releases(nic_access) +iwl_trans_release_nic_access(struct iwl_trans *trans) +{ + iwl_trans_pcie_release_nic_access(trans); + __release(nic_access); +} +IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access); + +void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) +{ + might_sleep(); + + trans->state = IWL_TRANS_FW_ALIVE; + + if (trans->trans_cfg->gen2) + iwl_trans_pcie_gen2_fw_alive(trans); + else + iwl_trans_pcie_fw_alive(trans, scd_addr); +} +IWL_EXPORT_SYMBOL(iwl_trans_fw_alive); + +int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, + bool run_in_rfkill) +{ + int ret; + + might_sleep(); + + WARN_ON_ONCE(!trans->rx_mpdu_cmd); + + clear_bit(STATUS_FW_ERROR, &trans->status); + + if (trans->trans_cfg->gen2) + ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill); + else + ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill); + + if (ret == 0) + trans->state = IWL_TRANS_FW_STARTED; + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_trans_start_fw); + +void iwl_trans_stop_device(struct iwl_trans *trans) +{ + might_sleep(); + + /* + * See also the comment in 
iwl_trans_restart_wk(). + * + * When the opmode stops the device while a reset is pending, the + * worker (iwl_trans_restart_wk) might not have run yet or, more + * likely, will be blocked on the opmode lock. Due to the locking, + * we can't just flush the worker. + * + * If this is the case, then the test_and_clear_bit() ensures that + * the worker won't attempt to do anything after the stop. + * + * The trans->restart.mode is a handshake with the opmode, we set + * the context there to ABORT so that when the worker can finally + * acquire the lock in the opmode, the code there won't attempt to + * do any dumps. Since we'd really like to have the dump though, + * also do it inline here (with the opmode locks already held), + * but use a separate mode struct to avoid races. + */ + if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) { + struct iwl_fw_error_dump_mode mode; + + mode = trans->restart.mode; + mode.context = IWL_ERR_CONTEXT_FROM_OPMODE; + trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT; + + iwl_op_mode_dump_error(trans->op_mode, &mode); + } + + if (trans->trans_cfg->gen2) + iwl_trans_pcie_gen2_stop_device(trans); + else + iwl_trans_pcie_stop_device(trans); + + trans->state = IWL_TRANS_NO_FW; +} +IWL_EXPORT_SYMBOL(iwl_trans_stop_device); + +int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int queue) +{ + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return -EIO; + + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + if (trans->trans_cfg->gen2) + return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue); + + return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue); +} +IWL_EXPORT_SYMBOL(iwl_trans_tx); + +void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, + struct sk_buff_head *skbs, bool is_flush) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return; + + iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush); +} +IWL_EXPORT_SYMBOL(iwl_trans_reclaim); + +void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd) +{ + iwl_trans_pcie_txq_disable(trans, queue, configure_scd); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_disable); + +bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int queue_wdg_timeout) +{ + might_sleep(); + + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return false; + + return iwl_trans_pcie_txq_enable(trans, queue, ssn, + cfg, queue_wdg_timeout); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg); + +int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + return iwl_trans_pcie_wait_txq_empty(trans, queue); +} +IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty); + +int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + return iwl_trans_pcie_wait_txqs_empty(trans, txqs); +} +IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty); + +void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return; + + iwl_pcie_freeze_txq_timer(trans, txqs, freeze); +} 
+IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer); + +void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, + int txq_id, bool shared_mode) +{ + iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode); + +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_trans_debugfs_cleanup(struct iwl_trans *trans) +{ + iwl_trans_pcie_debugfs_cleanup(trans); +} +IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup); +#endif + +void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return; + + iwl_pcie_set_q_ptrs(trans, queue, ptr); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs); + +int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, + u8 tid, int size, unsigned int wdg_timeout) +{ + might_sleep(); + + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid, + size, wdg_timeout); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc); + +void iwl_trans_txq_free(struct iwl_trans *trans, int queue) +{ + iwl_txq_dyn_free(trans, queue); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_free); + +int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) +{ + return iwl_trans_pcie_rxq_dma_data(trans, queue, data); +} +IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data); + +int iwl_trans_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + const struct iwl_ucode_capabilities *capa) +{ + return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm); + +void iwl_trans_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm); + +int iwl_trans_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa) +{ + return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads, + capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power); + +void iwl_trans_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 5789a8735976..f6234065dbdd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -26,11 +26,9 @@ * DOC: Transport layer - what is it ? * * The transport layer is the layer that deals with the HW directly. It provides - * an abstraction of the underlying HW to the upper layer. The transport layer - * doesn't provide any policy, algorithm or anything of this kind, but only - * mechanisms to make the HW do something. It is not completely stateless but - * close to it. - * We will have an implementation for each different supported bus. + * the PCIe access to the underlying hardwarwe. The transport layer doesn't + * provide any policy, algorithm or anything of this kind, but only mechanisms + * to make the HW do something. It is not completely stateless but close to it. 
*/ /** @@ -122,6 +120,7 @@ enum CMD_MODE { CMD_BLOCK_TXQS = BIT(3), CMD_SEND_IN_D3 = BIT(4), }; +#define CMD_MODE_BITS 5 #define DEF_CMD_PAYLOAD_SIZE 320 @@ -131,6 +130,11 @@ enum CMD_MODE { * For allocation of the command and tx queues, this establishes the overall * size of the largest command we send to uCode, except for commands that * aren't fully copied and use other TFD space. + * + * @hdr: command header + * @payload: payload for the command + * @hdr_wide: wide command header + * @payload_wide: payload for the wide command */ struct iwl_device_cmd { union { @@ -167,12 +171,6 @@ struct iwl_device_tx_cmd { */ #define IWL_MAX_CMD_TBS_PER_TFD 2 -/* We need 2 entries for the TX command and header, and another one might - * be needed for potential data in the SKB's head. The remaining ones can - * be used for frags. - */ -#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3) - /** * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command * @@ -281,7 +279,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) #define IWL_9000_MAX_RX_HW_QUEUES 1 /** - * enum iwl_wowlan_status - WoWLAN image/device status + * enum iwl_d3_status - WoWLAN image/device status * @IWL_D3_STATUS_ALIVE: firmware is still running after resume * @IWL_D3_STATUS_RESET: device was reset while suspended */ @@ -299,12 +297,13 @@ enum iwl_d3_status { * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode * @STATUS_FW_ERROR: the fw is in error state - * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands - * are sent - * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once, * e.g. for testing + * @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode + * via iwl_trans_finish_sw_reset() + * @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump + * the firmware state yet */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, @@ -314,10 +313,10 @@ enum iwl_trans_status { STATUS_RFKILL_HW, STATUS_RFKILL_OPMODE, STATUS_FW_ERROR, - STATUS_TRANS_GOING_IDLE, - STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, STATUS_SUPPRESS_CMD_ERROR_ONCE, + STATUS_IN_SW_RESET, + STATUS_RESET_PENDING, }; static inline int @@ -329,7 +328,6 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) case IWL_AMSDU_4K: return get_order(4 * 1024); case IWL_AMSDU_8K: - return get_order(8 * 1024); case IWL_AMSDU_12K: return get_order(16 * 1024); default: @@ -393,7 +391,6 @@ struct iwl_dump_sanitize_ops { * @cmd_queue: the index of the command queue. * Must be set before start_fw. * @cmd_fifo: the fifo for host commands - * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue. * @no_reclaim_cmds: Some devices erroneously don't set the * SEQ_RX_FRAME bit on some notifications, this is the * list of such notifications to filter. Max length is @@ -419,7 +416,6 @@ struct iwl_trans_config { u8 cmd_queue; u8 cmd_fifo; - unsigned int cmd_q_wdg_timeout; const u8 *no_reclaim_cmds; unsigned int n_no_reclaim_cmds; @@ -482,176 +478,6 @@ struct iwl_pnvm_image { }; /** - * struct iwl_trans_ops - transport specific operations - * - * All the handlers MUST be implemented - * - * @start_hw: starts the HW. From that point on, the HW can send interrupts. - * May sleep. 
- * @op_mode_leave: Turn off the HW RF kill indication if on - * May sleep - * @start_fw: allocates and inits all the resources for the transport - * layer. Also kick a fw image. - * May sleep - * @fw_alive: called when the fw sends alive notification. If the fw provides - * the SCD base address in SRAM, then provide it here, or 0 otherwise. - * May sleep - * @stop_device: stops the whole device (embedded CPU put to reset) and stops - * the HW. From that point on, the HW will be stopped but will still issue - * an interrupt if the HW RF kill switch is triggered. - * This callback must do the right thing and not crash even if %start_hw() - * was called but not &start_fw(). May sleep. - * @d3_suspend: put the device into the correct mode for WoWLAN during - * suspend. This is optional, if not implemented WoWLAN will not be - * supported. This callback may sleep. - * @d3_resume: resume the device after WoWLAN, enabling the opmode to - * talk to the WoWLAN image to get its status. This is optional, if not - * implemented WoWLAN will not be supported. This callback may sleep. - * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. - * If RFkill is asserted in the middle of a SYNC host command, it must - * return -ERFKILL straight away. - * May sleep only if CMD_ASYNC is not set - * @tx: send an skb. The transport relies on the op_mode to zero the - * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all - * the CSUM will be taken care of (TCP CSUM and IP header in case of - * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP - * header if it is IPv4. - * Must be atomic - * @reclaim: free packet until ssn. Returns a list of freed packets. - * Must be atomic - * @txq_enable: setup a queue. To setup an AC queue, use the - * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before - * this one. The op_mode must not configure the HCMD queue. The scheduler - * configuration may be %NULL, in which case the hardware will not be - * configured. If true is returned, the operation mode needs to increment - * the sequence number of the packets routed to this queue because of a - * hardware scheduler bug. May sleep. - * @txq_disable: de-configure a Tx queue to send AMPDUs - * Must be atomic - * @txq_set_shared_mode: change Tx queue shared/unshared marking - * @wait_tx_queues_empty: wait until tx queues are empty. May sleep. - * @wait_txq_empty: wait until specific tx queue is empty. May sleep. - * @freeze_txq_timer: prevents the timer of the queue from firing until the - * queue is set to awake. Must be atomic. - * @write8: write a u8 to a register at offset ofs from the BAR - * @write32: write a u32 to a register at offset ofs from the BAR - * @read32: read a u32 register at offset ofs from the BAR - * @read_prph: read a DWORD from a periphery register - * @write_prph: write a DWORD to a periphery register - * @read_mem: read device's SRAM in DWORD - * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory - * will be zeroed. - * @read_config32: read a u32 value from the device's config space at - * the given offset. - * @configure: configure parameters required by the transport layer from - * the op_mode. May be called several times before start_fw, can't be - * called after that. - * @set_pmi: set the power pmi state - * @grab_nic_access: wake the NIC to be able to access non-HBUS regs. - * Sleeping is not allowed between grab_nic_access and - * release_nic_access. - * @release_nic_access: let the NIC go to sleep. 
The "flags" parameter - * must be the same one that was sent before to the grab_nic_access. - * @set_bits_mask - set SRAM register according to value and mask. - * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last - * TX'ed commands and similar. The buffer will be vfree'd by the caller. - * Note that the transport must fill in the proper file headers. - * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup - * of the trans debugfs - * @load_pnvm: save the pnvm data in DRAM - * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the - * context info. - * @load_reduce_power: copy reduce power table to the corresponding DRAM memory - * @set_reduce_power: set reduce power table addresses in the sratch buffer - * @interrupts: disable/enable interrupts to transport - */ -struct iwl_trans_ops { - - int (*start_hw)(struct iwl_trans *iwl_trans); - void (*op_mode_leave)(struct iwl_trans *iwl_trans); - int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, - bool run_in_rfkill); - void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); - void (*stop_device)(struct iwl_trans *trans); - - int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); - int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test, bool reset); - - int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); - - int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int queue); - void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, - struct sk_buff_head *skbs, bool is_flush); - - void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr); - - bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int queue_wdg_timeout); - void (*txq_disable)(struct iwl_trans *trans, int queue, - bool configure_scd); - /* 22000 functions */ - int (*txq_alloc)(struct iwl_trans *trans, u32 flags, - u32 sta_mask, u8 tid, - int size, unsigned int queue_wdg_timeout); - void (*txq_free)(struct iwl_trans *trans, int queue); - int (*rxq_dma_data)(struct iwl_trans *trans, int queue, - struct iwl_trans_rxq_dma_data *data); - - void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, - bool shared); - - int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm); - int (*wait_txq_empty)(struct iwl_trans *trans, int queue); - void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, - bool freeze); - - void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); - void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); - u32 (*read32)(struct iwl_trans *trans, u32 ofs); - u32 (*read_prph)(struct iwl_trans *trans, u32 ofs); - void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val); - int (*read_mem)(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); - int (*write_mem)(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords); - int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val); - void (*configure)(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg); - void (*set_pmi)(struct iwl_trans *trans, bool state); - int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership); - bool (*grab_nic_access)(struct iwl_trans *trans); - void (*release_nic_access)(struct iwl_trans *trans); - void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, - u32 value); - - struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, - u32 dump_mask, - const struct 
iwl_dump_sanitize_ops *sanitize_ops, - void *sanitize_ctx); - void (*debugfs_cleanup)(struct iwl_trans *trans); - void (*sync_nmi)(struct iwl_trans *trans); - int (*load_pnvm)(struct iwl_trans *trans, - const struct iwl_pnvm_image *pnvm_payloads, - const struct iwl_ucode_capabilities *capa); - void (*set_pnvm)(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa); - int (*load_reduce_power)(struct iwl_trans *trans, - const struct iwl_pnvm_image *payloads, - const struct iwl_ucode_capabilities *capa); - void (*set_reduce_power)(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa); - - void (*interrupts)(struct iwl_trans *trans, bool enable); - int (*imr_dma_data)(struct iwl_trans *trans, - u32 dst_addr, u64 src_addr, - u32 byte_cnt); - -}; - -/** * enum iwl_trans_state - state of the transport layer * * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed @@ -775,7 +601,7 @@ struct iwl_self_init_dram { * @imr_size: imr dram size received from fw * @sram_addr: sram address from debug tlv * @sram_size: sram size from debug tlv - * @imr2sram_remainbyte`: size remained after each dma transfer + * @imr2sram_remainbyte: size remained after each dma transfer * @imr_curr_addr: current dst address used during dma transfer * @imr_base_addr: imr address received from fw */ @@ -807,8 +633,6 @@ struct iwl_pc_data { * @n_dest_reg: num of reg_ops in %dbg_dest_tlv * @rec_on: true iff there is a fw debug recording currently active * @dest_tlv: points to the destination TLV for debug - * @conf_tlv: array of pointers to configuration TLVs for debug - * @trigger_tlv: array of pointers to triggers TLVs for debug * @lmac_error_event_table: addrs of lmacs error tables * @umac_error_event_table: addr of umac error table * @tcm_error_event_table: address(es) of TCM error table(s) @@ -822,12 +646,16 @@ struct iwl_pc_data { * @fw_mon: DRAM buffer for firmware monitor * @hw_error: equals true if hw error interrupt was received from the FW * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location + * @unsupported_region_msk: unsupported regions out of active_regions * @active_regions: active regions * @debug_info_tlv_list: list of debug info TLVs * @time_point: array of debug time points * @periodic_trig_list: periodic triggers list * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON * @ucode_preset: preset based on ucode + * @restart_required: indicates debug restart is required + * @last_tp_resetfw: last handling of reset during debug timepoint + * @imr_data: IMR debug data allocation * @dump_file_name_ext: dump file name extension * @dump_file_name_ext_valid: dump file name extension if valid or not * @num_pc: number of program counter for cpu @@ -839,8 +667,6 @@ struct iwl_trans_debug { bool rec_on; const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; - const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX]; - struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv; u32 lmac_error_event_table[2]; u32 umac_error_event_table; @@ -886,7 +712,9 @@ struct iwl_dma_ptr { struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; - u32 flags; + u32 flags: CMD_MODE_BITS; + /* sg_offset is valid if it is non-zero */ + u32 sg_offset: PAGE_SHIFT; u32 tbs; }; @@ -923,6 +751,7 @@ struct iwl_pcie_first_tb_buf { * @first_tb_dma: DMA address for the first_tb_bufs start * @entries: transmit entries (driver state) * @lock: queue lock + * @reclaim_lock: reclaim lock * @stuck_timer: timer that fires if 
queue gets stuck * @trans: pointer back to transport (for timer) * @need_update: indicates need to update read/write index @@ -930,6 +759,7 @@ struct iwl_pcie_first_tb_buf { * @wd_timeout: queue watchdog timeout (jiffies) - per queue * @frozen: tx stuck queue timer is frozen * @frozen_expiry_remainder: remember how long until the timer fires + * @block: queue is blocked * @bc_tbl: byte count table of the queue (relevant only for gen2 transport) * @write_ptr: 1-st empty entry (index) host_w * @read_ptr: last used entry (index) host_r @@ -938,6 +768,8 @@ struct iwl_pcie_first_tb_buf { * @id: queue id * @low_mark: low watermark, resume queue if free space more than this * @high_mark: high watermark, stop queue if free space less than this + * @overflow_q: overflow queue for handling frames that didn't fit on HW queue + * @overflow_tx: need to transmit from overflow * * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame * descriptors) and required locking structures. @@ -962,6 +794,8 @@ struct iwl_txq { struct iwl_pcie_txq_entry *entries; /* lock for syncing changes on the queue */ spinlock_t lock; + /* lock to prevent concurrent reclaim */ + spinlock_t reclaim_lock; unsigned long frozen_expiry_remainder; struct timer_list stuck_timer; struct iwl_trans *trans; @@ -985,68 +819,36 @@ struct iwl_txq { }; /** - * struct iwl_trans_txqs - transport tx queues data - * - * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) - * @page_offs: offset from skb->cb to mac header page pointer - * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer - * @queue_used - bit mask of used queues - * @queue_stopped - bit mask of stopped queues - * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler - * @queue_alloc_cmd_ver: queue allocation command version - */ -struct iwl_trans_txqs { - unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; - struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; - struct dma_pool *bc_pool; - size_t bc_tbl_size; - bool bc_table_dword; - u8 page_offs; - u8 dev_cmd_offs; - struct iwl_tso_hdr_page __percpu *tso_hdr_page; - - struct { - u8 fifo; - u8 q_id; - unsigned int wdg_timeout; - } cmd; - - struct { - u8 max_tbs; - u16 size; - u8 addr_size; - } tfd; - - struct iwl_dma_ptr scd_bc_tbls; - - u8 queue_alloc_cmd_ver; -}; - -/** * struct iwl_trans - transport common data * - * @csme_own - true if we couldn't get ownership on the device - * @ops - pointer to iwl_trans_ops - * @op_mode - pointer to the op_mode + * @csme_own: true if we couldn't get ownership on the device + * @op_mode: pointer to the op_mode * @trans_cfg: the trans-specific configuration part - * @cfg - pointer to the configuration - * @drv - pointer to iwl_drv + * @cfg: pointer to the configuration + * @drv: pointer to iwl_drv + * @state: current device state * @status: a bit-mask of transport status flags - * @dev - pointer to struct device * that represents the device + * @dev: pointer to struct device * that represents the device * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. 
- * @hw_rf_id a u32 with the device RF ID - * @hw_crf_id a u32 with the device CRF ID - * @hw_wfpm_id a u32 with the device wfpm ID + * @hw_rf_id: a u32 with the device RF ID + * @hw_cnv_id: a u32 with the device CNV ID + * @hw_crf_id: a u32 with the device CRF ID + * @hw_wfpm_id: a u32 with the device wfpm ID * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. + * @sku_id: the SKU identifier (for PNVM matching) + * @pnvm_loaded: indicates PNVM was loaded + * @hw_rev: the revision data of the HW * @hw_rev_step: The mac step of the HW * @pm_support: set to true in start_hw if link pm is supported * @ltr_enabled: set to true if the LTR is enabled * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed + * @reduce_power_loaded: indicates reduced power section was loaded * @failed_to_load_reduce_power_image: set to true if pnvm loading failed + * @command_groups: pointer to command group name list array + * @command_groups_size: array size of @command_groups * @wide_cmd_header: true when ucode supports wide command header format * @wait_command_queue: wait queue for sync commands * @num_rx_queues: number of RX queues allocated by the transport; @@ -1055,23 +857,40 @@ struct iwl_trans_txqs { * @iml: a pointer to the image loader itself * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. + * @dev_cmd_pool_name: name for the TX command allocation pool + * @dbgfs_dir: iwlwifi debugfs base dir for this device + * @sync_cmd_lockdep_map: lockdep map for checking sync commands * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before * starting the firmware, used for tracing * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the * start of the 802.11 header in the @rx_mpdu_cmd + * @dbg: additional debug data, see &struct iwl_trans_debug + * @init_dram: FW initialization DMA data * @system_pm_mode: the system-wide power management mode in use. * This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. - * @txqs: transport tx queues data. 
+ * @name: the device name * @mbx_addr_0_step: step address data 0 * @mbx_addr_1_step: step address data 1 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), * only valid for discrete (not integrated) NICs * @invalid_tx_cmd: invalid TX command buffer + * @reduced_cap_sku: reduced capability supported SKU + * @no_160: device not supporting 160 MHz + * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz + * @restart: restart worker data + * @restart.wk: restart worker + * @restart.mode: reset/restart error mode information + * @restart.during_reset: error occurred during previous software reset + * @me_recheck_wk: worker to recheck WiAMT/CSME presence + * @me_present: WiAMT/CSME is detected as present (1), not present (0) + * or unknown (-1, so can still use it as a boolean safely) + * @trans_specific: data for the specific transport this is allocated for/with + * @dsbr_urm_fw_dependent: switch to URM based on fw settings + * @dsbr_urm_permanent: switch to URM permanently */ struct iwl_trans { bool csme_own; - const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg_trans_params *trans_cfg; const struct iwl_cfg *cfg; @@ -1090,6 +909,11 @@ struct iwl_trans { u32 hw_id; char hw_id_str[52]; u32 sku_id[3]; + bool reduced_cap_sku; + u8 no_160:1, step_urm:1; + + u8 dsbr_urm_fw_dependent:1, + dsbr_urm_permanent:1; u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; @@ -1126,7 +950,6 @@ struct iwl_trans { enum iwl_plat_pm_mode system_pm_mode; const char *name; - struct iwl_trans_txqs txqs; u32 mbx_addr_0_step; u32 mbx_addr_1_step; @@ -1134,6 +957,15 @@ struct iwl_trans { struct iwl_dma_ptr invalid_tx_cmd; + struct { + struct work_struct wk; + struct iwl_fw_error_dump_mode mode; + bool during_reset; + } restart; + + struct delayed_work me_recheck_wk; + s8 me_present; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[] __aligned(sizeof(void *)); @@ -1142,101 +974,29 @@ struct iwl_trans { const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans); -static inline void iwl_trans_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) -{ - trans->op_mode = trans_cfg->op_mode; +void iwl_trans_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg); - trans->ops->configure(trans, trans_cfg); - WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); -} +int iwl_trans_start_hw(struct iwl_trans *trans); -static inline int iwl_trans_start_hw(struct iwl_trans *trans) -{ - might_sleep(); +void iwl_trans_op_mode_leave(struct iwl_trans *trans); - return trans->ops->start_hw(trans); -} +void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr); -static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) -{ - might_sleep(); +int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, + bool run_in_rfkill); - if (trans->ops->op_mode_leave) - trans->ops->op_mode_leave(trans); - - trans->op_mode = NULL; - - trans->state = IWL_TRANS_NO_FW; -} - -static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) -{ - might_sleep(); - - trans->state = IWL_TRANS_FW_ALIVE; - - trans->ops->fw_alive(trans, scd_addr); -} - -static inline int iwl_trans_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, - bool run_in_rfkill) -{ - int ret; +void iwl_trans_stop_device(struct iwl_trans *trans); - might_sleep(); +int iwl_trans_d3_suspend(struct iwl_trans 
*trans, bool test, bool reset); - WARN_ON_ONCE(!trans->rx_mpdu_cmd); +int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, + bool test, bool reset); - clear_bit(STATUS_FW_ERROR, &trans->status); - ret = trans->ops->start_fw(trans, fw, run_in_rfkill); - if (ret == 0) - trans->state = IWL_TRANS_FW_STARTED; - - return ret; -} - -static inline void iwl_trans_stop_device(struct iwl_trans *trans) -{ - might_sleep(); - - trans->ops->stop_device(trans); - - trans->state = IWL_TRANS_NO_FW; -} - -static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, - bool reset) -{ - might_sleep(); - if (!trans->ops->d3_suspend) - return -EOPNOTSUPP; - - return trans->ops->d3_suspend(trans, test, reset); -} - -static inline int iwl_trans_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status, - bool test, bool reset) -{ - might_sleep(); - if (!trans->ops->d3_resume) - return -EOPNOTSUPP; - - return trans->ops->d3_resume(trans, status, test, reset); -} - -static inline struct iwl_trans_dump_data * +struct iwl_trans_dump_data * iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, - void *sanitize_ctx) -{ - if (!trans->ops->dump_data) - return NULL; - return trans->ops->dump_data(trans, dump_mask, - sanitize_ops, sanitize_ctx); -} + void *sanitize_ctx); static inline struct iwl_device_tx_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) @@ -1252,109 +1012,31 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } -static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int queue) -{ - if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) - return -EIO; - - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } - - return trans->ops->tx(trans, skb, dev_cmd, queue); -} - -static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, - int ssn, struct sk_buff_head *skbs, - bool is_flush) -{ - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return; - } +int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int queue); - trans->ops->reclaim(trans, queue, ssn, skbs, is_flush); -} +void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, + struct sk_buff_head *skbs, bool is_flush); -static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, - int ptr) -{ - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return; - } +void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr); - trans->ops->set_q_ptrs(trans, queue, ptr); -} +void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd); -static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, - bool configure_scd) -{ - trans->ops->txq_disable(trans, queue, configure_scd); -} +bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int queue_wdg_timeout); -static inline bool -iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int queue_wdg_timeout) -{ - might_sleep(); - - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - 
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return false; - } +int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data); - return trans->ops->txq_enable(trans, queue, ssn, - cfg, queue_wdg_timeout); -} +void iwl_trans_txq_free(struct iwl_trans *trans, int queue); -static inline int -iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, - struct iwl_trans_rxq_dma_data *data) -{ - if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) - return -EOPNOTSUPP; +int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, + u8 tid, int size, unsigned int wdg_timeout); - return trans->ops->rxq_dma_data(trans, queue, data); -} - -static inline void -iwl_trans_txq_free(struct iwl_trans *trans, int queue) -{ - if (WARN_ON_ONCE(!trans->ops->txq_free)) - return; - - trans->ops->txq_free(trans, queue); -} - -static inline int -iwl_trans_txq_alloc(struct iwl_trans *trans, - u32 flags, u32 sta_mask, u8 tid, - int size, unsigned int wdg_timeout) -{ - might_sleep(); - - if (WARN_ON_ONCE(!trans->ops->txq_alloc)) - return -EOPNOTSUPP; - - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } - - return trans->ops->txq_alloc(trans, flags, sta_mask, tid, - size, wdg_timeout); -} - -static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, - int queue, bool shared_mode) -{ - if (trans->ops->txq_set_shared_mode) - trans->ops->txq_set_shared_mode(trans, queue, shared_mode); -} +void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, + int txq_id, bool shared_mode); static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, int fifo, int sta_id, int tid, @@ -1387,94 +1069,43 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo, iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); } -static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, - unsigned long txqs, - bool freeze) -{ - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return; - } - - if (trans->ops->freeze_txq_timer) - trans->ops->freeze_txq_timer(trans, txqs, freeze); -} - -static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, - u32 txqs) -{ - if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) - return -EOPNOTSUPP; +void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze); - /* No need to wait if the firmware is not alive */ - if (trans->state != IWL_TRANS_FW_ALIVE) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } +int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs); - return trans->ops->wait_tx_queues_empty(trans, txqs); -} +int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue); -static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) -{ - if (WARN_ON_ONCE(!trans->ops->wait_txq_empty)) - return -EOPNOTSUPP; +void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val); - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } +void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val); - return trans->ops->wait_txq_empty(trans, queue); -} +u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs); -static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) -{ - 
trans->ops->write8(trans, ofs, val); -} +u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs); -static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) -{ - trans->ops->write32(trans, ofs, val); -} +void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); -static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) -{ - return trans->ops->read32(trans, ofs); -} +int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords); -static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) -{ - return trans->ops->read_prph(trans, ofs); -} - -static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, - u32 val) -{ - return trans->ops->write_prph(trans, ofs, val); -} +int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val); -static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) -{ - return trans->ops->read_mem(trans, addr, buf, dwords); -} +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_trans_debugfs_cleanup(struct iwl_trans *trans); +#endif -#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ - do { \ - if (__builtin_constant_p(bufsize)) \ - BUILD_BUG_ON((bufsize) % sizeof(u32)); \ - iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ - } while (0) +#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ + ({ \ + if (__builtin_constant_p(bufsize)) \ + BUILD_BUG_ON((bufsize) % sizeof(u32)); \ + iwl_trans_read_mem(trans, addr, buf, \ + (bufsize) / sizeof(u32)); \ + }) -static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans, - u32 dst_addr, u64 src_addr, - u32 byte_cnt) -{ - if (trans->ops->imr_dma_data) - return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt); - return 0; -} +int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr, + u64 src_addr, u32 byte_cnt); static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) { @@ -1486,11 +1117,8 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) return value; } -static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords) -{ - return trans->ops->write_mem(trans, addr, buf, dwords); -} +int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords); static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, u32 val) @@ -1498,92 +1126,92 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, return iwl_trans_write_mem(trans, addr, &val, 1); } -static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) -{ - if (trans->ops->set_pmi) - trans->ops->set_pmi(trans, state); -} +void iwl_trans_set_pmi(struct iwl_trans *trans, bool state); -static inline int iwl_trans_sw_reset(struct iwl_trans *trans, - bool retake_ownership) -{ - if (trans->ops->sw_reset) - return trans->ops->sw_reset(trans, retake_ownership); - return 0; -} +int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership); -static inline void -iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) -{ - trans->ops->set_bits_mask(trans, reg, mask, value); -} +void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value); + +bool _iwl_trans_grab_nic_access(struct iwl_trans *trans); #define iwl_trans_grab_nic_access(trans) \ __cond_lock(nic_access, \ - likely((trans)->ops->grab_nic_access(trans))) + likely(_iwl_trans_grab_nic_access(trans))) -static inline void 
__releases(nic_access) -iwl_trans_release_nic_access(struct iwl_trans *trans) +void __releases(nic_access) +iwl_trans_release_nic_access(struct iwl_trans *trans); + +static inline void iwl_trans_schedule_reset(struct iwl_trans *trans, + enum iwl_fw_error_type type) { - trans->ops->release_nic_access(trans); - __release(nic_access); + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return; + + trans->restart.mode.type = type; + trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER; + + set_bit(STATUS_RESET_PENDING, &trans->status); + + /* + * keep track of whether or not this happened while resetting, + * by the time the worker runs it might have finished + */ + trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET, + &trans->status); + queue_work(system_unbound_wq, &trans->restart.wk); } -static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync) +static inline void iwl_trans_fw_error(struct iwl_trans *trans, + enum iwl_fw_error_type type) { if (WARN_ON_ONCE(!trans->op_mode)) return; /* prevent double restarts due to the same erroneous FW */ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { - iwl_op_mode_nic_error(trans->op_mode, sync); trans->state = IWL_TRANS_NO_FW; + iwl_op_mode_nic_error(trans->op_mode, type); + iwl_trans_schedule_reset(trans, type); } } -static inline bool iwl_trans_fw_running(struct iwl_trans *trans) +static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans, + enum iwl_fw_error_type type) { - return trans->state == IWL_TRANS_FW_ALIVE; + if (WARN_ON_ONCE(!trans->op_mode)) + return; + + set_bit(STATUS_IN_SW_RESET, &trans->status); + + if (!trans->op_mode->ops->sw_reset || + !trans->op_mode->ops->sw_reset(trans->op_mode, type)) + clear_bit(STATUS_IN_SW_RESET, &trans->status); } -static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) +static inline bool iwl_trans_fw_running(struct iwl_trans *trans) { - if (trans->ops->sync_nmi) - trans->ops->sync_nmi(trans); + return trans->state == IWL_TRANS_FW_ALIVE; } +void iwl_trans_sync_nmi(struct iwl_trans *trans); + void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, u32 sw_err_bit); -static inline int iwl_trans_load_pnvm(struct iwl_trans *trans, - const struct iwl_pnvm_image *pnvm_data, - const struct iwl_ucode_capabilities *capa) -{ - return trans->ops->load_pnvm(trans, pnvm_data, capa); -} +int iwl_trans_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + const struct iwl_ucode_capabilities *capa); -static inline void iwl_trans_set_pnvm(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) -{ - if (trans->ops->set_pnvm) - trans->ops->set_pnvm(trans, capa); -} +void iwl_trans_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); -static inline int iwl_trans_load_reduce_power - (struct iwl_trans *trans, - const struct iwl_pnvm_image *payloads, - const struct iwl_ucode_capabilities *capa) -{ - return trans->ops->load_reduce_power(trans, payloads, capa); -} +int iwl_trans_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa); -static inline void -iwl_trans_set_reduce_power(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) -{ - if (trans->ops->set_reduce_power) - trans->ops->set_reduce_power(trans, capa); -} +void iwl_trans_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) { @@ -1591,10 +1219,11
@@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED; } -static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable) +void iwl_trans_interrupts(struct iwl_trans *trans, bool enable); + +static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans) { - if (trans->ops->interrupts) - trans->ops->interrupts(trans, enable); + clear_bit(STATUS_IN_SW_RESET, &trans->status); } /***************************************************** @@ -1602,7 +1231,6 @@ static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable) *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans); int iwl_trans_init(struct iwl_trans *trans); void iwl_trans_free(struct iwl_trans *trans); @@ -1612,11 +1240,29 @@ static inline bool iwl_trans_is_hw_error_value(u32 val) return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50); } +void iwl_trans_free_restart_list(void); + /***************************************************** -* driver (transport) register/unregister functions -******************************************************/ + * PCIe handling + *****************************************************/ int __must_check iwl_pci_register_driver(void); void iwl_pci_unregister_driver(void); -void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan); + +/* Note: order matters */ +enum iwl_reset_mode { + /* upper level modes: */ + IWL_RESET_MODE_SW_RESET, + IWL_RESET_MODE_REPROBE, + /* PCIE level modes: */ + IWL_RESET_MODE_REMOVE_ONLY, + IWL_RESET_MODE_RESCAN, + IWL_RESET_MODE_FUNC_RESET, + IWL_RESET_MODE_PROD_RESET, +}; + +void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode); + +int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd); #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c new file mode 100644 index 000000000000..b14ec98e28b6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 Intel Corporation + */ +#include <net/gso.h> +#include <linux/ieee80211.h> +#include <net/ip.h> + +#include "iwl-drv.h" +#include "iwl-utils.h" + +#ifdef CONFIG_INET +int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, + netdev_features_t netdev_flags, + struct sk_buff_head *mpdus_skbs) +{ + struct sk_buff *tmp, *next; + struct ieee80211_hdr *hdr = (void *)skb->data; + char cb[sizeof(skb->cb)]; + u16 i = 0; + unsigned int tcp_payload_len; + unsigned int mss = skb_shinfo(skb)->gso_size; + bool ipv4 = (skb->protocol == htons(ETH_P_IP)); + bool qos = ieee80211_is_data_qos(hdr->frame_control); + u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; + + skb_shinfo(skb)->gso_size = num_subframes * mss; + memcpy(cb, skb->cb, sizeof(cb)); + + next = skb_gso_segment(skb, netdev_flags); + skb_shinfo(skb)->gso_size = mss; + skb_shinfo(skb)->gso_type = ipv4 ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6; + + if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM) + return -ENOMEM; + + if (WARN_ONCE(IS_ERR(next), + "skb_gso_segment error: %d\n", (int)PTR_ERR(next))) + return PTR_ERR(next); + + if (next) + consume_skb(skb); + + skb_list_walk_safe(next, tmp, next) { + memcpy(tmp->cb, cb, sizeof(tmp->cb)); + /* + * Compute the length of all the data added for the A-MSDU. + * This will be used to compute the length to write in the TX + * command. We have: SNAP + IP + TCP for n - 1 subframes and + * ETH header for n subframes. + */ + tcp_payload_len = skb_tail_pointer(tmp) - + skb_transport_header(tmp) - + tcp_hdrlen(tmp) + tmp->data_len; + + if (ipv4) + ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); + + if (tcp_payload_len > mss) { + skb_shinfo(tmp)->gso_size = mss; + skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 : + SKB_GSO_TCPV6; + } else { + if (qos) { + u8 *qc; + + if (ipv4) + ip_send_check(ip_hdr(tmp)); + + qc = ieee80211_get_qos_ctl((void *)tmp->data); + *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + } + skb_shinfo(tmp)->gso_size = 0; + } + + skb_mark_not_on_list(tmp); + __skb_queue_tail(mpdus_skbs, tmp); + i++; + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_tx_tso_segment); +#endif /* CONFIG_INET */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.h b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h new file mode 100644 index 000000000000..8f1f11d06fbe --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_utils_h__ +#define __iwl_utils_h__ + +#include <net/cfg80211.h> + +#ifdef CONFIG_INET +/** + * iwl_tx_tso_segment - Segments a TSO packet into subframes for A-MSDU. + * @skb: buffer to segment. + * @num_subframes: number of subframes to create. + * @netdev_flags: netdev feature flags. + * @mpdus_skbs: list to hold the segmented subframes. + * + * This function segments a large TCP packet into subframes. + * Subframes are added to the @mpdus_skbs list. + * + * Returns: 0 on success and a negative value on failure.
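 *
 * A minimal caller sketch (illustrative only, not part of this patch;
 * num_subframes and netdev_features stand in for the caller's A-MSDU
 * sizing and feature mask, and tx_one_mpdu() is a hypothetical
 * per-MPDU TX hook):
 *
 *	struct sk_buff_head mpdus_skbs;
 *	struct sk_buff *mpdu;
 *	int ret;
 *
 *	__skb_queue_head_init(&mpdus_skbs);
 *	ret = iwl_tx_tso_segment(skb, num_subframes, netdev_features,
 *				 &mpdus_skbs);
 *	if (ret)
 *		return ret;
 *	while ((mpdu = __skb_dequeue(&mpdus_skbs)))
 *		tx_one_mpdu(mpdu);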
+ */ +int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, + netdev_features_t netdev_flags, + struct sk_buff_head *mpdus_skbs); +#else +static inline +int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, + netdev_features_t netdev_flags, + struct sk_buff_head *mpdus_skbs) +{ + WARN_ON(1); + + return -1; +} +#endif /* CONFIG_INET */ + +static inline +u32 iwl_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) +{ + struct ieee80211_mgmt *mgmt = (void *)beacon; + const u8 *ie; + + if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon))) + return 0; + + frame_size -= mgmt->u.beacon.variable - beacon; + + ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size); + if (!ie) + return 0; + + return ie - beacon; +} + +#endif /* __iwl_utils_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/Makefile b/drivers/net/wireless/intel/iwlwifi/mei/Makefile index 8e3ef0347db7..e05f9421be4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mei/Makefile @@ -5,4 +5,4 @@ iwlmei-y += net.o iwlmei-$(CONFIG_IWLWIFI_DEVICE_TRACING) += trace.o CFLAGS_trace.o := -I$(src) -ccflags-y += -I $(srctree)/$(src)/../ +ccflags-y += -I $(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h index 1f3c885aeb65..2081775e0ec9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h +++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2021-2023 Intel Corporation + * Copyright (C) 2021-2024 Intel Corporation */ #ifndef __iwl_mei_h__ @@ -283,6 +283,16 @@ struct iwl_mei_colloc_info { u8 bssid[ETH_ALEN]; }; +/** + * enum iwl_mei_sap_version - SAP version + * @IWL_MEI_SAP_VERSION_3: SAP version 3 + * @IWL_MEI_SAP_VERSION_4: SAP version 4 + */ +enum iwl_mei_sap_version { + IWL_MEI_SAP_VERSION_3 = 3, + IWL_MEI_SAP_VERSION_4 = 4, +}; + /* * struct iwl_mei_ops - driver's operations called by iwlmei * Operations will not be called more than once concurrently. @@ -456,8 +466,11 @@ void iwl_mei_device_state(bool up); /** * iwl_mei_pldr_req() - must be called before loading the fw * - * Return: 0 if the PLDR flow was successful and the fw can be loaded, negative - * value otherwise. + * Requests from the ME that it releases its potential bus access to + * the WiFi NIC so that the device can safely undergo product reset. 
+ * + * Return: 0 if the request was successful and the device can be + * reset, a negative error value otherwise */ int iwl_mei_pldr_req(void); @@ -488,7 +501,7 @@ static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_add static inline void iwl_mei_set_country_code(u16 mcc) {} -static inline void iwl_mei_set_power_limit(__le16 *power_limit) +static inline void iwl_mei_set_power_limit(const __le16 *power_limit) {} static inline int iwl_mei_register(void *priv, diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index 1dd9106c6513..dce0b7cf7b26 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2021-2023 Intel Corporation + * Copyright (C) 2021-2024 Intel Corporation */ #include <linux/etherdevice.h> @@ -58,7 +58,6 @@ bool iwl_mei_is_connected(void) } EXPORT_SYMBOL_GPL(iwl_mei_is_connected); -#define SAP_VERSION 3 #define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */ struct iwl_sap_q_ctrl_blk { @@ -110,16 +109,19 @@ struct iwl_sap_shared_mem_ctrl_blk { #define SAP_H2M_DATA_Q_SZ 48256 #define SAP_M2H_DATA_Q_SZ 24128 -#define SAP_H2M_NOTIF_Q_SZ 2240 +#define SAP_H2M_NOTIF_Q_SZ_VER3 2240 +#define SAP_H2M_NOTIF_Q_SZ_VER4 32768 #define SAP_M2H_NOTIF_Q_SZ 62720 -#define _IWL_MEI_SAP_SHARED_MEM_SZ \ +#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER3 \ (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ - SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \ + SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER3 + \ SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) -#define IWL_MEI_SAP_SHARED_MEM_SZ \ - (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE)) +#define _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 \ + (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ + SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ_VER4 + \ + SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) struct iwl_mei_shared_mem_ptrs { struct iwl_sap_shared_mem_ctrl_blk *ctrl; @@ -206,6 +208,7 @@ struct iwl_mei { * @mac_address: interface MAC address. * @nvm_address: NVM MAC address. * @priv: A pointer to iwlwifi. + * @sap_version: The SAP version to use. enum iwl_mei_sap_version. * * This used to cache the configurations coming from iwlwifi's way. The data * is cached here so that we can buffer the configuration even if we don't have @@ -220,6 +223,7 @@ struct iwl_mei_cache { u16 mcc; u8 mac_address[6]; u8 nvm_address[6]; + enum iwl_mei_sap_version sap_version; void *priv; }; @@ -238,14 +242,17 @@ static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev) #define HBM_DMA_BUF_ID_WLAN 1 -static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) +static int iwl_mei_alloc_mem_for_version(struct mei_cl_device *cldev, + enum iwl_mei_sap_version version) { struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; + u32 mem_size = roundup(version == IWL_MEI_SAP_VERSION_4 ? 
+ _IWL_MEI_SAP_SHARED_MEM_SZ_VER4 : + _IWL_MEI_SAP_SHARED_MEM_SZ_VER3, PAGE_SIZE); - mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, - IWL_MEI_SAP_SHARED_MEM_SZ); - + iwl_mei_cache.sap_version = version; + mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, mem_size); if (IS_ERR(mem->ctrl)) { int ret = PTR_ERR(mem->ctrl); @@ -254,11 +261,30 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) return ret; } - memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ); + memset(mem->ctrl, 0, mem_size); return 0; } +static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) +{ + int ret; + + /* + * SAP version 4 uses a larger Host to MEI notif queue. + * Since it is unknown at this stage which SAP version is used by the + * CSME firmware on this platform, try to allocate the version 4 first. + * If the CSME firmware uses version 3, this allocation is expected to + * fail because the CSME firmware allocated less memory for our driver. + */ + ret = iwl_mei_alloc_mem_for_version(cldev, IWL_MEI_SAP_VERSION_4); + if (ret) + ret = iwl_mei_alloc_mem_for_version(cldev, + IWL_MEI_SAP_VERSION_3); + + return ret; +} + static void iwl_mei_init_shared_mem(struct iwl_mei *mei) { struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; @@ -277,7 +303,9 @@ static void iwl_mei_init_shared_mem(struct iwl_mei *mei) h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = cpu_to_le32(SAP_H2M_DATA_Q_SZ); h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = - cpu_to_le32(SAP_H2M_NOTIF_Q_SZ); + iwl_mei_cache.sap_version == IWL_MEI_SAP_VERSION_3 ? + cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER3) : + cpu_to_le32(SAP_H2M_NOTIF_Q_SZ_VER4); m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = cpu_to_le32(SAP_M2H_DATA_Q_SZ); m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = @@ -647,7 +675,7 @@ iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev, return; } - if (rsp->supported_version != SAP_VERSION) { + if (rsp->supported_version != iwl_mei_cache.sap_version) { dev_err(&cldev->dev, "didn't get the expected version: got %d\n", rsp->supported_version); @@ -1281,7 +1309,7 @@ static int iwl_mei_send_start(struct mei_cl_device *cldev) .hdr.type = cpu_to_le32(SAP_ME_MSG_START), .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), .hdr.len = cpu_to_le32(sizeof(msg)), - .supported_versions[0] = SAP_VERSION, + .supported_versions[0] = iwl_mei_cache.sap_version, .init_data_seq_num = cpu_to_le16(0x100), .init_notif_seq_num = cpu_to_le16(0x800), }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 593fe28d89cf..59751f123571 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IWLMVM) += iwlmvm.o +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o rs-fw.o @@ -12,7 +13,7 @@ iwlmvm-y += ptp.o iwlmvm-y += time-sync.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-$(CONFIG_PM) += d3.o +iwlmvm-$(CONFIG_PM_SLEEP) += d3.o iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o -ccflags-y += -I $(srctree)/$(src)/../ +subdir-ccflags-y += -I $(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c index 458b97930059..58e9a940024d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2012-2014, 2020 Intel Corporation * Copyright (C) 2016 Intel Deutschland GmbH - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022, 2024 Intel Corporation */ #include <net/mac80211.h> #include "fw-api.h" @@ -158,9 +158,8 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ret = iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt, false); - if (!ret) - if (iwl_mvm_sf_update(mvm, vif, true)) - IWL_ERR(mvm, "Failed to update SF state\n"); + if (!ret && iwl_mvm_sf_update(mvm, vif, true)) + IWL_ERR(mvm, "Failed to update SF state\n"); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 9fe1761691ec..21641d41a958 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation + * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH */ #include <linux/ieee80211.h> @@ -181,6 +181,9 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, struct iwl_mvm_sta *mvmsta; u32 value; + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return 0; + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) return 0; @@ -205,7 +208,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, } struct iwl_bt_iterator_data { - struct iwl_bt_coex_profile_notif *notif; + struct iwl_bt_coex_prof_old_notif *notif; struct iwl_mvm *mvm; struct ieee80211_chanctx_conf *primary; struct ieee80211_chanctx_conf *secondary; @@ -216,15 +219,13 @@ struct iwl_bt_iterator_data { static inline void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *link_info, bool enable, int rssi) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->bf_data.last_bt_coex_event = rssi; - mvmvif->bf_data.bt_coex_max_thold = + link_info->bf_data.last_bt_coex_event = rssi; + link_info->bf_data.bt_coex_max_thold = enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; - mvmvif->bf_data.bt_coex_min_thold = + link_info->bf_data.bt_coex_min_thold = enable ? 
-IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; } @@ -252,6 +253,93 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm, swap(data->primary, data->secondary); } +/* + * This function receives the LB link id and checks if eSR should be + * enabled or disabled (due to BT coex) + */ +bool +iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + s32 link_rssi, + bool primary) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool have_wifi_loss_rate = + iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + BT_PROFILE_NOTIFICATION, 0) > 4 || + iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP, + PROFILE_NOTIF, 0) >= 1; + u8 wifi_loss_mid_high_rssi; + u8 wifi_loss_low_rssi; + u8 wifi_loss_rate; + + if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP, + PROFILE_NOTIF, 0) >= 1) { + /* For now, we consider 2.4 GHz band / ANT_A only */ + wifi_loss_mid_high_rssi = + mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0]; + wifi_loss_low_rssi = + mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0]; + } else { + wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi; + wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi; + } + + if (wifi_loss_low_rssi == BT_OFF) + return true; + + if (primary) + return false; + + /* The feature is not supported */ + if (!have_wifi_loss_rate) + return true; + + + /* + * In case we don't know the RSSI - take the lower wifi loss, + * so we will more likely enter eSR, and if RSSI is low - + * we will get an update on this and exit eSR. + */ + if (!link_rssi) + wifi_loss_rate = wifi_loss_mid_high_rssi; + + else if (mvmvif->esr_active) + /* RSSI needs to get really low to disable eSR... */ + wifi_loss_rate = + link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ? + wifi_loss_low_rssi : + wifi_loss_mid_high_rssi; + else + /* ...And really high before we enable it back */ + wifi_loss_rate = + link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ? + wifi_loss_low_rssi : + wifi_loss_mid_high_rssi; + + return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH; +} + +void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int link_id) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id]; + + if (!ieee80211_vif_is_mld(vif) || + !iwl_mvm_vif_from_mac80211(vif)->authorized || + WARN_ON(!link)) + return; + + if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, + (s8)link->beacon_stats.avg_signal, + link_id == iwl_mvm_get_primary_link(vif))) + /* In case we decided to exit eSR - stay with the primary */ + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX, + iwl_mvm_get_primary_link(vif)); +} + static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_bt_iterator_data *data, @@ -291,12 +379,14 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, smps_mode, link_id); iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); - /* FIXME: should this be per link? 
*/ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, + 0); } return; } + iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC; else @@ -385,13 +475,12 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF || !vif->cfg.assoc) { iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); - /* FIXME: should this be per link? */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, 0); return; } /* try to get the avg rssi from fw */ - ave_rssi = mvmvif->bf_data.ave_beacon_signal; + ave_rssi = link_info->bf_data.ave_beacon_signal; /* if the RSSI isn't valid, fake it is very low */ if (!ave_rssi) @@ -407,7 +496,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, } /* Begin to monitor the RSSI: it may influence the reduced Tx power */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); + iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, true, ave_rssi); } /* must be called under rcu_read_lock */ @@ -436,6 +525,32 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id); } +/* must be called under rcu_read_lock */ +static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = _data; + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + for_each_vif_active_link(vif, link_conf, link_id) { + struct ieee80211_chanctx_conf *chanctx_conf = + rcu_dereference_check(link_conf->chanctx_conf, + lockdep_is_held(&mvm->mutex)); + + if ((!chanctx_conf || + chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) + continue; + + iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id); + } +} + static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) { struct iwl_bt_iterator_data data = { @@ -454,6 +569,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_notif_iterator, &data); + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + rcu_read_unlock(); + return; + } + iwl_mvm_bt_coex_tcm_based_ci(mvm, &data); if (data.primary) { @@ -513,11 +633,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) } } -void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) +void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; + struct iwl_bt_coex_prof_old_notif *notif = (void *)pkt->data; IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); @@ -534,6 +654,22 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, iwl_mvm_bt_coex_notif_handle(mvm); } +void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + const struct iwl_rx_packet *pkt = rxb_addr(rxb); + const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data; + + lockdep_assert_held(&mvm->mutex); + + mvm->last_bt_wifi_loss = *notif; + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + 
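[Editor's note: illustrative sketch, not part of the patch.] The eSR decision in iwl_mvm_bt_coex_calculate_esr_mode() above is deliberately hysteretic: while eSR is active, the pessimistic low-RSSI wifi-loss figure only kicks in once the link RSSI falls to -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH (-69 dBm), but while eSR is off it already applies at -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH (-63 dBm), so the link must climb well clear of the exit point before eSR is re-entered and does not flap around a single threshold. A minimal stand-alone rendering of that shape, with the constants inlined from the constants.h hunk below and a function name of our choosing:

#include <stdbool.h>

/* Pick the wifi-loss bucket the way the driver does: the bar for
 * keeping eSR (-69 dBm) is lower than the bar for re-entering it
 * (-63 dBm).  Returns true when the low-RSSI loss figure applies.
 */
static bool esr_uses_low_rssi_loss_figure(int rssi_dbm, bool esr_active)
{
        int thresh_dbm = esr_active ? -69 : -63;

        return rssi_dbm <= thresh_dbm;
}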
iwl_mvm_bt_coex_notif_iterator, + mvm); +} + void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data rssi_event) { @@ -550,7 +686,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * Rssi update while not associated - can happen since the statistics * are handled asynchronously */ - if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) + if (mvmvif->deflink.ap_sta_id == IWL_INVALID_STA) return; /* No BT - reports should be disabled */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index c832068b5718..776600ddaea6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH - * Copyright (C) 2013-2014, 2018-2023 Intel Corporation + * Copyright (C) 2013-2014, 2018-2024 Intel Corporation * Copyright (C) 2015 Intel Deutschland GmbH */ #ifndef __MVM_CONSTANTS_H @@ -11,6 +11,15 @@ #include "fw-api.h" #define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20 +#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69 +#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63 +#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0 +#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30 +#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5 +#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5 +#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 15 +#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11 +#define IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH -72 #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) @@ -18,7 +27,7 @@ #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ -#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 +#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ @@ -50,8 +59,6 @@ #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 -#define IWL_MVM_HW_CSUM_DISABLE 0 -#define IWL_MVM_PARSE_NVM 0 #define IWL_MVM_ADWELL_ENABLE 1 #define IWL_MVM_ADWELL_MAX_BUDGET 0 #define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */ @@ -95,6 +102,7 @@ #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true #define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false +#define IWL_MVM_FTM_TEST_INCORRECT_SAC false #define IWL_MVM_FTM_R2I_MAX_REP 7 #define IWL_MVM_FTM_I2R_MAX_REP 7 #define IWL_MVM_FTM_R2I_MAX_STS 1 @@ -104,12 +112,11 @@ #define IWL_MVM_FTM_INITIATOR_SECURE_LTF false #define IWL_MVM_FTM_RESP_NDP_SUPPORT true #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true -#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5 +#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7 #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20 -#define IWL_MVM_USE_NSSN_SYNC 0 #define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false #define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40 /* 20016 pSec is 6 meter RTT, meaning 3 meter range */ @@ -119,6 +126,17 @@ #define 
IWL_MVM_DISABLE_AP_FILS false #define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ +#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16 #define IWL_MVM_AUTO_EML_ENABLE true +#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67 +#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71 +#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64 +#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67 +#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61 +#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74 +#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58 +#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61 + +#define IWL_MVM_ENTER_ESR_TPT_THRESH 400 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 05b64176859e..82ca7f8b1bb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -450,9 +450,9 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw, } static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *mvm_link) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM, IWL_FW_CMD_VER_UNKNOWN); int ret; @@ -461,16 +461,14 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, struct wowlan_key_rsc_v5_data data = {}; int i; - data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL); + data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL); if (!data.rsc) return -ENOMEM; - memset(data.rsc, 0xff, sizeof(*data.rsc)); - for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) data.rsc->mcast_key_id_map[i] = IWL_MCAST_KEY_MAP_INVALID; - data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id); + data.rsc->sta_id = cpu_to_le32(mvm_link->ap_sta_id); ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_rsc_v5_data, @@ -494,7 +492,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, if (ver == 4) { size = sizeof(*data.rsc_tsc); data.rsc_tsc->sta_id = - cpu_to_le32(mvmvif->deflink.ap_sta_id); + cpu_to_le32(mvm_link->ap_sta_id); } else { /* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */ size = sizeof(data.rsc_tsc->params); @@ -597,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, void *_data) { struct wowlan_key_gtk_type_iter *data = _data; + __le32 *cipher = NULL; + + if (key->keyidx == 4 || key->keyidx == 5) + cipher = &data->kek_kck_cmd->igtk_cipher; + if (key->keyidx == 6 || key->keyidx == 7) + cipher = &data->kek_kck_cmd->bigtk_cipher; switch (key->cipher) { default: @@ -608,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + if (cipher) + *cipher = cpu_to_le32(STA_KEY_FLG_GCMP); return; case WLAN_CIPHER_SUITE_AES_CMAC: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + if (cipher) + *cipher = cpu_to_le32(STA_KEY_FLG_CCM); return; case WLAN_CIPHER_SUITE_CCMP: if (!sta) @@ -668,10 +675,9 @@ static int iwl_mvm_send_patterns_v1(struct 
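[Editor's note: illustrative sketch, not part of the patch.] The iwl_mvm_wowlan_gtk_type_iter() change above routes each key by the standard 802.11 key-index convention: indices 0-3 are data (pairwise/group) keys, 4-5 are IGTK slots, and 6-7 are BIGTK slots, so only the matching cipher field of the KEK/KCK command gets filled in. The convention stated as a tiny helper (names are ours, not driver API):

enum mcast_key_slot { SLOT_GTK, SLOT_IGTK, SLOT_BIGTK, SLOT_NONE };

static enum mcast_key_slot mcast_slot_for_keyidx(int keyidx)
{
        if (keyidx >= 0 && keyidx <= 3)
                return SLOT_GTK;        /* data keys */
        if (keyidx == 4 || keyidx == 5)
                return SLOT_IGTK;       /* management protection */
        if (keyidx == 6 || keyidx == 7)
                return SLOT_BIGTK;      /* beacon protection */
        return SLOT_NONE;
}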
iwl_mvm *mvm, } static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *mvm_link, struct cfg80211_wowlan *wowlan) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_wowlan_patterns_cmd *pattern_cmd; struct iwl_host_cmd cmd = { .id = WOWLAN_PATTERNS, @@ -693,7 +699,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, pattern_cmd->n_patterns = wowlan->n_patterns; if (ver >= 3) - pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id; + pattern_cmd->sta_id = mvm_link->ap_sta_id; for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); @@ -723,14 +729,15 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_chanctx_conf *ctx; u8 chains_static, chains_dynamic; - struct cfg80211_chan_def chandef; + struct cfg80211_chan_def chandef, ap_def; int ret, i; struct iwl_binding_cmd_v1 binding_cmd = {}; struct iwl_time_quota_cmd quota_cmd = {}; struct iwl_time_quota_data *quota; u32 status; - if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) + if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) || + ieee80211_vif_is_mld(vif))) return -EINVAL; /* add back the PHY */ @@ -744,12 +751,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EINVAL; } chandef = ctx->def; + ap_def = ctx->ap; chains_static = ctx->rx_chains_static; chains_dynamic = ctx->rx_chains_dynamic; rcu_read_unlock(); ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef, - chains_static, chains_dynamic); + &ap_def, chains_static, chains_dynamic); if (ret) return ret; @@ -914,7 +922,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) static int iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, - struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, struct ieee80211_sta *ap_sta) { @@ -927,6 +935,9 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, wowlan_config_cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; + if (ap_sta->mfp) + wowlan_config_cmd->flags |= IS_11W_ASSOC; + if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) { /* Query the last used seqno and set it */ int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); @@ -937,7 +948,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); } - iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); + if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 7) + iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); if (wowlan->disconnect) wowlan_config_cmd->wakeup_filter |= @@ -987,7 +999,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, } static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *mvm_link) { bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); @@ -1016,7 +1029,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, return -EIO; } - ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif); + ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif, mvm_link); if (ret) return ret; @@ -1030,7 +1043,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, if (ver == 2) { size = sizeof(tkip_data.tkip); tkip_data.tkip.sta_id = - 
cpu_to_le32(mvmvif->deflink.ap_sta_id); + cpu_to_le32(mvm_link->ap_sta_id); } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) { size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1); } else { @@ -1079,7 +1092,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len); kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm); - kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id); + kek_kck_cmd.sta_id = cpu_to_le32(mvm_link->ap_sta_id); if (cmd_ver == 4) { cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4); @@ -1110,15 +1123,16 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, static int iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, - struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd_v6, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_vif_link_info *mvm_link, struct ieee80211_sta *ap_sta) { int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - mvm->offload_tid = wowlan_config_cmd->offloading_tid; + mvm->offload_tid = wowlan_config_cmd_v6->offloading_tid; if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); @@ -1130,25 +1144,43 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, return ret; } - ret = iwl_mvm_wowlan_config_key_params(mvm, vif); + ret = iwl_mvm_wowlan_config_key_params(mvm, vif, mvm_link); if (ret) return ret; - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, - sizeof(*wowlan_config_cmd), - wowlan_config_cmd); + if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) > 6) { + struct iwl_wowlan_config_cmd wowlan_config_cmd = { + .wakeup_filter = wowlan_config_cmd_v6->wakeup_filter, + .wowlan_ba_teardown_tids = + wowlan_config_cmd_v6->wowlan_ba_teardown_tids, + .is_11n_connection = + wowlan_config_cmd_v6->is_11n_connection, + .offloading_tid = wowlan_config_cmd_v6->offloading_tid, + .flags = wowlan_config_cmd_v6->flags, + .sta_id = wowlan_config_cmd_v6->sta_id, + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(wowlan_config_cmd), + &wowlan_config_cmd); + } else { + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(*wowlan_config_cmd_v6), + wowlan_config_cmd_v6); + } if (ret) return ret; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) - ret = iwl_mvm_send_patterns(mvm, vif, wowlan); + ret = iwl_mvm_send_patterns(mvm, mvm_link, wowlan); else ret = iwl_mvm_send_patterns_v1(mvm, wowlan); if (ret) return ret; - return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); + return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0, + mvm_link->ap_sta_id); } static int @@ -1223,6 +1255,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, struct ieee80211_vif *vif = NULL; struct iwl_mvm_vif *mvmvif = NULL; struct ieee80211_sta *ap_sta = NULL; + struct iwl_mvm_vif_link_info *mvm_link; struct iwl_d3_manager_config d3_cfg_cmd_data = { /* * Program the minimum sleep time to 10 seconds, as many @@ -1251,21 +1284,29 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, return -EINVAL; } + vif = iwl_mvm_get_bss_vif(mvm); + if (IS_ERR_OR_NULL(vif)) + return 1; + + ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN); + if (ret) + return ret; + mutex_lock(&mvm->mutex); set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); synchronize_net(); - vif = iwl_mvm_get_bss_vif(mvm); - if 
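[Editor's note: illustrative sketch, not part of the patch.] The WOWLAN_CONFIGURATION hunk above shows a pattern that recurs throughout this series: build the widest supported layout, query the firmware's command version at runtime, and re-pack into the newer (here smaller) wire struct only when the firmware advertises it. Stripped to its shape, with hypothetical structs standing in for the real API layouts:

struct cfg_v6 { unsigned int wakeup_filter; unsigned char tid; unsigned char reserved[7]; };
struct cfg_v7 { unsigned int wakeup_filter; unsigned char tid; };

static int send_cfg(unsigned int fw_cmd_ver, const struct cfg_v6 *v6,
                    int (*send_pdu)(const void *buf, unsigned int len))
{
        if (fw_cmd_ver > 6) {
                /* newer firmware: copy only the fields that survived */
                struct cfg_v7 v7 = {
                        .wakeup_filter = v6->wakeup_filter,
                        .tid = v6->tid,
                };

                return send_pdu(&v7, sizeof(v7));
        }
        /* older firmware keeps the legacy layout */
        return send_pdu(v6, sizeof(*v6));
}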
(IS_ERR_OR_NULL(vif)) { - ret = 1; + mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvm_link = mvmvif->link[iwl_mvm_get_primary_link(vif)]; + if (WARN_ON_ONCE(!mvm_link)) { + ret = -EINVAL; goto out_noreset; } - mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) { + if (mvm_link->ap_sta_id == IWL_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; @@ -1279,14 +1320,14 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvm->net_detect = true; } else { - struct iwl_wowlan_config_cmd wowlan_config_cmd = { + struct iwl_wowlan_config_cmd_v6 wowlan_config_cmd = { .offloading_tid = 0, }; - wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id; + wowlan_config_cmd.sta_id = mvm_link->ap_sta_id; ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id], + mvm->fw_id_to_mac_id[mvm_link->ap_sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(ap_sta)) { ret = -EINVAL; @@ -1303,7 +1344,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, if (ret) goto out_noreset; ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, - vif, mvmvif, ap_sta); + vif, mvmvif, mvm_link, ap_sta); if (ret) goto out; @@ -1354,13 +1395,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, if (ret < 0) { iwl_mvm_free_nd(mvm); - if (!unified_image) { - if (mvm->fw_restart > 0) { - mvm->fw_restart--; - ieee80211_restart_hw(mvm->hw); - } - } - clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); } out_noreset: @@ -1375,7 +1409,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) iwl_mvm_pause_tcm(mvm, true); + mutex_lock(&mvm->mutex); iwl_fw_runtime_suspend(&mvm->fwrt); + mutex_unlock(&mvm->mutex); return __iwl_mvm_suspend(hw, wowlan, false); } @@ -1400,6 +1436,7 @@ struct iwl_wowlan_status_data { u16 non_qos_seq_ctr; u16 qos_seq_ctr[8]; u8 tid_tear_down; + u8 tid_offloaded_tx; struct { /* including RX MIC key for TKIP */ @@ -1440,6 +1477,9 @@ struct iwl_wowlan_status_data { struct iwl_multicast_key_data igtk; struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM]; + int num_mlo_keys; + struct iwl_wowlan_mlo_gtk mlo_keys[WOWLAN_MAX_MLO_KEYS]; + u8 *wake_packet; }; @@ -1469,7 +1509,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, status->pattern_number; if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH | + IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)) wakeup.disconnect = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) @@ -1493,6 +1534,9 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) wakeup.tcp_match = true; + if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC) + wakeup.unprot_deauth_disassoc = true; + if (status->wake_packet) { int pktsize = status->wake_packet_bufsize; int pktlen = status->wake_packet_length; @@ -1794,6 +1838,10 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw, void *_data) { struct iwl_mvm_d3_gtk_iter_data *data = _data; + int link_id = vif->active_links ? 
__ffs(vif->active_links) : -1; + + if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id) + return; if (data->unhandled_cipher) return; @@ -1846,9 +1894,12 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key, memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn)); break; case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn)); memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn)); break; + default: + WARN_ON(1); } } @@ -1879,6 +1930,10 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, struct iwl_mvm_d3_gtk_iter_data *data = _data; struct iwl_wowlan_status_data *status = data->status; s8 keyidx; + int link_id = vif->active_links ? __ffs(vif->active_links) : -1; + + if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id) + return; if (data->unhandled_cipher) return; @@ -1934,11 +1989,174 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, } } +struct iwl_mvm_d3_mlo_old_keys { + u32 cipher[IEEE80211_MLD_MAX_NUM_LINKS][WOWLAN_MLO_GTK_KEY_NUM_TYPES]; + struct ieee80211_key_conf *key[IEEE80211_MLD_MAX_NUM_LINKS][8]; +}; + +static void iwl_mvm_mlo_key_ciphers(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct iwl_mvm_d3_mlo_old_keys *old_keys = data; + enum iwl_wowlan_mlo_gtk_type key_type; + + if (key->link_id < 0) + return; + + if (WARN_ON(key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS || + key->keyidx >= 8)) + return; + + if (WARN_ON(old_keys->key[key->link_id][key->keyidx])) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + key_type = WOWLAN_MLO_GTK_KEY_TYPE_GTK; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + if (key->keyidx == 4 || key->keyidx == 5) { + key_type = WOWLAN_MLO_GTK_KEY_TYPE_IGTK; + break; + } else if (key->keyidx == 6 || key->keyidx == 7) { + key_type = WOWLAN_MLO_GTK_KEY_TYPE_BIGTK; + break; + } + return; + default: + /* ignore WEP/TKIP or unknown ciphers */ + return; + } + + old_keys->cipher[key->link_id][key_type] = key->cipher; + old_keys->key[key->link_id][key->keyidx] = key; +} + +static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status, + struct ieee80211_vif *vif, + struct iwl_mvm *mvm) +{ + int i; + struct iwl_mvm_d3_mlo_old_keys *old_keys; + bool ret = true; + + IWL_DEBUG_WOWLAN(mvm, "Num of MLO Keys: %d\n", status->num_mlo_keys); + if (!status->num_mlo_keys) + return true; + + old_keys = kzalloc(sizeof(*old_keys), GFP_KERNEL); + if (!old_keys) + return false; + + /* find the cipher for each mlo key */ + ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mlo_key_ciphers, old_keys); + + for (i = 0; i < status->num_mlo_keys; i++) { + struct iwl_wowlan_mlo_gtk *mlo_key = &status->mlo_keys[i]; + struct ieee80211_key_conf *key, *old_key; + struct ieee80211_key_seq seq; + struct { + struct ieee80211_key_conf conf; + u8 key[32]; + } conf = {}; + u16 flags = le16_to_cpu(mlo_key->flags); + int j, link_id, key_id, key_type; + + link_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK); + key_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK); + key_type = u16_get_bits(flags, + WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK); + + if (!(vif->valid_links & BIT(link_id))) + continue; + + if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS || 
+ key_id >= 8 || + key_type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES)) + continue; + + conf.conf.cipher = old_keys->cipher[link_id][key_type]; + /* WARN_ON? */ + if (!conf.conf.cipher) + continue; + + conf.conf.keylen = 0; + switch (conf.conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + conf.conf.keylen = WLAN_KEY_LEN_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC; + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256; + break; + } + + if (WARN_ON(!conf.conf.keylen || + conf.conf.keylen > sizeof(conf.key))) + continue; + + memcpy(conf.conf.key, mlo_key->key, conf.conf.keylen); + conf.conf.keyidx = key_id; + + old_key = old_keys->key[link_id][key_id]; + if (old_key) { + IWL_DEBUG_WOWLAN(mvm, + "Remove MLO key id %d, link id %d\n", + key_id, link_id); + ieee80211_remove_key(old_key); + } + + IWL_DEBUG_WOWLAN(mvm, "Add MLO key id %d, link id %d\n", + key_id, link_id); + key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id); + if (WARN_ON(IS_ERR(key))) { + ret = false; + goto out; + } + + /* + * mac80211 expects the pn in big-endian + * also note that seq is a union of all cipher types + * (ccmp, gcmp, cmac, gmac), and they all have the same + * pn field (of length 6) so just copy it to ccmp.pn. + */ + for (j = 5; j >= 0; j--) + seq.ccmp.pn[5 - j] = mlo_key->pn[j]; + + /* group keys are non-QoS and use TID 0 */ + ieee80211_set_key_rx_seq(key, 0, &seq); + } + +out: + kfree(old_keys); + return ret; +} + static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status, struct ieee80211_vif *vif, struct iwl_mvm *mvm, u32 gtk_cipher) { - int i; + int i, j; struct ieee80211_key_conf *key; struct { struct ieee80211_key_conf conf; @@ -1946,6 +2164,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status, } conf = { .conf.cipher = gtk_cipher, }; + int link_id = vif->active_links ? __ffs(vif->active_links) : -1; BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); @@ -1979,10 +2198,18 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status, memcpy(conf.conf.key, status->gtk[i].key, sizeof(status->gtk[i].key)); - key = ieee80211_gtk_rekey_add(vif, &conf.conf); + key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id); if (IS_ERR(key)) return false; - iwl_mvm_set_key_rx_seq_idx(key, status, i); + + for (j = 0; j < ARRAY_SIZE(status->gtk_seq); j++) { + if (!status->gtk_seq[j].valid || + status->gtk_seq[j].key_id != key->keyidx) + continue; + iwl_mvm_set_key_rx_seq_idx(key, status, j); + break; + } + WARN_ON(j == ARRAY_SIZE(status->gtk_seq)); } return true; @@ -2002,6 +2229,7 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status, .conf.keyidx = key_data->id, }; struct ieee80211_key_seq seq; + int link_id = vif->active_links ? 
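[Editor's note: illustrative sketch, not part of the patch.] The MLO rekey loop above copies the firmware-reported 6-byte packet number into mac80211's key sequence with the byte order reversed, since mac80211 expects it big-endian. The same transform as a stand-alone helper:

/* fw_pn[5] lands in out_pn[0], ..., fw_pn[0] lands in out_pn[5];
 * equivalent to the "for (j = 5; j >= 0; j--)" loop in the patch.
 */
static void pn_fw_to_mac80211(const unsigned char fw_pn[6],
                              unsigned char out_pn[6])
{
        int i;

        for (i = 0; i < 6; i++)
                out_pn[i] = fw_pn[5 - i];
}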
__ffs(vif->active_links) : -1; if (!key_data->len) return true; @@ -2027,17 +2255,17 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status, BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key)); memcpy(conf.conf.key, key_data->key, conf.conf.keylen); - key_config = ieee80211_gtk_rekey_add(vif, &conf.conf); + key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id); if (IS_ERR(key_config)) return false; ieee80211_set_key_rx_seq(key_config, 0, &seq); if (key_config->keyidx == 4 || key_config->keyidx == 5) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int link_id = vif->active_links ? __ffs(vif->active_links) : 0; - struct iwl_mvm_vif_link_info *mvm_link = - mvmvif->link[link_id]; + struct iwl_mvm_vif_link_info *mvm_link; + link_id = link_id < 0 ? 0 : link_id; + mvm_link = mvmvif->link[link_id]; mvm_link->igtk = key_config; } @@ -2072,7 +2300,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, .status = status, }; int i; - u32 disconnection_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; @@ -2080,9 +2307,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, if (!status || !vif->bss_conf.bssid) return false; - if (status->wakeup_reasons & disconnection_reasons) - return false; - if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 || iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION, @@ -2131,18 +2355,25 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, return false; } + if (!iwl_mvm_mlo_gtk_rekey(status, vif, mvm)) + return false; + ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); } out: if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, - WOWLAN_GET_STATUSES, 0) < 10) { + WOWLAN_GET_STATUSES, + IWL_FW_CMD_VER_UNKNOWN) < 10) { mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ mvmvif->seqno = status->non_qos_seq_ctr + 0x10; } + if (status->wakeup_reasons & disconnection_reasons) + return false; + return true; } @@ -2200,7 +2431,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, struct iwl_wowlan_igtk_status *data) { + int i; + BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); + BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn)); if (!data->key_len) return; @@ -2212,7 +2446,10 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, + WOWLAN_IGTK_MIN_INDEX; memcpy(status->igtk.key, data->key, sizeof(data->key)); - memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn)); + + /* mac80211 expects big endian for memcmp() to work, convert */ + for (i = 0; i < sizeof(data->ipn); i++) + status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1]; } static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status, @@ -2251,20 +2488,73 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, struct iwl_wowlan_status_data *status, u32 len) { - u32 i; + u32 expected_len = sizeof(*data) + + data->num_mlo_link_keys * sizeof(status->mlo_keys[0]); - if (!data) { - IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); + if (len < expected_len) { + IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); status = NULL; return; } - if (len < sizeof(*data)) { + if (mvm->fast_resume) + return; + + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, data->gtk); + 
iwl_mvm_convert_igtk(status, &data->igtk[0]); + iwl_mvm_convert_bigtk(status, data->bigtk); + status->replay_ctr = le64_to_cpu(data->replay_ctr); + status->pattern_number = le16_to_cpu(data->pattern_number); + status->tid_offloaded_tx = data->tid_offloaded_tx; + if (IWL_FW_CHECK(mvm, + data->tid_offloaded_tx >= + ARRAY_SIZE(status->qos_seq_ctr), + "tid_offloaded_tx is out of bound %d\n", + data->tid_offloaded_tx)) + data->tid_offloaded_tx = 0; + status->qos_seq_ctr[data->tid_offloaded_tx] = + le16_to_cpu(data->qos_seq_ctr); + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); + status->num_of_gtk_rekeys = + le32_to_cpu(data->num_of_gtk_rekeys); + status->received_beacons = le32_to_cpu(data->received_beacons); + status->tid_tear_down = data->tid_tear_down; + + if (data->num_mlo_link_keys) { + status->num_mlo_keys = data->num_mlo_link_keys; + if (IWL_FW_CHECK(mvm, + status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS, + "Too many mlo keys: %d, max %d\n", + status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS)) + status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS; + memcpy(status->mlo_keys, data->mlo_gtks, + status->num_mlo_keys * sizeof(status->mlo_keys[0])); + } +} + +static void +iwl_mvm_parse_wowlan_info_notif_v4(struct iwl_mvm *mvm, + struct iwl_wowlan_info_notif_v4 *data, + struct iwl_wowlan_status_data *status, + u32 len, bool has_mlo_keys) +{ + u32 i; + u32 expected_len = sizeof(*data); + + if (has_mlo_keys) + expected_len += (data->num_mlo_link_keys * + sizeof(status->mlo_keys[0])); + + if (len < expected_len) { IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); status = NULL; return; } + if (mvm->fast_resume) + return; + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); iwl_mvm_convert_gtk_v3(status, data->gtk); iwl_mvm_convert_igtk(status, &data->igtk[0]); @@ -2279,6 +2569,17 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, le32_to_cpu(data->num_of_gtk_rekeys); status->received_beacons = le32_to_cpu(data->received_beacons); status->tid_tear_down = data->tid_tear_down; + + if (has_mlo_keys && data->num_mlo_link_keys) { + status->num_mlo_keys = data->num_mlo_link_keys; + if (IWL_FW_CHECK(mvm, + status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS, + "Too many mlo keys: %d, max %d\n", + status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS)) + status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS; + memcpy(status->mlo_keys, data->mlo_gtks, + status->num_mlo_keys * sizeof(status->mlo_keys[0])); + } } static void @@ -2289,12 +2590,6 @@ iwl_mvm_parse_wowlan_info_notif_v2(struct iwl_mvm *mvm, { u32 i; - if (!data) { - IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); - status = NULL; - return; - } - if (len < sizeof(*data)) { IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); status = NULL; @@ -2499,6 +2794,16 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, int i; bool keep = false; struct iwl_mvm_sta *mvm_ap_sta; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id = vif->active_links ? 
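[Editor's note: illustrative sketch, not part of the patch.] Both rewritten info-notification parsers in this hunk follow the same defensive recipe for variable-length firmware payloads: derive the expected length from the advertised element count, reject a short buffer outright, and clamp the count to the host-side array before copying (the IWL_FW_CHECK() calls). The recipe in isolation, with hypothetical types:

#include <string.h>

#define MAX_KEYS        4
#define KEY_LEN         16

struct fw_notif {
        unsigned char num_keys;
        unsigned char keys[][KEY_LEN];  /* num_keys entries follow */
};

static int parse_fw_notif(const struct fw_notif *n, unsigned int len,
                          unsigned char out[MAX_KEYS][KEY_LEN])
{
        unsigned int count, need;

        if (len < sizeof(*n))
                return -1;              /* can't even read the count */

        count = n->num_keys;
        need = sizeof(*n) + count * KEY_LEN;
        if (len < need)
                return -1;              /* truncated payload */

        if (count > MAX_KEYS)
                count = MAX_KEYS;       /* clamp, as IWL_FW_CHECK does */

        memcpy(out, n->keys, count * KEY_LEN);
        return (int)count;
}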
__ffs(vif->active_links) : 0; + struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id]; + int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw, + PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + IWL_FW_CMD_VER_UNKNOWN); + + if (WARN_ON(!mvm_link)) + goto out_unlock; if (!status) goto out_unlock; @@ -2506,16 +2811,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", status->wakeup_reasons); - /* still at hard-coded place 0 for D3 image */ - mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); + mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, mvm_link->ap_sta_id); if (!mvm_ap_sta) goto out_unlock; - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = status->qos_seq_ctr[i]; - /* firmware stores last-used value, we store next value */ - seq += 0x10; - mvm_ap_sta->tid_data[i].seq_number = seq; + /* firmware stores last-used value, we store next value */ + if (wowlan_info_ver >= 5) { + mvm_ap_sta->tid_data[status->tid_offloaded_tx].seq_number = + status->qos_seq_ctr[status->tid_offloaded_tx] + 0x10; + } else { + for (i = 0; i < IWL_MAX_TID_COUNT; i++) + mvm_ap_sta->tid_data[i].seq_number = + status->qos_seq_ctr[i] + 0x10; } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { @@ -2622,6 +2929,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, int idx) { int i; + int n_channels = 0; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { @@ -2630,7 +2938,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) - match->channels[match->n_channels++] = + match->channels[n_channels++] = mvm->nd_channels[i]->center_freq; } else { struct iwl_scan_offload_profile_match_v1 *matches = @@ -2638,9 +2946,11 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) - match->channels[match->n_channels++] = + match->channels[n_channels++] = mvm->nd_channels[i]->center_freq; } + /* We may have ended up with fewer channels than we allocated. */ + match->n_channels = n_channels; } /** @@ -2721,6 +3031,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, GFP_KERNEL); if (!net_detect || !n_matches) goto out_report_nd; + net_detect->n_matches = n_matches; + n_matches = 0; for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { struct cfg80211_wowlan_nd_match *match; @@ -2734,8 +3046,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, GFP_KERNEL); if (!match) goto out_report_nd; + match->n_channels = n_channels; - net_detect->matches[net_detect->n_matches++] = match; + net_detect->matches[n_matches++] = match; /* We inverted the order of the SSIDs in the scan * request, so invert the index here. @@ -2750,6 +3063,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i); } + /* We may have fewer matches than we allocated. */ + net_detect->n_matches = n_matches; out_report_nd: wakeup.net_detect = net_detect; @@ -2777,55 +3092,50 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, ieee80211_resume_disconnect(vif); } -static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) -{ - struct error_table_start { - /* cf. 
struct iwl_error_event_table */ - u32 valid; - __le32 err_id; - } err_info; - - if (!base) - return false; - - iwl_trans_read_mem_bytes(trans, base, - &err_info, sizeof(err_info)); - if (err_info.valid && err_id) - *err_id = le32_to_cpu(err_info.err_id); - - return !!err_info.valid; -} +enum rt_status { + FW_ALIVE, + FW_NEEDS_RESET, + FW_ERROR, +}; -static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { u32 err_id; /* check for lmac1 error */ - if (iwl_mvm_rt_status(mvm->trans, - mvm->trans->dbg.lmac_error_event_table[0], - &err_id)) { + if (iwl_fwrt_read_err_table(mvm->trans, + mvm->trans->dbg.lmac_error_event_table[0], + &err_id)) { if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { - struct cfg80211_wowlan_wakeup wakeup = { - .rfkill_release = true, - }; - ieee80211_report_wowlan_wakeup(vif, &wakeup, - GFP_KERNEL); + IWL_WARN(mvm, "Rfkill was toggled during suspend\n"); + if (vif) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + + return FW_NEEDS_RESET; } - return true; + return FW_ERROR; } /* check if we have lmac2 set and check for error */ - if (iwl_mvm_rt_status(mvm->trans, - mvm->trans->dbg.lmac_error_event_table[1], NULL)) - return true; + if (iwl_fwrt_read_err_table(mvm->trans, + mvm->trans->dbg.lmac_error_event_table[1], + NULL)) + return FW_ERROR; /* check for umac error */ - if (iwl_mvm_rt_status(mvm->trans, - mvm->trans->dbg.umac_error_event_table, NULL)) - return true; + if (iwl_fwrt_read_err_table(mvm->trans, + mvm->trans->dbg.umac_error_event_table, + NULL)) + return FW_ERROR; - return false; + return FW_ALIVE; } /* @@ -2843,9 +3153,12 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm, /* if FW uses status notification, status shouldn't be NULL here */ if (!d3_data->status) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA : + u8 sta_id = mvm->net_detect ? 
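[Editor's note: illustrative sketch, not part of the patch.] The new enum rt_status above turns the old boolean "is the firmware dead?" check into a tri-state, so the resume path just below can tell "rfkill was toggled during suspend, only a reset is needed" apart from "the firmware actually crashed, collect an error dump first". The resulting control flow, reduced to a sketch with callback names of our choosing:

enum rt_status { FW_ALIVE, FW_NEEDS_RESET, FW_ERROR };

static int handle_rt_status(enum rt_status s,
                            void (*mark_fw_error)(void),
                            void (*collect_fw_dump)(void))
{
        if (s == FW_ALIVE)
                return 0;

        mark_fw_error();                /* set_bit(STATUS_FW_ERROR, ...) */
        if (s == FW_ERROR)
                collect_fw_dump();      /* only a real crash merits a dump */
        return 1;                       /* caller restarts the firmware */
}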
IWL_INVALID_STA : mvmvif->deflink.ap_sta_id; + /* bug - FW with MLO has status notification */ + WARN_ON(ieee80211_vif_is_mld(vif)); + d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id); } @@ -2954,7 +3267,7 @@ static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm, if (results->matched_profiles) { memcpy(results->matches, notif->matches, matches_len); - d3_data->nd_results_valid = TRUE; + d3_data->nd_results_valid = true; } /* no scan should be active at this point */ @@ -3012,6 +3325,13 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2, d3_data->status, len); + } else if (wowlan_info_ver < 5) { + struct iwl_wowlan_info_notif_v4 *notif = + (void *)pkt->data; + + iwl_mvm_parse_wowlan_info_notif_v4(mvm, notif, + d3_data->status, len, + wowlan_info_ver > 3); } else { struct iwl_wowlan_info_notif *notif = (void *)pkt->data; @@ -3118,7 +3438,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test) return ret; } -#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5) +#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 3) static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm, struct iwl_d3_data *d3_data) @@ -3129,12 +3449,22 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm, WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF), WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) }; + static const u16 d3_fast_resume_notif[] = { + WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) + }; struct iwl_notification_wait wait_d3_notif; int ret; - iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, - d3_resume_notif, ARRAY_SIZE(d3_resume_notif), - iwl_mvm_wait_d3_notif, d3_data); + if (mvm->fast_resume) + iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, + d3_fast_resume_notif, + ARRAY_SIZE(d3_fast_resume_notif), + iwl_mvm_wait_d3_notif, d3_data); + else + iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, + d3_resume_notif, + ARRAY_SIZE(d3_resume_notif), + iwl_mvm_wait_d3_notif, d3_data); ret = iwl_mvm_resume_firmware(mvm, d3_data->test); if (ret) { @@ -3174,10 +3504,21 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm); + enum rt_status rt_status; bool keep = false; mutex_lock(&mvm->mutex); + /* Apparently, the device went away and device_powered_off() was called, + * don't even try to read the rt_status, the device is currently + * inaccessible. + */ + if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) { + IWL_INFO(mvm, + "Can't resume, device_powered_off() was called during wowlan\n"); + goto err; + } + mvm->last_reset_or_resume_time_jiffies = jiffies; /* get the BSS vif pointer again */ @@ -3187,13 +3528,19 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); - if (iwl_mvm_check_rt_status(mvm, vif)) { + rt_status = iwl_mvm_check_rt_status(mvm, vif); + if (rt_status != FW_ALIVE) { set_bit(STATUS_FW_ERROR, &mvm->trans->status); - iwl_mvm_dump_nic_error_log(mvm); - iwl_dbg_tlv_time_point(&mvm->fwrt, - IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); - iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - false, 0); + if (rt_status == FW_ERROR) { + IWL_ERR(mvm, "FW Error occurred during suspend. 
Restarting.\n"); + iwl_mvm_dump_nic_error_log(mvm); + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_FW_ASSERT, + NULL); + iwl_fw_dbg_collect_desc(&mvm->fwrt, + &iwl_dump_desc_assert, + false, 0); + } ret = 1; goto err; } @@ -3215,6 +3562,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) goto err; } + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN); + /* after the successful handshake, we're out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; @@ -3317,6 +3666,81 @@ void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) device_set_wakeup_enable(mvm->trans->dev, enabled); } +void iwl_mvm_fast_suspend(struct iwl_mvm *mvm) +{ + struct iwl_d3_manager_config d3_cfg_cmd_data = {}; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n"); + + mvm->fast_resume = true; + set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + + WARN_ON(iwl_mvm_power_update_device(mvm)); + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3, + sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data); + if (ret) + IWL_ERR(mvm, + "fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret); + + ret = iwl_trans_d3_suspend(mvm->trans, false, false); + if (ret) + IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret); +} + +int iwl_mvm_fast_resume(struct iwl_mvm *mvm) +{ + struct iwl_d3_data d3_data = { + .notif_expected = + IWL_D3_NOTIF_D3_END_NOTIF, + }; + enum rt_status rt_status; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_WOWLAN(mvm, "Starting the fast resume flow\n"); + + mvm->last_reset_or_resume_time_jiffies = jiffies; + iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); + + rt_status = iwl_mvm_check_rt_status(mvm, NULL); + if (rt_status != FW_ALIVE) { + set_bit(STATUS_FW_ERROR, &mvm->trans->status); + if (rt_status == FW_ERROR) { + IWL_ERR(mvm, + "iwl_mvm_check_rt_status failed, device is gone during suspend\n"); + iwl_mvm_dump_nic_error_log(mvm); + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_FW_ASSERT, + NULL); + iwl_fw_dbg_collect_desc(&mvm->fwrt, + &iwl_dump_desc_assert, + false, 0); + } + mvm->trans->state = IWL_TRANS_NO_FW; + ret = -ENODEV; + + goto out; + } + ret = iwl_mvm_d3_notif_wait(mvm, &d3_data); + + if (ret) { + IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); + mvm->trans->state = IWL_TRANS_NO_FW; + } + +out: + clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + mvm->fast_resume = false; + + return ret; +} + #ifdef CONFIG_IWLWIFI_DEBUGFS static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) { @@ -3352,6 +3776,7 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; + unsigned long end = jiffies + 60 * HZ; u32 pme_asserted; while (true) { @@ -3365,6 +3790,12 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, if (msleep_interruptible(100)) break; + + if (time_is_before_jiffies(end)) { + IWL_ERR(mvm, + "ending pseudo-D3 with timeout after ~60 seconds\n"); + return -ETIMEDOUT; + } } return 0; @@ -3426,7 +3857,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) } const struct file_operations iwl_dbgfs_d3_test_ops = { - .llseek = no_llseek, .open = iwl_mvm_d3_test_open, .read = iwl_mvm_d3_test_read, .release = iwl_mvm_d3_test_release, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index e8b881596baf..fbe4e4a50852 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -221,7 +221,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, mvmvif->deflink.queue_params[i].uapsd); if (vif->type == NL80211_IFTYPE_STATION && - ap_sta_id != IWL_MVM_INVALID_STA) { + ap_sta_id != IWL_INVALID_STA) { struct iwl_mvm_sta *mvm_sta; mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); @@ -381,9 +381,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf, mutex_lock(&mvm->mutex); iwl_dbgfs_update_bf(vif, param, value); if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + ret = iwl_mvm_disable_beacon_filter(mvm, vif); else - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + ret = iwl_mvm_enable_beacon_filter(mvm, vif); mutex_unlock(&mvm->mutex); return ret ?: count; @@ -407,7 +407,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file, }; iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); - if (mvmvif->bf_data.bf_enabled) + if (mvmvif->bf_enabled) cmd.bf_enable_beacon_filter = cpu_to_le32(1); else cmd.bf_enable_beacon_filter = 0; @@ -463,11 +463,13 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) +static ssize_t +iwl_dbgfs_low_latency_write_handle(struct wiphy *wiphy, struct file *file, + char *buf, size_t count, void *data) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *vif = data; u8 value; int ret; @@ -484,12 +486,28 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, return count; } -static ssize_t -iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) +static ssize_t iwl_dbgfs_low_latency_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; + char buf[10] = {}; + + return wiphy_locked_debugfs_write(mvm->hw->wiphy, file, + buf, sizeof(buf), user_buf, count, + iwl_dbgfs_low_latency_write_handle, + vif); +} + +static ssize_t +iwl_dbgfs_low_latency_force_write_handle(struct wiphy *wiphy, struct file *file, + char *buf, size_t count, void *data) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *vif = data; u8 value; int ret; @@ -517,6 +535,22 @@ iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, return count; } +static ssize_t +iwl_dbgfs_low_latency_force_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif 
*mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[10] = {}; + + return wiphy_locked_debugfs_write(mvm->hw->wiphy, file, + buf, sizeof(buf), user_buf, count, + iwl_dbgfs_low_latency_force_write_handle, + vif); +} + static ssize_t iwl_dbgfs_low_latency_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -578,34 +612,47 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; - struct iwl_mvm_phy_ctxt *phy_ctxt; + struct ieee80211_bss_conf *link_conf; u16 value; - int ret; + int link_id, ret = -EINVAL; ret = kstrtou16(buf, 0, &value); if (ret) return ret; mutex_lock(&mvm->mutex); - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); - /* make sure the channel context is assigned */ - if (!chanctx_conf) { + mvm->dbgfs_rx_phyinfo = value; + + for_each_vif_active_link(vif, link_conf, link_id) { + struct ieee80211_chanctx_conf *chanctx_conf; + struct cfg80211_chan_def min_def, ap_def; + struct iwl_mvm_phy_ctxt *phy_ctxt; + u8 chains_static, chains_dynamic; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); + if (!chanctx_conf) { + rcu_read_unlock(); + continue; + } + /* A command can't be sent with RCU lock held, so copy + * everything here and use it after unlocking + */ + min_def = chanctx_conf->min_def; + ap_def = chanctx_conf->ap; + chains_static = chanctx_conf->rx_chains_static; + chains_dynamic = chanctx_conf->rx_chains_dynamic; rcu_read_unlock(); - mutex_unlock(&mvm->mutex); - return -EINVAL; - } - phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv]; - rcu_read_unlock(); + phy_ctxt = mvmvif->link[link_id]->phy_ctxt; + if (!phy_ctxt) + continue; - mvm->dbgfs_rx_phyinfo = value; + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def, + chains_static, chains_dynamic); + } - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def, - chanctx_conf->rx_chains_static, - chanctx_conf->rx_chains_dynamic); mutex_unlock(&mvm->mutex); return ret ?: count; @@ -679,6 +726,132 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, len); } +static ssize_t iwl_dbgfs_max_tx_op_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u16 value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + mvmvif->max_tx_op = value; + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[10]; + int len; + + mutex_lock(&mvm->mutex); + len = scnprintf(buf, sizeof(buf), "%hu\n", mvmvif->max_tx_op); + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 action; + int ret; + + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + return 
-EINVAL; + + if (kstrtou32(buf, 0, &action)) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + if (!action) { + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false); + } else if (action == 1) { + ret = iwl_mvm_int_mlo_scan(mvm, vif); + } else { + ret = -EINVAL; + } + + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + unsigned long esr_mask; + char *buf; + int bufsz, pos, i; + ssize_t rv; + + mutex_lock(&mvm->mutex); + esr_mask = mvmvif->esr_disable_reason; + mutex_unlock(&mvm->mutex); + + bufsz = hweight32(esr_mask) * 32 + 40; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n", + esr_mask); + for_each_set_bit(i, &esr_mask, BITS_PER_LONG) + pos += scnprintf(buf + pos, bufsz - pos, " - %s\n", + iwl_get_esr_state_string(BIT(i))); + + rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return rv; +} + +static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 reason; + u8 block; + int ret; + + ret = sscanf(buf, "%u %hhu", &reason, &block); + if (ret < 0) + return ret; + + if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS)) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (block) + iwl_mvm_block_esr(mvm, vif, reason, + iwl_mvm_get_primary_link(vif)); + else + iwl_mvm_unblock_esr(mvm, vif, reason); + mutex_unlock(&mvm->mutex); + + return count; +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -692,12 +865,27 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params); MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10); + +static const struct file_operations iwl_dbgfs_low_latency_ops = { + .write = iwl_dbgfs_low_latency_write, + .read = iwl_dbgfs_low_latency_read, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +static const struct file_operations iwl_dbgfs_low_latency_force_ops = { + .write = iwl_dbgfs_low_latency_force_write, + .open = simple_open, + .llseek = generic_file_llseek, +}; + MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32); void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -725,6 +913,11 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600); + debugfs_create_bool("ftm_unprotected", 
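[Editor's note: illustrative sketch, not part of the patch.] The esr_disable_reason read handler above sizes its buffer from the number of set bits (hweight32) plus a fixed header, then walks the mask and prints one named blocking reason per line. The same decode in self-contained form (the name table here is hypothetical; the driver takes names from iwl_get_esr_state_string()):

#include <stdio.h>

static const char *const esr_reason_name[] = {
        "wowlan", "coex", "missed-beacon", "low-rssi",
};

static void print_esr_mask(unsigned long mask)
{
        unsigned long i;

        printf("EMLSR state: '0x%lx'\nreasons:\n", mask);
        for (i = 0; i < sizeof(esr_reason_name) / sizeof(esr_reason_name[0]); i++)
                if (mask & (1UL << i))
                        printf(" - %s\n", esr_reason_name[i]);
}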
0200, mvmvif->dbgfs_dir, + &mvmvif->ftm_unprotected); + MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) @@ -735,7 +928,9 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct dentry *dbgfs_dir = vif->debugfs_dir; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[100]; + char buf[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) + + (7 + IFNAMSIZ + 1) + 6 + 1]; + char name[7 + IFNAMSIZ + 1]; /* this will happen in monitor mode */ if (!dbgfs_dir) @@ -748,10 +943,11 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * find * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ */ - snprintf(buf, 100, "../../../%pd3/iwlmvm", dbgfs_dir); + snprintf(name, sizeof(name), "%pd", dbgfs_dir); + snprintf(buf, sizeof(buf), "../../../%pd3/iwlmvm", dbgfs_dir); - mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, - mvm->debugfs_dir, buf); + mvmvif->dbgfs_slink = + debugfs_create_symlink(name, mvm->debugfs_dir, buf); } void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index edc8204f7c0e..55d035b896e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -16,6 +16,7 @@ #include "debugfs.h" #include "iwl-modparams.h" #include "iwl-drv.h" +#include "iwl-utils.h" #include "fw/error-dump.h" #include "fw/api/phy-ctxt.h" @@ -151,37 +152,6 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, return ret; } -static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_sta *mvmsta; - int sta_id, drain, ret; - - if (!iwl_mvm_firmware_running(mvm) || - mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) - return -EIO; - - if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) - return -EINVAL; - if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations) - return -EINVAL; - if (drain < 0 || drain > 1) - return -EINVAL; - - mutex_lock(&mvm->mutex); - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); - - if (!mvmsta) - ret = -ENOENT; - else - ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? 
: count; - - mutex_unlock(&mvm->mutex); - - return ret; -} - static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -391,9 +361,7 @@ static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file, char buf[12]; u32 value; - err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, - DSM_FUNC_ENABLE_6E, - &iwl_guid, &value); + err = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value); if (err) return err; @@ -495,7 +463,6 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_link_sta *link_sta, if (amsdu_len) { mvm_link_sta->orig_amsdu_len = link_sta->agg.max_amsdu_len; link_sta->agg.max_amsdu_len = amsdu_len; - link_sta->agg.max_amsdu_len = amsdu_len; for (i = 0; i < ARRAY_SIZE(link_sta->agg.max_tid_amsdu_len); i++) link_sta->agg.max_tid_amsdu_len[i] = amsdu_len; } else { @@ -570,224 +537,12 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } -static -int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, - int pos, int bufsz) -{ - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); - - BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); - BT_MBOX_PRINT(0, LE_PROF1, false); - BT_MBOX_PRINT(0, LE_PROF2, false); - BT_MBOX_PRINT(0, LE_PROF_OTHER, false); - BT_MBOX_PRINT(0, CHL_SEQ_N, false); - BT_MBOX_PRINT(0, INBAND_S, false); - BT_MBOX_PRINT(0, LE_MIN_RSSI, false); - BT_MBOX_PRINT(0, LE_SCAN, false); - BT_MBOX_PRINT(0, LE_ADV, false); - BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); - BT_MBOX_PRINT(0, OPEN_CON_1, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); - - BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); - BT_MBOX_PRINT(1, IP_SR, false); - BT_MBOX_PRINT(1, LE_MSTR, false); - BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); - BT_MBOX_PRINT(1, MSG_TYPE, false); - BT_MBOX_PRINT(1, SSN, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); - - BT_MBOX_PRINT(2, SNIFF_ACT, false); - BT_MBOX_PRINT(2, PAG, false); - BT_MBOX_PRINT(2, INQUIRY, false); - BT_MBOX_PRINT(2, CONN, false); - BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); - BT_MBOX_PRINT(2, DISC, false); - BT_MBOX_PRINT(2, SCO_TX_ACT, false); - BT_MBOX_PRINT(2, SCO_RX_ACT, false); - BT_MBOX_PRINT(2, ESCO_RE_TX, false); - BT_MBOX_PRINT(2, SCO_DURATION, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); - - BT_MBOX_PRINT(3, SCO_STATE, false); - BT_MBOX_PRINT(3, SNIFF_STATE, false); - BT_MBOX_PRINT(3, A2DP_STATE, false); - BT_MBOX_PRINT(3, A2DP_SRC, false); - BT_MBOX_PRINT(3, ACL_STATE, false); - BT_MBOX_PRINT(3, MSTR_STATE, false); - BT_MBOX_PRINT(3, OBX_STATE, false); - BT_MBOX_PRINT(3, OPEN_CON_2, false); - BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); - BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); - BT_MBOX_PRINT(3, INBAND_P, false); - BT_MBOX_PRINT(3, MSG_TYPE_2, false); - BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); - - return pos; -} - -static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; - char *buf; - int ret, pos = 0, bufsz = sizeof(char) * 1024; - - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - - pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); - - pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += 
scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf + pos, - bufsz - pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - notif->rrc_status & 0xF); - pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", - notif->ttc_status & 0xF); - - pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", - IWL_MVM_BT_COEX_SYNC2SCO); - pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", - IWL_MVM_BT_COEX_MPLUT); - - mutex_unlock(&mvm->mutex); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - - return ret; -} -#undef BT_MBOX_PRINT - -static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; - char buf[256]; - int bufsz = sizeof(buf); - int pos = 0; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf + pos, bufsz - pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_secondary_ci)); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - u32 bt_tx_prio; - - if (sscanf(buf, "%u", &bt_tx_prio) != 1) - return -EINVAL; - if (bt_tx_prio > 4) - return -EINVAL; - - mvm->bt_tx_prio = bt_tx_prio; - - return count; -} - -static ssize_t -iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - static const char * const modes_str[BT_FORCE_ANT_MAX] = { - [BT_FORCE_ANT_DIS] = "dis", - [BT_FORCE_ANT_AUTO] = "auto", - [BT_FORCE_ANT_BT] = "bt", - [BT_FORCE_ANT_WIFI] = "wifi", - }; - int ret, bt_force_ant_mode; - - ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); - if (ret < 0) - return ret; - - bt_force_ant_mode = ret; - ret = 0; - mutex_lock(&mvm->mutex); - if (mvm->bt_force_ant_mode == bt_force_ant_mode) - goto out; - - mvm->bt_force_ant_mode = bt_force_ant_mode; - IWL_DEBUG_COEX(mvm, "Force mode: %s\n", - modes_str[mvm->bt_force_ant_mode]); - - if (iwl_mvm_firmware_running(mvm)) - ret = iwl_mvm_send_bt_init_conf(mvm); - else - ret = 0; - -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - char *buff, *pos, *endpos; - static const size_t bufsz = 1024; - char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; - int ret; - - buff = kmalloc(bufsz, GFP_KERNEL); - if (!buff) - return -ENOMEM; - - pos = buff; - endpos = pos + bufsz; - - pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", - iwl_drv_get_fwname_pre(mvm->trans, _fw_name_pre)); - pos += scnprintf(pos, endpos - pos, "FW: %s\n", - mvm->fwrt.fw->human_readable); - pos += scnprintf(pos, endpos - pos, "Device: %s\n", - mvm->fwrt.trans->name); - pos += scnprintf(pos, endpos - pos, "Bus: %s\n", - mvm->fwrt.dev->bus->name); - - ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); - kfree(buff); - - return ret; -} - static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { 
struct iwl_mvm *mvm = file->private_data; - struct iwl_mvm_tas_status_resp tas_rsp; - struct iwl_mvm_tas_status_resp *rsp = &tas_rsp; + struct iwl_mvm_tas_status_resp *rsp = NULL; static const size_t bufsz = 1024; char *buff, *pos, *endpos; const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = { @@ -823,6 +578,10 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, if (!iwl_mvm_firmware_running(mvm)) return -ENODEV; + if (iwl_fw_lookup_notif_ver(mvm->fw, DEBUG_GROUP, GET_TAS_STATUS, + 0) != 3) + return -EOPNOTSUPP; + mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); @@ -873,18 +632,26 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, rsp->tas_fw_version); pos += scnprintf(pos, endpos - pos, "Is UHB enabled for USA?: %s\n", rsp->is_uhb_for_usa_enable ? "True" : "False"); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT)) + pos += scnprintf(pos, endpos - pos, + "Is UHB enabled for CANADA?: %s\n", + rsp->uhb_allowed_flags & + TAS_UHB_ALLOWED_CANADA ? "True" : "False"); + pos += scnprintf(pos, endpos - pos, "Current MCC: 0x%x\n", le16_to_cpu(rsp->curr_mcc)); pos += scnprintf(pos, endpos - pos, "Block list entries:"); - for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++) + for (i = 0; i < IWL_WTAS_BLACK_LIST_MAX; i++) pos += scnprintf(pos, endpos - pos, " 0x%x", le16_to_cpu(rsp->block_list[i])); pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n", - dmi_get_system_info(DMI_SYS_VENDOR)); + dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>"); pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n", - iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO"); + iwl_is_tas_approved() ? "YES" : "NO"); pos += scnprintf(pos, endpos - pos, "\tDo TAS Support Dual Radio?: %s\n", rsp->in_dual_radio ? 
"TRUE" : "FALSE"); @@ -1373,10 +1140,6 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, mutex_lock(&mvm->mutex); - /* allow one more restart that we're provoking here */ - if (mvm->fw_restart >= 0) - mvm->fw_restart++; - if (count == 6 && !strcmp(buf, "nolog\n")) { set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status); @@ -1398,6 +1161,8 @@ static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, if (!iwl_mvm_firmware_running(mvm)) return -EIO; + IWL_ERR(mvm, "Triggering an NMI from debugfs\n"); + if (count == 6 && !strcmp(buf, "nolog\n")) set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); @@ -1619,6 +1384,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) &beacon_cmd.tim_size, beacon->data, beacon->len); + if (iwl_fw_lookup_cmd_ver(mvm->fw, + BEACON_TEMPLATE_CMD, 0) >= 14) { + u32 offset = iwl_find_ie_offset(beacon->data, + WLAN_EID_S1G_TWT, + beacon->len); + + beacon_cmd.btwt_offset = cpu_to_le32(offset); + } + iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } @@ -1698,22 +1472,6 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, return ret ?: count; } -static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) -{ - if (count == 0) - return 0; - - iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, - NULL); - - iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, - (count - 1), NULL); - - return count; -} - static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) @@ -1721,6 +1479,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm, if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) return -EOPNOTSUPP; + /* + * If the firmware is not running, silently succeed since there is + * no data to clear. 
+ */ + if (!iwl_mvm_firmware_running(mvm)) + return count; + mutex_lock(&mvm->mutex); iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt); mutex_unlock(&mvm->mutex); @@ -2157,29 +1922,22 @@ MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); -MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data); -MVM_DEBUGFS_READ_FILE_OPS(bt_notif); -MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_system_stats); -MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); MVM_DEBUGFS_READ_FILE_OPS(tas_get_status); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); -MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_clear, 64); MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, @@ -2363,7 +2121,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) spin_lock_init(&mvm->drv_stats_lock); MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400); @@ -2372,21 +2129,15 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); - MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); - MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); - MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); - MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_dbg_clear, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); @@ -2441,6 +2192,9 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm, &iwl_dbgfs_mem_ops); + debugfs_create_bool("rx_ts_ptp", 0600, mvm->debugfs_dir, + &mvm->rx_ts_ptp); + /* * Create a symlink with mac80211. 
It will be removed when mac80211
 * exits (before the opmode exits, which removes the target.)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 233ae81884a0..b26141c30c61 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 #include <linux/etherdevice.h>
 #include <linux/math64.h>
@@ -40,6 +40,12 @@ struct iwl_mvm_ftm_pasn_entry {
 	u32 flags;
 };
 
+struct iwl_mvm_ftm_iter_data {
+	u8 *cipher;
+	u8 *bssid;
+	u8 *tk;
+};
+
 int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
 			     u8 *hltk, u32 hltk_len)
@@ -53,6 +59,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (!pasn)
 		return -ENOBUFS;
 
+	iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
 	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
 
 	switch (pasn->cipher) {
@@ -429,47 +437,55 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
 	return 0;
 }
 
-#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
+#define FTM_SET_FLAG(flag)	(*flags |= \
 				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
 
 static void
-iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
-			      struct cfg80211_pmsr_request_peer *peer,
-			      struct iwl_tof_range_req_ap_entry_v6 *target)
+iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
+			     struct cfg80211_pmsr_request_peer *peer,
+			     __le32 *flags)
 {
-	memcpy(target->bssid, peer->addr, ETH_ALEN);
-	target->burst_period =
-		cpu_to_le16(peer->ftm.burst_period);
-	target->samples_per_burst = peer->ftm.ftms_per_burst;
-	target->num_of_bursts = peer->ftm.num_bursts_exp;
-	target->ftmr_max_retries = peer->ftm.ftmr_retries;
-	target->initiator_ap_flags = cpu_to_le32(0);
+	*flags = cpu_to_le32(0);
 
 	if (peer->ftm.asap)
-		FTM_PUT_FLAG(ASAP);
+		FTM_SET_FLAG(ASAP);
 
 	if (peer->ftm.request_lci)
-		FTM_PUT_FLAG(LCI_REQUEST);
+		FTM_SET_FLAG(LCI_REQUEST);
 
 	if (peer->ftm.request_civicloc)
-		FTM_PUT_FLAG(CIVIC_REQUEST);
+		FTM_SET_FLAG(CIVIC_REQUEST);
 
 	if (IWL_MVM_FTM_INITIATOR_DYNACK)
-		FTM_PUT_FLAG(DYN_ACK);
+		FTM_SET_FLAG(DYN_ACK);
 
 	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
-		FTM_PUT_FLAG(ALGO_LR);
+		FTM_SET_FLAG(ALGO_LR);
 	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
-		FTM_PUT_FLAG(ALGO_FFT);
+		FTM_SET_FLAG(ALGO_FFT);
 
 	if (peer->ftm.trigger_based)
-		FTM_PUT_FLAG(TB);
+		FTM_SET_FLAG(TB);
 	else if (peer->ftm.non_trigger_based)
-		FTM_PUT_FLAG(NON_TB);
+		FTM_SET_FLAG(NON_TB);
 
 	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
 	    peer->ftm.lmr_feedback)
-		FTM_PUT_FLAG(LMR_FEEDBACK);
+		FTM_SET_FLAG(LMR_FEEDBACK);
+}
+
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+			      struct cfg80211_pmsr_request_peer *peer,
+			      struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+	memcpy(target->bssid, peer->addr, ETH_ALEN);
+	target->burst_period =
+		cpu_to_le16(peer->ftm.burst_period);
+	target->samples_per_burst = peer->ftm.ftms_per_burst;
+	target->num_of_bursts = peer->ftm.num_bursts_exp;
+	target->ftmr_max_retries = peer->ftm.ftmr_retries;
+	iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
 }
 
 static int
@@ -512,21 +528,10 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
 	return 0;
 }
 
-static int
-iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct
ieee80211_vif *vif, - struct cfg80211_pmsr_request_peer *peer, - struct iwl_tof_range_req_ap_entry_v6 *target) +static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + u8 *sta_id, __le32 *flags) { - int ret; - - ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, - &target->format_bw, - &target->ctrl_ch_position); - if (ret) - return ret; - - iwl_mvm_ftm_put_target_common(mvm, peer, target); - if (vif->cfg.assoc) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_sta *sta; @@ -538,8 +543,8 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN)) continue; - target->sta_id = mvmvif->link[link_id]->ap_sta_id; - sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]); + *sta_id = mvmvif->link[link_id]->ap_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return PTR_ERR_OR_ZERO(sta); @@ -547,14 +552,42 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) - FTM_PUT_FLAG(PMF); + FTM_SET_FLAG(PMF); break; } rcu_read_unlock(); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->ftm_unprotected) { + *sta_id = IWL_INVALID_STA; + *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF); + } +#endif } else { - target->sta_id = IWL_MVM_INVALID_STA; + *sta_id = IWL_INVALID_STA; } + return 0; +} + +static int +iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v6 *target) +{ + int ret; + + ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, + &target->format_bw, + &target->ctrl_ch_position); + if (ret) + return ret; + + iwl_mvm_ftm_put_target_common(mvm, peer, target); + + iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id, + &target->initiator_ap_flags); + /* * TODO: Beacon interval is currently unknown, so use the common value * of 100 TUs. 
@@ -692,57 +725,63 @@ static void iter(struct ieee80211_hw *hw, struct ieee80211_key_conf *key, void *data) { - struct iwl_tof_range_req_ap_entry_v6 *target = data; + struct iwl_mvm_ftm_iter_data *target = data; if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN)) return; WARN_ON(!sta->mfp); - if (WARN_ON(key->keylen > sizeof(target->tk))) - return; - - memcpy(target->tk, key->key, key->keylen); - target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); - WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID); + target->tk = key->key; + *target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); + WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID); } static void iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_tof_range_req_ap_entry_v7 *target) + u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk, + u8 *rx_pn, u8 *tx_pn, __le32 *flags) { struct iwl_mvm_ftm_pasn_entry *entry; - u32 flags = le32_to_cpu(target->initiator_ap_flags); +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->ftm_unprotected) + return; +#endif - if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB | + if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB | IWL_INITIATOR_AP_FLAGS_TB))) return; lockdep_assert_held(&mvm->mutex); list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { - if (memcmp(entry->addr, target->bssid, sizeof(entry->addr))) + if (memcmp(entry->addr, bssid, sizeof(entry->addr))) continue; - target->cipher = entry->cipher; + *cipher = entry->cipher; if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK) - memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); + memcpy(hltk, entry->hltk, sizeof(entry->hltk)); else - memset(target->hltk, 0, sizeof(target->hltk)); + memset(hltk, 0, sizeof(entry->hltk)); if (vif->cfg.assoc && - !memcmp(vif->bss_conf.bssid, target->bssid, - sizeof(target->bssid))) - ieee80211_iter_keys(mvm->hw, vif, iter, target); - else - memcpy(target->tk, entry->tk, sizeof(target->tk)); + !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) { + struct iwl_mvm_ftm_iter_data target; + + target.bssid = bssid; + target.cipher = cipher; + ieee80211_iter_keys(mvm->hw, vif, iter, &target); + } else { + memcpy(tk, entry->tk, sizeof(entry->tk)); + } - memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn)); - memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn)); + memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn)); + memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn)); - target->initiator_ap_flags |= - cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED); + FTM_SET_FLAG(SECURED); return; } } @@ -756,7 +795,11 @@ iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (err) return err; - iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); + iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid, + &target->cipher, target->hltk, + target->tk, target->rx_pn, + target->tx_pn, + &target->initiator_ap_flags); return err; } @@ -821,9 +864,10 @@ iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * If secure LTF is turned off, replace the flag with PMF only */ flags = le32_to_cpu(target->initiator_ap_flags); - if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && - !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { - flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; + if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) { + if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF) + flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; + flags |= IWL_INITIATOR_AP_FLAGS_PMF; target->initiator_ap_flags = cpu_to_le32(flags); } 
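The hunk above (iwl_mvm_ftm_put_target_v8) and the v10 target entry added in the next hunk apply the same rule: when the driver is built without secure-LTF support, the SECURED flag is stripped from the target, but protected management frames (PMF) are still required. The following is a minimal standalone C sketch of that rule, not part of the patch; the AP_FLAGS_* values are illustrative stand-ins, not the driver's real IWL_INITIATOR_AP_FLAGS_* definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real flag values live in the firmware
     * API headers and may differ. */
    #define AP_FLAGS_SECURED (1u << 11)
    #define AP_FLAGS_PMF     (1u << 12)

    /* Mirror of the rule above: if secured ranging was negotiated but the
     * build disables secure LTF, drop SECURED yet still require PMF. */
    static uint32_t ftm_fixup_secured(uint32_t flags, bool secure_ltf)
    {
            if (flags & AP_FLAGS_SECURED) {
                    if (!secure_ltf)
                            flags &= ~AP_FLAGS_SECURED;
                    flags |= AP_FLAGS_PMF;
            }
            return flags;
    }

    int main(void)
    {
            /* prints 0x00001000: PMF only, SECURED stripped */
            printf("0x%08x\n", ftm_fixup_secured(AP_FLAGS_SECURED, false));
            /* prints 0x00001800: SECURED kept, PMF added */
            printf("0x%08x\n", ftm_fixup_secured(AP_FLAGS_SECURED, true));
            return 0;
    }

The two printed results correspond to the two branches of the hunk above; the v10 path added below repeats the same block after filling the NDP parameters.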
@@ -902,6 +946,105 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } +static int +iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v10 *target) +{ + u32 i2r_max_sts, flags; + int ret; + + ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, + &target->format_bw, + &target->ctrl_ch_position); + if (ret) + return ret; + + memcpy(target->bssid, peer->addr, ETH_ALEN); + target->burst_period = + cpu_to_le16(peer->ftm.burst_period); + target->samples_per_burst = peer->ftm.ftms_per_burst; + target->num_of_bursts = peer->ftm.num_bursts_exp; + iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags); + iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id, + &target->initiator_ap_flags); + iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid, + &target->cipher, target->hltk, + target->tk, target->rx_pn, + target->tx_pn, + &target->initiator_ap_flags); + + i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 : + IWL_MVM_FTM_I2R_MAX_STS; + + target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | + (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) | + (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS); + target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | + (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) | + (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS); + + if (peer->ftm.non_trigger_based) { + target->min_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); + target->burst_period = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); + } else { + target->min_time_between_msr = cpu_to_le16(0); + } + + target->band = + iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band); + + /* + * TODO: Beacon interval is currently unknown, so use the common value + * of 100 TUs. 
+	 */
+	target->beacon_interval = cpu_to_le16(100);
+
+	/*
+	 * If secure LTF is turned off, replace the flag with PMF only
+	 */
+	flags = le32_to_cpu(target->initiator_ap_flags);
+	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
+		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
+		target->initiator_ap_flags = cpu_to_le32(flags);
+	}
+
+	return 0;
+}
+
+static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 struct cfg80211_pmsr_request *req)
+{
+	struct iwl_tof_range_req_cmd_v14 cmd;
+	struct iwl_host_cmd hcmd = {
+		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+		.dataflags[0] = IWL_HCMD_DFL_DUP,
+		.data[0] = &cmd,
+		.len[0] = sizeof(cmd),
+	};
+	u8 i;
+	int err;
+
+	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+	for (i = 0; i < cmd.num_of_ap; i++) {
+		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+		struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];
+
+		err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
+		if (err)
+			return err;
+	}
+
+	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
 int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		      struct cfg80211_pmsr_request *req)
 {
@@ -920,6 +1063,11 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 					  IWL_FW_CMD_VER_UNKNOWN);
 
 	switch (cmd_ver) {
+	case 15:
+		/* Version 15 has the same struct as 14 */
+	case 14:
+		err = iwl_mvm_ftm_start_v14(mvm, vif, req);
+		break;
 	case 13:
 		err = iwl_mvm_ftm_start_v13(mvm, vif, req);
 		break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 8f10590f9cdd..e6e468e81ab3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 #include <net/cfg80211.h>
 #include <linux/etherdevice.h>
@@ -12,6 +12,9 @@ struct iwl_mvm_pasn_sta {
 	struct list_head list;
 	struct iwl_mvm_int_sta int_sta;
 	u8 addr[ETH_ALEN];
+
+	/* must be last as it is followed by a buffer holding the key */
+	struct ieee80211_key_conf keyconf;
 };
 
 struct iwl_mvm_pasn_hltk_data {
@@ -85,7 +88,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
 
 static void
 iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm,
-			      struct iwl_tof_responder_config_cmd_v9 *cmd)
+			      struct iwl_tof_responder_config_cmd *cmd)
 {
 	/* Up to 2 R2I STS are allowed on the responder */
 	u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ?
@@ -114,7 +117,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
 	 * field interpretation is different), so the same struct can be used
	 * for all cases.
*/ - struct iwl_tof_responder_config_cmd_v9 cmd = { + struct iwl_tof_responder_config_cmd cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | @@ -128,8 +131,13 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (cmd_ver >= 10) { + cmd.band = + iwl_mvm_phy_band_from_nl80211(chandef->chan->band); + } + /* Use a default of bss_color=1 for now */ - if (cmd_ver == 9) { + if (cmd_ver >= 9) { cmd.cmd_valid_fields |= cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR | IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR); @@ -145,7 +153,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, } if (cmd_ver >= 8) - iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); + iwl_mvm_ftm_responder_set_ndp(mvm, (void *)&cmd); if (cmd_ver >= 7) err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw, @@ -303,6 +311,10 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm, { list_del(&sta->list); + if (sta->keyconf.keylen) + iwl_mvm_sec_key_del_pasn(mvm, vif, BIT(sta->int_sta.sta_id), + &sta->keyconf); + if (iwl_mvm_has_mld_api(mvm->fw)) iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id); else @@ -342,6 +354,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, } if (hltk && hltk_len) { + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) { + IWL_ERR(mvm, "No support for secure LTF measurement\n"); + return -EINVAL; + } + hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { IWL_ERR(mvm, "invalid cipher: %u\n", cipher); @@ -352,12 +370,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, } if (tk && tk_len) { - sta = kzalloc(sizeof(*sta), GFP_KERNEL); + sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL); if (!sta) return -ENOBUFS; ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr, - cipher, tk, tk_len); + cipher, tk, tk_len, &sta->keyconf); if (ret) { kfree(sta); return ret; @@ -425,7 +443,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif, rcu_read_unlock(); phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, &ctx.ap, ctx.rx_chains_static, ctx.rx_chains_dynamic); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 1252084662c6..d10877856049 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -16,6 +16,7 @@ #include "fw/acpi.h" #include "fw/pnvm.h" #include "fw/uefi.h" +#include "fw/regulatory.h" #include "mvm.h" #include "fw/dbg.h" @@ -27,9 +28,6 @@ #define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) -#define IWL_UATS_VLP_AP_SUPPORTED BIT(29) -#define IWL_UATS_AFC_AP_SUPPORTED BIT(30) - struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -93,20 +91,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; - __le32 *dump_data = mfu_dump_notif->data; - int n_words = 
le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); - int i; if (mfu_dump_notif->index_num == 0) IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); - - for (i = 0; i < n_words; i++) - IWL_DEBUG_INFO(mvm, - "MFUART assert dump, dword %u: 0x%08x\n", - le16_to_cpu(mfu_dump_notif->index_num) * - n_words + i, - le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, @@ -417,7 +405,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, UREG_LMAC2_CURRENT_PC)); } - if (ret == -ETIMEDOUT && !mvm->pldr_sync) + if (ret == -ETIMEDOUT && !mvm->fw_product_reset) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_ALIVE_TIMEOUT); @@ -434,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, /* if reached this point, Alive notification was received */ iwl_mei_alive_notif(true); + iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); + ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, &mvm->fw->ucode_capa); if (ret) { @@ -442,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return ret; } - iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); - /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they @@ -469,12 +457,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, #endif /* + * For pre-MLD API (MLD API doesn't use the timestamps): * All the BSSes in the BSS table include the GP2 in the system * at the beacon Rx time, this is of course no longer relevant * since we are resetting the firmware. * Purge all the BSS table. */ - cfg80211_bss_flush(mvm->hw->wiphy); + if (!mvm->mld_api_is_used) + cfg80211_bss_flush(mvm->hw->wiphy); return 0; } @@ -487,51 +477,45 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, #endif /* CONFIG_ACPI */ } -#if defined(CONFIG_ACPI) && defined(CONFIG_EFI) static void iwl_mvm_uats_init(struct iwl_mvm *mvm) { u8 cmd_ver; int ret; struct iwl_host_cmd cmd = { .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, - UATS_TABLE_CMD), + MCC_ALLOWED_AP_TYPE_CMD), .flags = 0, .data[0] = &mvm->fwrt.uats_table, .len[0] = sizeof(mvm->fwrt.uats_table), .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; - if (!(mvm->trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210)) { + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n"); return; } - if (!mvm->fwrt.uats_enabled) { - IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n"); - return; - } - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver != 1) { IWL_DEBUG_RADIO(mvm, - "UATS_TABLE_CMD ver %d not supported\n", + "MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n", cmd_ver); return; } ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); if (ret < 0) { - IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret); + IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret); return; } ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret < 0) - IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret); + IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n", + ret); else - IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD sent to FW\n"); + IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n"); } static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) @@ -567,17 +551,6 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) return ret; } -#else - -static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) -{ - return 0; -} - -static void 
iwl_mvm_uats_init(struct iwl_mvm *mvm) -{ -} -#endif static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { @@ -647,8 +620,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); /* if needed, we'll reset this on our way out later */ - mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM; - if (mvm->pldr_sync && iwl_mei_pldr_req()) + mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM; + if (mvm->fw_product_reset && iwl_mei_pldr_req()) return -EBUSY; } @@ -667,9 +640,10 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); /* if we needed reset then fail here, but notify and remove */ - if (mvm->pldr_sync) { + if (mvm->fw_product_reset) { iwl_mei_alive_notif(false); - iwl_trans_pcie_remove(mvm->trans, true); + iwl_trans_pcie_reset(mvm->trans, + IWL_RESET_MODE_RESCAN); } goto error; @@ -677,6 +651,11 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, NULL); + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans, + CNVI_PMU_STEP_FLOW) & + CNVI_PMU_STEP_FLOW_FORCE_URM); + /* Send init config command to mark that we are sending NVM access * commands */ @@ -701,14 +680,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) goto error; } - if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) { - ret = iwl_nvm_init(mvm); - if (ret) { - IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); - goto error; - } - } - ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), CMD_SEND_IN_RFKILL, @@ -733,7 +704,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) return ret; /* Read the NVM only at driver load time, no need to do this twice */ - if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { + if (!mvm->nvm_data) { mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw, mvm->set_tx_ant, mvm->set_rx_ant); if (IS_ERR(mvm->nvm_data)) { @@ -858,7 +829,7 @@ remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: mvm->rfkill_safe_init_done = false; - if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { + if (!mvm->nvm_data) { /* we want to debug INIT and we have no NVM - fake */ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + sizeof(struct ieee80211_channel) + @@ -890,24 +861,39 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) sizeof(cmd), &cmd); } -#ifdef CONFIG_ACPI int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { u32 cmd_id = REDUCE_TX_POWER_CMD; - struct iwl_dev_tx_power_cmd cmd = { + struct iwl_dev_tx_power_cmd_v3_v8 cmd = { + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), + }; + struct iwl_dev_tx_power_cmd cmd_v9_v10 = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; __le16 *per_chain; int ret; u16 len = 0; u32 n_subbands; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - if (cmd_ver == 7) { + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3); + void *cmd_data = &cmd; + + if (cmd_ver == 10) { + len = sizeof(cmd_v9_v10.v10); + n_subbands = IWL_NUM_SUB_BANDS_V2; + per_chain = &cmd_v9_v10.v10.per_chain[0][0][0]; + cmd_v9_v10.v10.flags = + cpu_to_le32(mvm->fwrt.reduced_power_flags); + } else if (cmd_ver == 9) { + len = sizeof(cmd_v9_v10.v9); + n_subbands = IWL_NUM_SUB_BANDS_V1; + per_chain = 
&cmd_v9_v10.v9.per_chain[0][0]; + } else if (cmd_ver >= 7) { len = sizeof(cmd.v7); n_subbands = IWL_NUM_SUB_BANDS_V2; per_chain = cmd.v7.per_chain[0][0]; cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags); + if (cmd_ver == 8) + len = sizeof(cmd.v8); } else if (cmd_ver == 6) { len = sizeof(cmd.v6); n_subbands = IWL_NUM_SUB_BANDS_V2; @@ -928,12 +914,17 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) per_chain = cmd.v3.per_chain[0][0]; } - /* all structs have the same common part, add it */ + /* all structs have the same common part, add its length */ len += sizeof(cmd.common); - ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, - IWL_NUM_CHAIN_TABLES, - n_subbands, prof_a, prof_b); + if (cmd_ver < 9) + len += sizeof(cmd.per_band); + else + cmd_data = &cmd_v9_v10; + + ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain, + IWL_NUM_CHAIN_TABLES, + n_subbands, prof_a, prof_b); /* return on error or if the profile is disabled (positive number) */ if (ret) @@ -942,7 +933,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) iwl_mei_set_power_limit(per_chain); IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); - return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data); } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) @@ -989,7 +980,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) resp = (void *)cmd.resp_pkt->data; ret = le32_to_cpu(resp->profile_idx); - if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3)) + if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM)) ret = -EIO; iwl_free_resp(&cmd); @@ -1003,7 +994,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) u16 len; u32 n_bands; u32 n_profiles; - u32 sk = 0; + __le32 sk = cpu_to_le32(0); int ret; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); @@ -1020,27 +1011,35 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) /* the ops field is at the same spot for all versions, so set in v1 */ cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); + /* Only set to South Korea if the table revision is 1 */ + if (mvm->fwrt.geo_rev == 1) + sk = cpu_to_le32(1); + if (cmd_ver == 5) { len = sizeof(cmd.v5); n_bands = ARRAY_SIZE(cmd.v5.table[0]); - n_profiles = ACPI_NUM_GEO_PROFILES_REV3; + n_profiles = BIOS_GEO_MAX_PROFILE_NUM; + cmd.v5.table_revision = sk; } else if (cmd_ver == 4) { len = sizeof(cmd.v4); n_bands = ARRAY_SIZE(cmd.v4.table[0]); - n_profiles = ACPI_NUM_GEO_PROFILES_REV3; + n_profiles = BIOS_GEO_MAX_PROFILE_NUM; + cmd.v4.table_revision = sk; } else if (cmd_ver == 3) { len = sizeof(cmd.v3); n_bands = ARRAY_SIZE(cmd.v3.table[0]); - n_profiles = ACPI_NUM_GEO_PROFILES; + n_profiles = BIOS_GEO_MIN_PROFILE_NUM; + cmd.v3.table_revision = sk; } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) { len = sizeof(cmd.v2); n_bands = ARRAY_SIZE(cmd.v2.table[0]); - n_profiles = ACPI_NUM_GEO_PROFILES; + n_profiles = BIOS_GEO_MIN_PROFILE_NUM; + cmd.v2.table_revision = sk; } else { len = sizeof(cmd.v1); n_bands = ARRAY_SIZE(cmd.v1.table[0]); - n_profiles = ACPI_NUM_GEO_PROFILES; + n_profiles = BIOS_GEO_MIN_PROFILE_NUM; } BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) != @@ -1052,8 +1051,8 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table)); /* the table is at the same position for all versions, so set use v1 */ - ret 
= iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
-				n_bands, n_profiles);
+	ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
+				     n_bands, n_profiles);
 
 	/*
 	 * It is a valid scenario to not support SAR, or miss wgds table,
@@ -1062,27 +1061,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 	if (ret)
 		return 0;
 
-	/* Only set to South Korea if the table revision is 1 */
-	if (mvm->fwrt.geo_rev == 1)
-		sk = 1;
-
-	/*
-	 * Set the table_revision to South Korea (1) or not (0). The
-	 * element name is misleading, as it doesn't contain the table
-	 * revision number, but whether the South Korea variation
-	 * should be used.
-	 * This must be done after calling iwl_sar_geo_init().
-	 */
-	if (cmd_ver == 5)
-		cmd.v5.table_revision = cpu_to_le32(sk);
-	else if (cmd_ver == 4)
-		cmd.v4.table_revision = cpu_to_le32(sk);
-	else if (cmd_ver == 3)
-		cmd.v3.table_revision = cpu_to_le32(sk);
-	else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
-			    IWL_UCODE_TLV_API_SAR_TABLE_VER))
-		cmd.v2.table_revision = cpu_to_le32(sk);
-
 	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 }
 
@@ -1091,7 +1069,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
 	union iwl_ppag_table_cmd cmd;
 	int ret, cmd_size;
 
-	ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+	ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
 	/* Not supporting PPAG table is a valid scenario */
 	if (ret < 0)
 		return 0;
@@ -1110,111 +1088,54 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
 {
 	/* no need to read the table, done in INIT stage */
-	if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
+	if (!(iwl_is_ppag_approved(&mvm->fwrt)))
 		return 0;
 
 	return iwl_mvm_ppag_send_cmd(mvm);
 }
 
-static const struct dmi_system_id dmi_tas_approved_list[] = {
-	{ .ident = "HP",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-		},
-	},
-	{ .ident = "SAMSUNG",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
-		},
-	},
-	{ .ident = "LENOVO",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		},
-	},
-	{ .ident = "DELL",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		},
-	},
-	{ .ident = "MSFT",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
-		},
-	},
-	{ .ident = "Acer",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-		},
-	},
-	{ .ident = "ASUS",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-		},
-	},
-	{ .ident = "GOOGLE-HP",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
-			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
-		},
-	},
-	{ .ident = "MSI",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
-		},
-	},
-	{ .ident = "Honor",
-	  .matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
-		},
-	},
-	/* keep last */
-	{}
-};
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
+static bool
+iwl_mvm_add_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
 {
-	return dmi_check_system(dmi_tas_approved_list);
-}
-
-static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
-{
-	int i;
-	u32 size = le32_to_cpu(*le_size);
-
 	/* Verify that there is room for another country */
-	if (size >= IWL_TAS_BLOCK_LIST_MAX)
+	if (*size >= IWL_WTAS_BLACK_LIST_MAX)
 		return false;
 
-	for (i = 0; i < size; i++) {
-		if (list[i] == cpu_to_le32(mcc))
+	for (u8 i = 0; i < *size; i++) {
+		if (list[i] == mcc)
 			return true;
 	}
 
-	list[size++] = cpu_to_le32(mcc);
-	*le_size = cpu_to_le32(size);
+	list[(*size)++] = mcc;
 	return true;
 }
 
 static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 {
 	u32 cmd_id =
WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG); + int fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + struct iwl_tas_selection_data selection_data = {}; + struct iwl_tas_config_cmd_v2_v4 cmd_v2_v4 = {}; + struct iwl_tas_config_cmd cmd_v5 = {}; + struct iwl_tas_data data = {}; + void *cmd_data = &cmd_v2_v4; + int cmd_size; int ret; - union iwl_tas_config_cmd cmd = {}; - int cmd_size, fw_ver; - BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) < - APCI_WTAS_BLACK_LIST_MAX); + BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) != + IWL_WTAS_BLACK_LIST_MAX); + BUILD_BUG_ON(ARRAY_SIZE(cmd_v2_v4.common.block_list_array) != + IWL_WTAS_BLACK_LIST_MAX); + BUILD_BUG_ON(ARRAY_SIZE(cmd_v5.block_list_array) != + IWL_WTAS_BLACK_LIST_MAX); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) { IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n"); return; } - fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - - ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver); + ret = iwl_bios_get_tas_table(&mvm->fwrt, &data); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "TAS table invalid or unavailable. (%d)\n", @@ -1222,19 +1143,19 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) return; } - if (ret == 0) + if (ret == 0 && fw_ver < 5) return; - if (!iwl_mvm_is_vendor_in_approved_list()) { + if (!iwl_is_tas_approved()) { IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", - dmi_get_system_info(DMI_SYS_VENDOR)); - if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, - &cmd.v4.block_list_size, - IWL_MCC_US)) || - (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, - &cmd.v4.block_list_size, - IWL_MCC_CANADA))) { + dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>"); + if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array, + &data.block_list_size, + IWL_MCC_US)) || + (!iwl_mvm_add_to_tas_block_list(data.block_list_array, + &data.block_list_size, + IWL_MCC_CANADA))) { IWL_DEBUG_RADIO(mvm, "Unable to add US/Canada to TAS block list, disabling TAS\n"); return; @@ -1242,142 +1163,96 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) } else { IWL_DEBUG_RADIO(mvm, "System vendor '%s' is in the approved list.\n", - dmi_get_system_info(DMI_SYS_VENDOR)); + dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>"); + } + + if (fw_ver < 5) { + selection_data = iwl_parse_tas_selection(data.tas_selection, + data.table_revision); + cmd_v2_v4.common.block_list_size = + cpu_to_le32(data.block_list_size); + for (u8 i = 0; i < data.block_list_size; i++) + cmd_v2_v4.common.block_list_array[i] = + cpu_to_le32(data.block_list_array[i]); + } + + if (fw_ver == 5) { + cmd_size = sizeof(cmd_v5); + cmd_data = &cmd_v5; + cmd_v5.block_list_size = cpu_to_le16(data.block_list_size); + for (u16 i = 0; i < data.block_list_size; i++) + cmd_v5.block_list_array[i] = + cpu_to_le16(data.block_list_array[i]); + cmd_v5.tas_config_info.table_source = data.table_source; + cmd_v5.tas_config_info.table_revision = data.table_revision; + cmd_v5.tas_config_info.value = cpu_to_le32(data.tas_selection); + } else if (fw_ver == 4) { + cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v4); + cmd_v2_v4.v4.override_tas_iec = selection_data.override_tas_iec; + cmd_v2_v4.v4.enable_tas_iec = selection_data.enable_tas_iec; + cmd_v2_v4.v4.usa_tas_uhb_allowed = + selection_data.usa_tas_uhb_allowed; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT) && + selection_data.canada_tas_uhb_allowed) + cmd_v2_v4.v4.uhb_allowed_flags 
= TAS_UHB_ALLOWED_CANADA; + } else if (fw_ver == 3) { + cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v3); + cmd_v2_v4.v3.override_tas_iec = + cpu_to_le16(selection_data.override_tas_iec); + cmd_v2_v4.v3.enable_tas_iec = + cpu_to_le16(selection_data.enable_tas_iec); + } else if (fw_ver == 2) { + cmd_size = sizeof(cmd_v2_v4.common); + } else { + return; } - /* v4 is the same size as v3, so no need to differentiate here */ - cmd_size = fw_ver < 3 ? - sizeof(struct iwl_tas_config_cmd_v2) : - sizeof(struct iwl_tas_config_cmd_v3); - - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, cmd_data); if (ret < 0) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } -static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) +static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { - u8 value; - int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE, - &iwl_rfi_guid, &value); + u32 value = 0; + /* default behaviour is disabled */ + bool bios_enable_rfi = false; + int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value); + if (ret < 0) { IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret); + return bios_enable_rfi; + } - } else if (value >= DSM_VALUE_RFI_MAX) { - IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n", - value); - - } else if (value == DSM_VALUE_RFI_ENABLE) { + value &= DSM_VALUE_RFI_DISABLE; + /* RFI BIOS CONFIG value can be 0 or 3 only. + * i.e 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled. + * 1 and 2 are invalid BIOS configurations, So, it's not possible to + * disable ddr/dlvr separately. + */ + if (!value) { IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n"); - return DSM_VALUE_RFI_ENABLE; + bios_enable_rfi = true; + } else if (value == DSM_VALUE_RFI_DISABLE) { + IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n"); + } else { + IWL_DEBUG_RADIO(mvm, + "DSM RFI got invalid value, value=%d\n", value); } - IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n"); - - /* default behaviour is disabled */ - return DSM_VALUE_RFI_DISABLE; + return bios_enable_rfi; } static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { + struct iwl_lari_config_change_cmd cmd; + size_t cmd_size; int ret; - u32 value; - struct iwl_lari_config_change_cmd_v7 cmd = {}; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE), 1); - - cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); - ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT, - &iwl_guid, &value); - if (!ret) - cmd.oem_11ax_allow_bitmap = cpu_to_le32(value); - - ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, - DSM_FUNC_ENABLE_UNII4_CHAN, - &iwl_guid, &value); - if (!ret) - cmd.oem_unii4_allow_bitmap = cpu_to_le32(value); - - ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, - DSM_FUNC_ACTIVATE_CHANNEL, - &iwl_guid, &value); + ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size); if (!ret) { - if (cmd_ver < 8) - value &= ~ACTIVATE_5G2_IN_WW_MASK; - cmd.chan_state_active_bitmap = cpu_to_le32(value); - } - - ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, - DSM_FUNC_ENABLE_6E, - &iwl_guid, &value); - if (!ret) - cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); - - ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, - DSM_FUNC_FORCE_DISABLE_CHANNELS, - &iwl_guid, &value); - if (!ret) - cmd.force_disable_channels_bitmap = cpu_to_le32(value); - - ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, - DSM_FUNC_ENERGY_DETECTION_THRESHOLD, - &iwl_guid, &value); - if (!ret) - 
cmd.edt_bitmap = cpu_to_le32(value); - - if (cmd.config_bitmap || - cmd.oem_uhb_allow_bitmap || - cmd.oem_11ax_allow_bitmap || - cmd.oem_unii4_allow_bitmap || - cmd.chan_state_active_bitmap || - cmd.force_disable_channels_bitmap || - cmd.edt_bitmap) { - size_t cmd_size; - - switch (cmd_ver) { - case 8: - case 7: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); - break; - case 6: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); - break; - case 5: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); - break; - case 4: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); - break; - case 3: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); - break; - case 2: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); - break; - default: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); - break; - } - - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", - le32_to_cpu(cmd.config_bitmap), - le32_to_cpu(cmd.oem_11ax_allow_bitmap)); - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", - le32_to_cpu(cmd.oem_unii4_allow_bitmap), - le32_to_cpu(cmd.chan_state_active_bitmap), - cmd_ver); - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", - le32_to_cpu(cmd.oem_uhb_allow_bitmap), - le32_to_cpu(cmd.force_disable_channels_bitmap)); - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n", - le32_to_cpu(cmd.edt_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), @@ -1387,18 +1262,16 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) "Failed to send LARI_CONFIG_CHANGE (%d)\n", ret); } - - if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED || - le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED) - mvm->fwrt.uats_enabled = TRUE; } -void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) +void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm) { int ret; + iwl_acpi_get_guid_lock_status(&mvm->fwrt); + /* read PPAG table */ - ret = iwl_acpi_get_ppag_table(&mvm->fwrt); + ret = iwl_bios_get_ppag_table(&mvm->fwrt); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "PPAG BIOS table invalid or unavailable. (%d)\n", @@ -1406,7 +1279,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) } /* read SAR tables */ - ret = iwl_sar_get_wrds_table(&mvm->fwrt); + ret = iwl_bios_get_wrds_table(&mvm->fwrt); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "WRDS SAR BIOS table invalid or unavailable. (%d)\n", @@ -1415,7 +1288,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) * If not available, don't fail and don't bother with EWRD and * WGDS */ - if (!iwl_sar_get_wgds_table(&mvm->fwrt)) { + if (!iwl_bios_get_wgds_table(&mvm->fwrt)) { /* * If basic SAR is not available, we check for WGDS, * which should *not* be available either. If it is @@ -1426,7 +1299,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) } } else { - ret = iwl_sar_get_ewrd_table(&mvm->fwrt); + ret = iwl_bios_get_ewrd_table(&mvm->fwrt); /* if EWRD is not available, we can still use * WRDS, so don't fail */ if (ret < 0) @@ -1436,7 +1309,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) /* read geo SAR table */ if (iwl_sar_geo_support(&mvm->fwrt)) { - ret = iwl_sar_get_wgds_table(&mvm->fwrt); + ret = iwl_bios_get_wgds_table(&mvm->fwrt); if (ret < 0) IWL_DEBUG_RADIO(mvm, "Geo SAR BIOS table invalid or unavailable. 
(%d)\n", @@ -1446,64 +1319,23 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) } iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters); -} -#else /* CONFIG_ACPI */ - -inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, - int prof_a, int prof_b) -{ - return 1; -} - -inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) -{ - return -ENOENT; -} -static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) -{ - return 0; -} - -int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) -{ - return -ENOENT; -} - -static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) -{ - return 0; -} - -static void iwl_mvm_tas_init(struct iwl_mvm *mvm) -{ -} - -static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) -{ -} - -bool iwl_mvm_is_vendor_in_approved_list(void) -{ - return false; -} - -static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) -{ - return DSM_VALUE_RFI_DISABLE; + if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid)) + IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n"); } -void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) +static void iwl_mvm_disconnect_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) { + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_hw_restart_disconnect(vif); } -#endif /* CONFIG_ACPI */ - void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) { u32 error_log_size = mvm->fw->ucode_capa.error_log_size; + u32 status = 0; int ret; - u32 resp; struct iwl_fw_error_recovery_cmd recovery_cmd = { .flags = cpu_to_le32(flags), @@ -1511,7 +1343,6 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) }; struct iwl_host_cmd host_cmd = { .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), - .flags = CMD_WANT_SKB, .data = {&recovery_cmd, }, .len = {sizeof(recovery_cmd), }, }; @@ -1531,7 +1362,7 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) recovery_cmd.buf_size = cpu_to_le32(error_log_size); } - ret = iwl_mvm_send_cmd(mvm, &host_cmd); + ret = iwl_mvm_send_cmd_status(mvm, &host_cmd, &status); kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; @@ -1542,11 +1373,15 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) /* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */ if (flags & ERROR_RECOVERY_UPDATE_DB) { - resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data); - if (resp) + if (status) { IWL_ERR(mvm, "Failed to send recovery cmd blob was invalid %d\n", - resp); + status); + + ieee80211_iterate_interfaces(mvm->hw, 0, + iwl_mvm_disconnect_iterator, + mvm); + } } } @@ -1566,9 +1401,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); - - if (iwlmvm_mod_params.init_dbg) - return 0; return ret; } @@ -1596,6 +1428,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) int ret, i; struct ieee80211_supported_band *sband = NULL; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); @@ -1605,14 +1438,14 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - if (ret != -ERFKILL && !mvm->pldr_sync) + if (ret != -ERFKILL && !mvm->fw_product_reset) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); goto error; } /* FW loaded successfully */ - mvm->pldr_sync = false; + mvm->fw_product_reset = false; iwl_fw_disable_dbg_asserts(&mvm->fwrt); iwl_get_shared_mem_conf(&mvm->fwrt); @@ -1676,12 +1509,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); } - for (i = 0; i < 
IWL_MVM_FW_MAX_LINK_ID + 1; i++) + for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++) RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL); - memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map)); - - mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA; /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); @@ -1781,9 +1612,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (!mvm->ptp_data.ptp_clock) iwl_mvm_ptp_init(mvm); - if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid)) - IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n"); - ret = iwl_mvm_ppag_init(mvm); if (ret) goto error; @@ -1803,7 +1631,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_uats_init(mvm); if (iwl_rfi_supported(mvm)) { - if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) + if (iwl_mvm_eval_dsm_rfi(mvm)) iwl_rfi_send_config_cmd(mvm, NULL); } @@ -1812,8 +1640,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: - if (!iwlmvm_mod_params.init_dbg || !ret) - iwl_mvm_stop_device(mvm); + iwl_mvm_stop_device(mvm); return ret; } @@ -1821,6 +1648,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) { int ret, i; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index be48b0fc9cb6..851869c0bd50 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -1,33 +1,67 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022 - 2023 Intel Corporation + * Copyright (C) 2022 - 2024 Intel Corporation */ #include "mvm.h" #include "time-event.h" -static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvm_vif) -{ - u32 link_id; +#define HANDLE_ESR_REASONS(HOW) \ + HOW(BLOCKED_PREVENTION) \ + HOW(BLOCKED_WOWLAN) \ + HOW(BLOCKED_TPT) \ + HOW(BLOCKED_FW) \ + HOW(BLOCKED_NON_BSS) \ + HOW(BLOCKED_ROC) \ + HOW(BLOCKED_TMP_NON_BSS) \ + HOW(EXIT_MISSED_BEACON) \ + HOW(EXIT_LOW_RSSI) \ + HOW(EXIT_COEX) \ + HOW(EXIT_BANDWIDTH) \ + HOW(EXIT_CSA) \ + HOW(EXIT_LINK_USAGE) \ + HOW(EXIT_FAIL_ENTRY) - lockdep_assert_held(&mvm->mutex); +static const char *const iwl_mvm_esr_states_names[] = { +#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x, + HANDLE_ESR_REASONS(NAME_ENTRY) +}; + +const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state) +{ + int offs = ilog2(state); - link_id = ffz(mvm->fw_link_ids_map); + if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) || + !iwl_mvm_esr_states_names[offs]) + return "UNKNOWN"; - /* this case can happen if there're deactivated but not removed links */ - if (link_id > IWL_MVM_FW_MAX_LINK_ID) - return IWL_MVM_FW_LINK_ID_INVALID; + return iwl_mvm_esr_states_names[offs]; +} - mvm->fw_link_ids_map |= BIT(link_id); - return link_id; +static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask) +{ +#define NAME_FMT(x) "%s" +#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? 
"[" #x "]" : "", + IWL_DEBUG_INFO(mvm, + "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT) + " (0x%x)\n", + HANDLE_ESR_REASONS(NAME_PR) + mask); +#undef NAME_FMT +#undef NAME_PR } -static void iwl_mvm_release_fw_link_id(struct iwl_mvm *mvm, u32 link_id) +static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvm_vif) { + u32 i; + lockdep_assert_held(&mvm->mutex); - if (!WARN_ON(link_id > IWL_MVM_FW_MAX_LINK_ID)) - mvm->fw_link_ids_map &= ~BIT(link_id); + for (i = 0; i < ARRAY_SIZE(mvm->link_id_to_link_conf); i++) + if (!rcu_access_pointer(mvm->link_id_to_link_conf[i])) + return i; + + return IWL_MVM_FW_LINK_ID_INVALID; } static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm, @@ -46,27 +80,45 @@ static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf) +int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - unsigned int link_id = link_conf->link_id; - struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; - struct iwl_link_config_cmd cmd = {}; - - if (WARN_ON_ONCE(!link_info)) - return -EINVAL; + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[link_conf->link_id]; if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) { link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm, mvmvif); - if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)) + if (link_info->fw_link_id >= + ARRAY_SIZE(mvm->link_id_to_link_conf)) return -EINVAL; rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id], link_conf); } + return 0; +} + +int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct iwl_link_config_cmd cmd = {}; + unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD); + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1); + int ret; + + if (WARN_ON_ONCE(!link_info)) + return -EINVAL; + + ret = iwl_mvm_set_link_mapping(mvm, vif, link_conf); + if (ret) + return ret; + /* Update SF - Disable if needed. if this fails, SF might still be on * while many macs are bound, which is forbidden - so fail the binding. 
*/ @@ -84,11 +136,71 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN); - cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); + if (cmd_ver < 2) + cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD); } +struct iwl_mvm_esr_iter_data { + struct ieee80211_vif *vif; + unsigned int link_id; + bool lift_block; +}; + +static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_esr_iter_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id; + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) + return; + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[link_id]; + if (vif == data->vif && link_id == data->link_id) + continue; + if (link_info->active) + data->lift_block = false; + } +} + +int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + unsigned int link_id, bool active) +{ + /* An active link of a non-station vif blocks EMLSR. Upon activation, + * block EMLSR on the bss vif. Upon deactivation, check whether this + * link was the last active non-station link and, if so, unblock the + * bss vif. + */ + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); + struct iwl_mvm_esr_iter_data data = { + .vif = vif, + .link_id = link_id, + .lift_block = true, + }; + + if (IS_ERR_OR_NULL(bss_vif)) + return 0; + + if (active) + return iwl_mvm_block_esr_sync(mvm, bss_vif, + IWL_MVM_ESR_BLOCKED_NON_BSS); + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_esr_vif_iterator, &data); + if (data.lift_block) { + mutex_lock(&mvm->mutex); + iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS); + mutex_unlock(&mvm->mutex); + } + + return 0; +} + int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, u32 changes, bool active) @@ -100,6 +212,8 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_link_config_cmd cmd = {}; u32 ht_flag, flags = 0, flags_mask = 0; int ret; + unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD); + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1); if (WARN_ON_ONCE(!link_info || link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) return -EINVAL; @@ -121,10 +235,15 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, WARN_ON_ONCE(active == link_info->active); /* When deactivating a link session protection should - * be stopped + * be stopped. Also let the firmware know if we can't Tx.
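+ * (the csa_block_tx flag below is forwarded as cmd.block_tx, so the + * firmware also stops transmitting on a link that was Tx-blocked for + * a channel switch)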
*/ - if (!active && vif->type == NL80211_IFTYPE_STATION) + if (!active && vif->type == NL80211_IFTYPE_STATION) { iwl_mvm_stop_session_protection(mvm, vif); + if (link_info->csa_block_tx) { + cmd.block_tx = 1; + link_info->csa_block_tx = false; + } + } } cmd.link_id = cpu_to_le32(link_info->fw_link_id); @@ -146,7 +265,7 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN); - iwl_mvm_set_fw_basic_rates(mvm, vif, link_conf, + iwl_mvm_set_fw_basic_rates(mvm, vif, link_info, &cmd.cck_rates, &cmd.ofdm_rates); cmd.cck_short_preamble = cpu_to_le32(link_conf->use_short_preamble); @@ -181,6 +300,17 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, (link_conf->uora_ocw_range >> 3) & 0x7; } + /* ap_sta may be NULL if we're disconnecting */ + if (changes & LINK_CONTEXT_MODIFY_HE_PARAMS && mvmvif->ap_sta) { + struct ieee80211_link_sta *link_sta = + link_sta_dereference_check(mvmvif->ap_sta, link_id); + + if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he && + link_sta->he_cap.he_cap_elem.mac_cap_info[5] & + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX) + cmd.ul_mu_data_disable = 1; + } + /* TODO how to set ndp_fdbk_buff_th_exp? */ if (iwl_mvm_set_fw_mu_edca_params(mvm, mvmvif->link[link_id], @@ -190,12 +320,21 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) { + struct ieee80211_chanctx_conf *ctx; + struct cfg80211_chan_def *def = NULL; + + rcu_read_lock(); + ctx = rcu_dereference(link_conf->chanctx_conf); + if (ctx) + def = iwl_mvm_chanctx_def(mvm, ctx); + if (iwlwifi_mod_params.disable_11be || - !link_conf->eht_support) + !link_conf->eht_support || !def || + iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) >= 6) changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; else - cmd.puncture_mask = - cpu_to_le16(link_conf->eht_puncturing); + cmd.puncture_mask = cpu_to_le16(def->punctured); + rcu_read_unlock(); } cmd.bss_color = link_conf->he_bss_color.color; @@ -222,9 +361,11 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, send_cmd: cmd.modify_mask = cpu_to_le32(changes); cmd.flags = cpu_to_le32(flags); - cmd.flags_mask = cpu_to_le32(flags_mask); + if (cmd_ver < 6) + cmd.flags_mask = cpu_to_le32(flags_mask); cmd.spec_link_id = link_conf->link_id; - cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); + if (cmd_ver < 2) + cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY); if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE)) @@ -233,6 +374,24 @@ send_cmd: return ret; } +int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[link_conf->link_id]; + + /* mac80211 thought we have the link, but it was never configured */ + if (WARN_ON(!link_info || + link_info->fw_link_id >= + ARRAY_SIZE(mvm->link_id_to_link_conf))) + return -EINVAL; + + RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id], + NULL); + return 0; +} + int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { @@ -242,24 +401,19 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_link_config_cmd cmd = {}; int ret; - /* mac80211 thought we have 
the link, but it was never configured */ - if (WARN_ON(!link_info || - link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) + ret = iwl_mvm_unset_link_mapping(mvm, vif, link_conf); + if (ret) return 0; - RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id], - NULL); cmd.link_id = cpu_to_le32(link_info->fw_link_id); - iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id); link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; cmd.spec_link_id = link_conf->link_id; cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE); - if (!ret) - if (iwl_mvm_sf_update(mvm, vif, true)) - IWL_ERR(mvm, "Failed to update SF state\n"); + if (!ret && iwl_mvm_sf_update(mvm, vif, true)) + IWL_ERR(mvm, "Failed to update SF state\n"); return ret; } @@ -283,3 +437,742 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return ret; } + +struct iwl_mvm_rssi_to_grade { + s8 rssi[2]; + u16 grade; +}; + +#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \ + { \ + .rssi = {_lb, _hb_uhb}, \ + .grade = _grade \ + } + +/* + * This array must be sorted by increasing RSSI for proper functionality. + * The grades are actually estimated throughput, represented as fixed-point + * with a scale factor of 1/10. + */ +static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = { + RSSI_TO_GRADE_LINE(-85, -89, 177), + RSSI_TO_GRADE_LINE(-83, -86, 344), + RSSI_TO_GRADE_LINE(-82, -85, 516), + RSSI_TO_GRADE_LINE(-80, -83, 688), + RSSI_TO_GRADE_LINE(-77, -79, 1032), + RSSI_TO_GRADE_LINE(-73, -76, 1376), + RSSI_TO_GRADE_LINE(-70, -74, 1548), + RSSI_TO_GRADE_LINE(-69, -72, 1750), + RSSI_TO_GRADE_LINE(-65, -68, 2064), + RSSI_TO_GRADE_LINE(-61, -66, 2294), + RSSI_TO_GRADE_LINE(-58, -61, 2580), + RSSI_TO_GRADE_LINE(-55, -58, 2868), + RSSI_TO_GRADE_LINE(-46, -55, 3098), + RSSI_TO_GRADE_LINE(-43, -54, 3442) +}; + +#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade) + +#define DEFAULT_CHAN_LOAD_LB 30 +#define DEFAULT_CHAN_LOAD_HB 15 +#define DEFAULT_CHAN_LOAD_UHB 0 + +/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */ +#define SCALE_FACTOR 256 + +/* Convert a percentage from [0,100] to [0,255] */ +#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100) + +static unsigned int +iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_chan_width chan_width = + link_conf->chanreq.oper.width; + int mhz = nl80211_chan_width_to_mhz(chan_width); + unsigned int n_subchannels, n_punctured, puncturing_penalty; + + if (WARN_ONCE(mhz < 20 || mhz > 320, + "Invalid channel width : (%d)\n", mhz)) + return SCALE_FACTOR; + + /* No puncturing, no penalty */ + if (mhz < 80) + return SCALE_FACTOR; + + /* total number of subchannels */ + n_subchannels = mhz / 20; + /* how many of these are punctured */ + n_punctured = hweight16(link_conf->chanreq.oper.punctured); + + puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels; + return SCALE_FACTOR - puncturing_penalty; +} + +static unsigned int +iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf) +{ + struct ieee80211_vif *vif = link_conf->vif; + struct iwl_mvm_vif_link_info *mvm_link = + iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id]; + const struct element *bss_load_elem; + const struct ieee80211_bss_load_elem *bss_load; + enum nl80211_band band = link_conf->chanreq.oper.chan->band; + const struct cfg80211_bss_ies *ies; + unsigned int chan_load; + u32 chan_load_by_us; 
+ + rcu_read_lock(); + if (ieee80211_vif_link_active(vif, link_conf->link_id)) + ies = rcu_dereference(link_conf->bss->beacon_ies); + else + ies = rcu_dereference(link_conf->bss->ies); + + if (ies) + bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD, + ies->data, ies->len); + else + bss_load_elem = NULL; + + /* If there is no BSS Load element, take the defaults */ + if (!bss_load_elem || + bss_load_elem->datalen != sizeof(*bss_load)) { + rcu_read_unlock(); + switch (band) { + case NL80211_BAND_2GHZ: + chan_load = DEFAULT_CHAN_LOAD_LB; + break; + case NL80211_BAND_5GHZ: + chan_load = DEFAULT_CHAN_LOAD_HB; + break; + case NL80211_BAND_6GHZ: + chan_load = DEFAULT_CHAN_LOAD_UHB; + break; + default: + chan_load = 0; + break; + } + /* The defaults are given in percentage */ + return NORMALIZE_PERCENT_TO_255(chan_load); + } + + bss_load = (const void *)bss_load_elem->data; + /* Channel util is in range 0-255 */ + chan_load = bss_load->channel_util; + rcu_read_unlock(); + + if (!mvm_link || !mvm_link->active) + return chan_load; + + if (WARN_ONCE(!mvm_link->phy_ctxt, + "Active link (%u) without phy ctxt assigned!\n", + link_conf->link_id)) + return chan_load; + + /* channel load by us is given in percentage */ + chan_load_by_us = + NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us); + + /* Only use firmware values that can possibly be valid */ + if (chan_load_by_us <= chan_load) + chan_load -= chan_load_by_us; + + return chan_load; +} + +static unsigned int +iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf) +{ + return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf); +} + +/* Calculate the grade of a link. Returns 0 on error */ +VISIBLE_IF_IWLWIFI_KUNIT +unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_band band; + int i, rssi_idx; + s32 link_rssi; + unsigned int grade = MAX_GRADE; + + if (WARN_ON_ONCE(!link_conf)) + return 0; + + band = link_conf->chanreq.oper.chan->band; + if (WARN_ONCE(band != NL80211_BAND_2GHZ && + band != NL80211_BAND_5GHZ && + band != NL80211_BAND_6GHZ, + "Invalid band (%u)\n", band)) + return 0; + + link_rssi = MBM_TO_DBM(link_conf->bss->signal); + /* + * For 6 GHz the RSSI of the beacons is lower than + * the RSSI of the data. + */ + if (band == NL80211_BAND_6GHZ) + link_rssi += 4; + + rssi_idx = band == NL80211_BAND_2GHZ ?
0 : 1; + + /* No valid RSSI - take the lowest grade */ + if (!link_rssi) + link_rssi = rssi_to_grade_map[0].rssi[rssi_idx]; + + /* Get grade based on RSSI */ + for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) { + const struct iwl_mvm_rssi_to_grade *line = + &rssi_to_grade_map[i]; + + if (link_rssi > line->rssi[rssi_idx]) + continue; + grade = line->grade; + break; + } + + /* apply the channel load and puncturing factors */ + grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR; + grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR; + return grade; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade); + +static +u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif, + struct iwl_mvm_link_sel_data *data, + unsigned long usable_links, + u8 *best_link_idx) +{ + u8 n_data = 0; + u16 max_grade = 0; + unsigned long link_id; + + /* TODO: don't select links that weren't discovered in the last scan */ + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + data[n_data].link_id = link_id; + data[n_data].chandef = &link_conf->chanreq.oper; + data[n_data].signal = link_conf->bss->signal / 100; + data[n_data].grade = iwl_mvm_get_link_grade(link_conf); + + if (data[n_data].grade > max_grade) { + max_grade = data[n_data].grade; + *best_link_idx = n_data; + } + n_data++; + } + + return n_data; +} + +struct iwl_mvm_bw_to_rssi_threshs { + s8 low; + s8 high; +}; + +#define BW_TO_RSSI_THRESHOLDS(_bw) \ + [IWL_PHY_CHANNEL_MODE ## _bw] = { \ + .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \ + .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \ + } + +s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm, + const struct cfg80211_chan_def *chandef, + bool low) +{ + const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = { + BW_TO_RSSI_THRESHOLDS(20), + BW_TO_RSSI_THRESHOLDS(40), + BW_TO_RSSI_THRESHOLDS(80), + BW_TO_RSSI_THRESHOLDS(160) + /* 320 MHz has the same thresholds as 20 MHz */ + }; + const struct iwl_mvm_bw_to_rssi_threshs *threshs; + u8 chan_width = iwl_mvm_get_channel_width(chandef); + + if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ && + chandef->chan->band != NL80211_BAND_5GHZ && + chandef->chan->band != NL80211_BAND_6GHZ)) + return S8_MAX; + + /* 320 MHz (valid only in 6 GHz) uses the 20 MHz thresholds */ + if (chan_width == IWL_PHY_CHANNEL_MODE320) + chan_width = IWL_PHY_CHANNEL_MODE20; + + threshs = &bw_to_rssi_threshs_map[chan_width]; + + return low ?
threshs->low : threshs->high; +} + +static u32 +iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *link, + bool primary) +{ + struct wiphy *wiphy = mvm->hw->wiphy; + struct ieee80211_bss_conf *conf; + enum iwl_mvm_esr_state ret = 0; + s8 thresh; + + conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]); + if (WARN_ON_ONCE(!conf)) + return 0; + + /* BT Coex affects eSR mode only if one of the links is on LB */ + if (link->chandef->chan->band == NL80211_BAND_2GHZ && + (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal, + primary))) + ret |= IWL_MVM_ESR_EXIT_COEX; + + thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef, + false); + + if (link->signal < thresh) + ret |= IWL_MVM_ESR_EXIT_LOW_RSSI; + + if (conf->csa_active) + ret |= IWL_MVM_ESR_EXIT_CSA; + + if (ret) { + IWL_DEBUG_INFO(mvm, + "Link %d is not allowed for esr\n", + link->link_id); + iwl_mvm_print_esr_state(mvm, ret); + } + return ret; +} + +VISIBLE_IF_IWLWIFI_KUNIT +bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *a, + const struct iwl_mvm_link_sel_data *b) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + enum iwl_mvm_esr_state ret = 0; + + /* Per-link considerations */ + if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) || + iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false)) + return false; + + if (a->chandef->chan->band == b->chandef->chan->band || + a->chandef->width != b->chandef->width) + ret |= IWL_MVM_ESR_EXIT_BANDWIDTH; + + if (ret) { + IWL_DEBUG_INFO(mvm, + "Links %d and %d are not a valid pair for EMLSR\n", + a->link_id, b->link_id); + iwl_mvm_print_esr_state(mvm, ret); + return false; + } + + return true; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair); + +/* + * Returns the combined eSR grade of two given links. + * Returns 0 if eSR is not allowed with these 2 links.
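+ * + * Numerical sketch (hypothetical values): if the primary link grades + * 2064 and the secondary 1548, and the primary channel load is 64 on + * the 0..255 scale (25%), the combined grade is + * 2064 + 1548 * 64 / 256 = 2451; the busier the primary channel, the + * more the secondary link contributes.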
+ */ +static +unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *a, + const struct iwl_mvm_link_sel_data *b, + u8 *primary_id) +{ + struct ieee80211_bss_conf *primary_conf; + struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy; + unsigned int primary_load; + + lockdep_assert_wiphy(wiphy); + + /* a is always primary, b is always secondary */ + if (b->grade > a->grade) + swap(a, b); + + *primary_id = a->link_id; + + if (!iwl_mvm_mld_valid_link_pair(vif, a, b)) + return 0; + + primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]); + + if (WARN_ON_ONCE(!primary_conf)) + return 0; + + primary_load = iwl_mvm_get_chan_load(primary_conf); + + return a->grade + + ((b->grade * primary_load) / SCALE_FACTOR); +} + +void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; + struct iwl_mvm_link_sel_data *best_link; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 max_active_links = iwl_mvm_max_active_links(mvm, vif); + u16 usable_links = ieee80211_vif_usable_links(vif); + u8 best, primary_link, best_in_pair, n_data; + u16 max_esr_grade = 0, new_active_links; + + lockdep_assert_wiphy(mvm->hw->wiphy); + + if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif)) + return; + + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + + /* The logic below is a simple version that doesn't suit more than 2 + * links + */ + WARN_ON_ONCE(max_active_links > 2); + + n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links, + &best); + + if (WARN(!n_data, "Couldn't find a valid grade for any link!\n")) + return; + + best_link = &data[best]; + primary_link = best_link->link_id; + new_active_links = BIT(best_link->link_id); + + /* eSR is not supported/blocked, or only one usable link */ + if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) || + mvmvif->esr_disable_reason || n_data == 1) + goto set_active; + + for (u8 a = 0; a < n_data; a++) + for (u8 b = a + 1; b < n_data; b++) { + u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a], + &data[b], + &best_in_pair); + + if (esr_grade <= max_esr_grade) + continue; + + max_esr_grade = esr_grade; + primary_link = best_in_pair; + new_active_links = BIT(data[a].link_id) | + BIT(data[b].link_id); + } + + /* No valid pair was found, go with the best link */ + if (hweight16(new_active_links) <= 1) + goto set_active; + + /* For equal grade - prefer EMLSR */ + if (best_link->grade > max_esr_grade) { + primary_link = best_link->link_id; + new_active_links = BIT(best_link->link_id); + } +set_active: + IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. 
Primary = %d\n", + new_active_links, primary_link); + ieee80211_set_active_links_async(vif, new_active_links); + mvmvif->link_selection_res = new_active_links; + mvmvif->link_selection_primary = primary_link; +} + +u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* relevant data is written with both locks held, so read with either */ + lockdep_assert(lockdep_is_held(&mvmvif->mvm->mutex) || + lockdep_is_held(&mvmvif->mvm->hw->wiphy->mtx)); + + if (!ieee80211_vif_is_mld(vif)) + return 0; + + /* In AP mode, there is no primary link */ + if (vif->type == NL80211_IFTYPE_AP) + return __ffs(vif->active_links); + + if (mvmvif->esr_active && + !WARN_ON(!(BIT(mvmvif->primary_link) & vif->active_links))) + return mvmvif->primary_link; + + return __ffs(vif->active_links); +} + +/* + * For non-MLO/single link, this will return the deflink/single active link, + * respectively + */ +u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id) +{ + switch (hweight16(vif->active_links)) { + case 0: + return 0; + default: + WARN_ON(1); + fallthrough; + case 1: + return __ffs(vif->active_links); + case 2: + return __ffs(vif->active_links & ~BIT(link_id)); + } +} + +/* Reasons that can cause esr prevention */ +#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON +#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400) +#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300) +#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600) + +static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + enum iwl_mvm_esr_state reason) +{ + bool timeout_expired = time_after(jiffies, + mvmvif->last_esr_exit.ts + + IWL_MVM_PREVENT_ESR_TIMEOUT); + unsigned long delay; + + lockdep_assert_held(&mvm->mutex); + + /* Only handle reasons that can cause prevention */ + if (!(reason & IWL_MVM_ESR_PREVENT_REASONS)) + return false; + + /* + * Reset the counter if more than 400 seconds have passed between one + * exit and the other, or if we exited due to a different reason. + * Will also reset the counter after the long prevention is done. + */ + if (timeout_expired || mvmvif->last_esr_exit.reason != reason) { + mvmvif->exit_same_reason_count = 1; + return false; + } + + mvmvif->exit_same_reason_count++; + if (WARN_ON(mvmvif->exit_same_reason_count < 2 || + mvmvif->exit_same_reason_count > 3)) + return false; + + mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION; + + /* + * For the second exit, use a short prevention, and for the third one, + * use a long prevention. + */ + delay = mvmvif->exit_same_reason_count == 2 ? 
+ IWL_MVM_ESR_PREVENT_SHORT : + IWL_MVM_ESR_PREVENT_LONG; + + IWL_DEBUG_INFO(mvm, + "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n", + delay / HZ, mvmvif->exit_same_reason_count, + iwl_get_esr_state_string(reason), reason); + + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->prevent_esr_done_wk, delay); + return true; +} + +#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ) + +/* API to exit eSR mode */ +void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 new_active_links; + bool prevented; + + lockdep_assert_held(&mvm->mutex); + + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + + /* Nothing to do */ + if (!mvmvif->esr_active) + return; + + if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized)) + return; + + if (WARN_ON(!(vif->active_links & BIT(link_to_keep)))) + link_to_keep = __ffs(vif->active_links); + + new_active_links = BIT(link_to_keep); + IWL_DEBUG_INFO(mvm, + "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n", + iwl_get_esr_state_string(reason), reason, + vif->active_links, new_active_links); + + ieee80211_set_active_links_async(vif, new_active_links); + + /* Prevent EMLSR if needed */ + prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason); + + /* Remember why and when we exited EMLSR */ + mvmvif->last_esr_exit.ts = jiffies; + mvmvif->last_esr_exit.reason = reason; + + /* + * If EMLSR is prevented now - don't try to get back to EMLSR. + * If we exited due to a blocking event, we will try to get back to + * EMLSR when the corresponding unblocking event will happen. + */ + if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS) + return; + + /* If EMLSR is not blocked - try enabling it again in 30 seconds */ + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk, + round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME)); +} + +void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + + /* This should be called only with disable reasons */ + if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) + return; + + if (mvmvif->esr_disable_reason & reason) + return; + + IWL_DEBUG_INFO(mvm, + "Blocking EMLSR mode. 
reason = %s (0x%x)\n", + iwl_get_esr_state_string(reason), reason); + + mvmvif->esr_disable_reason |= reason; + + iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); + + iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep); +} + +int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason) +{ + int primary_link = iwl_mvm_get_primary_link(vif); + int ret; + + if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif)) + return 0; + + /* This should be called only with blocking reasons */ + if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) + return 0; + + /* leave ESR immediately, not only async with iwl_mvm_block_esr() */ + ret = ieee80211_set_active_links(vif, BIT(primary_link)); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + /* only additionally block for consistency and to avoid concurrency */ + iwl_mvm_block_esr(mvm, vif, reason, primary_link); + mutex_unlock(&mvm->mutex); + + return 0; +} + +static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts + + IWL_MVM_TRIGGER_LINK_SEL_TIME); + + lockdep_assert_held(&mvm->mutex); + + if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized || + mvmvif->esr_active) + return; + + IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n"); + + /* If we exited due to an EXIT reason, and the exit was in less than + * 30 seconds, then an MLO scan was already scheduled. + */ + if (!need_new_sel && + !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) { + IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n"); + return; + } + + /* + * If EMLSR was blocked for more than 30 seconds, or the last link + * selection decided to not enter EMLSR, trigger a new scan. + */ + if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) { + IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n"); + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk, 0); + } else { + /* + * If EMLSR was blocked for less than 30 seconds, and the last link + * selection decided to use EMLSR, activate EMLSR using the previous + * link selection result. + */ + IWL_DEBUG_INFO(mvm, + "Use the latest link selection result: 0x%x\n", + mvmvif->link_selection_res); + ieee80211_set_active_links_async(vif, + mvmvif->link_selection_res); + } +} + +void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + + /* This should be called only with disable reasons */ + if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) + return; + + /* No change */ + if (!(mvmvif->esr_disable_reason & reason)) + return; + + mvmvif->esr_disable_reason &= ~reason; + + IWL_DEBUG_INFO(mvm, + "Unblocking EMLSR mode.
reason = %s (0x%x)\n", + iwl_get_esr_state_string(reason), reason); + iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); + + if (!mvmvif->esr_disable_reason) + iwl_mvm_esr_unblocked(mvm, vif); +} + +void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link) +{ + link->bcast_sta.sta_id = IWL_INVALID_STA; + link->mcast_sta.sta_id = IWL_INVALID_STA; + link->ap_sta_id = IWL_INVALID_STA; + + for (int r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++) + link->smps_requests[r] = + IEEE80211_SMPS_AUTOMATIC; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 25a5a31e63c2..6b06732441c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -12,6 +12,7 @@ #include "fw-api.h" #include "mvm.h" #include "time-event.h" +#include "iwl-utils.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, @@ -216,7 +217,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) .preferred_tsf = NUM_TSF_IDS, .found_vif = false, }; - int ret, i; + int ret; lockdep_assert_held(&mvm->mutex); @@ -296,10 +297,9 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; - mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; + iwl_mvm_init_link(&mvmvif->deflink); /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) @@ -315,9 +315,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) - mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; - return 0; exit_fail: @@ -412,19 +409,18 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, } void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, + struct iwl_mvm_vif_link_info *link_info, __le32 *cck_rates, __le32 *ofdm_rates) { - struct ieee80211_chanctx_conf *chanctx; + struct iwl_mvm_phy_ctxt *phy_ctxt; u8 cck_ack_rates = 0, ofdm_ack_rates = 0; + enum nl80211_band band = NL80211_BAND_2GHZ; - rcu_read_lock(); - chanctx = rcu_dereference(link_conf->chanctx_conf); - iwl_mvm_ack_rates(mvm, vif, chanctx ? 
chanctx->def.chan->band - : NL80211_BAND_2GHZ, - &cck_ack_rates, &ofdm_ack_rates); + phy_ctxt = link_info->phy_ctxt; + if (phy_ctxt && phy_ctxt->channel) + band = phy_ctxt->channel->band; - rcu_read_unlock(); + iwl_mvm_ack_rates(mvm, vif, band, &cck_ack_rates, &ofdm_ack_rates); *cck_rates = cpu_to_le32((u32)cck_ack_rates); *ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); @@ -466,7 +462,7 @@ void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm, break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: /* Protect when channel wider than 20MHz */ - if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20) + if (link_conf->chanreq.oper.width > NL80211_CHAN_WIDTH_20) *protection_flags |= cpu_to_le32(ht_flag); break; default: @@ -505,7 +501,7 @@ void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (link_conf->qos) *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); - if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT) + if (link_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT) *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); } @@ -562,7 +558,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, else eth_broadcast_addr(cmd->bssid_addr); - iwl_mvm_set_fw_basic_rates(mvm, vif, &vif->bss_conf, &cmd->cck_rates, + iwl_mvm_set_fw_basic_rates(mvm, vif, &mvmvif->deflink, &cmd->cck_rates, &cmd->ofdm_rates); cmd->cck_short_preamble = @@ -873,23 +869,6 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, } } -static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) -{ - struct ieee80211_mgmt *mgmt = (void *)beacon; - const u8 *ie; - - if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon))) - return 0; - - frame_size -= mgmt->u.beacon.variable - beacon; - - ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size); - if (!ie) - return 0; - - return ie - beacon; -} - u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_vif *vif) @@ -921,8 +900,8 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm, link_conf = rcu_dereference(vif->link_conf[link_id]); if (link_conf) { basic = link_conf->basic_rates; - if (link_conf->chandef.chan) - band = link_conf->chandef.chan->band; + if (link_conf->chanreq.oper.chan) + band = link_conf->chanreq.oper.chan->band; } rcu_read_unlock(); } @@ -1010,12 +989,13 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) + IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); - tx->rate_n_flags = - cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << - RATE_MCS_ANT_POS); + tx->rate_n_flags = + cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << + RATE_MCS_ANT_POS); + } rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); @@ -1082,22 +1062,23 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, beacon->data, beacon->len); beacon_cmd.csa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_CHANNEL_SWITCH, - beacon->len)); + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); beacon_cmd.ecsa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_EXT_CHANSWITCH_ANN, - beacon->len)); + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } bool iwl_mvm_enable_fils(struct iwl_mvm *mvm, + 
struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { - if (IWL_MVM_DISABLE_AP_FILS) + if (vif->type != NL80211_IFTYPE_AP || IWL_MVM_DISABLE_AP_FILS) return false; if (cfg80211_channel_is_psc(ctx->def.chan)) @@ -1126,7 +1107,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, ctx = rcu_dereference(link_conf->chanctx_conf); channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); WARN_ON(channel == 0); - if (iwl_mvm_enable_fils(mvm, ctx)) { + if (iwl_mvm_enable_fils(mvm, vif, ctx)) { flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 10 ? IWL_MAC_BEACON_FILS : @@ -1155,13 +1136,20 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, beacon->data, beacon->len); beacon_cmd.csa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_CHANNEL_SWITCH, - beacon->len)); + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); beacon_cmd.ecsa_offset = - cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_EXT_CHANSWITCH_ANN, - beacon->len)); + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + if (vif->type == NL80211_IFTYPE_AP && + iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14) + beacon_cmd.btwt_offset = + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_S1G_TWT, + beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); @@ -1477,8 +1465,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, mvmvif->csa_countdown = true; - if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) { - int c = ieee80211_beacon_update_cntdwn(csa_vif); + if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) { + int c = ieee80211_beacon_update_cntdwn(csa_vif, 0); iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif, &csa_vif->bss_conf); @@ -1497,7 +1485,7 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, } } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) { /* we don't have CSA NoA scheduled yet, switch now */ - ieee80211_csa_finish(csa_vif); + ieee80211_csa_finish(csa_vif, 0); RCU_INIT_POINTER(mvm->csa_vif, NULL); } } @@ -1519,7 +1507,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) { - struct iwl_mvm_tx_resp *beacon_notify_hdr = + struct iwl_tx_resp *beacon_notify_hdr = &beacon_v5->beacon_notify_hdr; if (unlikely(pkt_len < sizeof(*beacon_v5))) @@ -1577,11 +1565,11 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, } } -void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) +static void +iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm, + const struct iwl_missed_beacons_notif *mb, + struct iwl_rx_packet *pkt) { - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacons_notif *mb = (void *)pkt->data; struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; struct iwl_fw_dbg_trigger_tlv *trigger; u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; @@ -1591,36 +1579,45 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, u32 id = le32_to_cpu(mb->link_id); union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; u32 mac_type; + int link_id = -1; u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, MISSED_BEACONS_NOTIFICATION, 0); - - rcu_read_lock(); + u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + MISSED_BEACONS_NOTIF, 0); + struct ieee80211_bss_conf *bss_conf; + + /* If the 
firmware uses the new notification (from MAC_CONF_GROUP), + * refer to that notification's version. + * Note that the new notification from MAC_CONF_GROUP starts from + * version 5. + */ + if (new_notif_ver) + notif_ver = new_notif_ver; /* before version four the ID in the notification refers to mac ID */ if (notif_ver < 4) { - vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false); + bss_conf = &vif->bss_conf; } else { - struct ieee80211_bss_conf *bss_conf = - iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true); + bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, false); if (!bss_conf) - goto out; + return; vif = bss_conf->vif; + link_id = bss_conf->link_id; } IWL_DEBUG_INFO(mvm, - "missed bcn %s_id=%u, consecutive=%u (%u, %u, %u)\n", + "missed bcn %s_id=%u, consecutive=%u (%u)\n", notif_ver < 4 ? "mac" : "link", id, le32_to_cpu(mb->consec_missed_beacons), - le32_to_cpu(mb->consec_missed_beacons_since_last_rx), - le32_to_cpu(mb->num_recvd_beacons), - le32_to_cpu(mb->num_expected_beacons)); + le32_to_cpu(mb->consec_missed_beacons_since_last_rx)); if (!vif) - goto out; + return; mac_type = iwl_mvm_get_mac_type(vif); @@ -1637,10 +1634,52 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, * TODO: the threshold should be adjusted based on latency conditions, * and/or in case of a CS flow on one of the other AP vifs. */ - if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) - iwl_mvm_connection_loss(mvm, vif, "missed beacons"); - else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) - ieee80211_beacon_loss(vif); + if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) { + if (rx_missed_bcon_since_rx >= IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD) { + iwl_mvm_connection_loss(mvm, vif, "missed beacons"); + } else { + IWL_WARN(mvm, + "missed beacons exceed threshold, but receiving data. Stay connected, expect bugs.\n"); + IWL_WARN(mvm, + "missed_beacons:%d, missed_beacons_since_rx:%d\n", + rx_missed_bcon, rx_missed_bcon_since_rx); + } + } else if (link_id >= 0 && hweight16(vif->active_links) > 1) { + u32 bss_param_ch_cnt_link_id = + bss_conf->bss_param_ch_cnt_link_id; + u32 scnd_lnk_bcn_lost = 0; + + if (notif_ver >= 5 && + !IWL_FW_CHECK(mvm, + le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID, + "No data for other link id but we are in EMLSR active_links: 0x%x\n", + vif->active_links)) + scnd_lnk_bcn_lost = + le32_to_cpu(mb->consec_missed_beacons_other_link); + + /* Exit EMLSR if we lost more than + * IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links + * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link, + * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED + * and the link's bss_param_ch_count has changed.
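+ * For illustration only (assumed threshold values, not necessarily + * the configured ones): with a 2-links threshold of 5 and a one-link + * threshold of 11, losing 5 beacons on each link, or 11 on a single + * link, triggers the exit from EMLSR.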
+ */ + if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS && + scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) || + rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH || + (bss_param_ch_cnt_link_id != link_id && + rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED)) + iwl_mvm_exit_esr(mvm, vif, + IWL_MVM_ESR_EXIT_MISSED_BEACON, + iwl_mvm_get_primary_link(vif)); + } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) { + if (!iwl_mvm_has_new_tx_api(mvm)) + ieee80211_beacon_loss(vif); + else + ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC); + + /* try to switch links, no-op if we don't have MLO */ + iwl_mvm_int_mlo_scan(mvm, vif); + } iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data); @@ -1648,7 +1687,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) - goto out; + return; bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); @@ -1660,9 +1699,31 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); +} -out: - rcu_read_unlock(); +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + iwl_mvm_handle_missed_beacons_notif(mvm, (const void *)pkt->data, pkt); +} + +void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + const struct iwl_missed_beacons_notif_v4 *mb_v4 = + (const void *)pkt->data; + struct iwl_missed_beacons_notif mb = { + .link_id = mb_v4->link_id, + .consec_missed_beacons = mb_v4->consec_missed_beacons, + .consec_missed_beacons_since_last_rx = + mb_v4->consec_missed_beacons_since_last_rx, + .other_link_id = cpu_to_le32(IWL_MVM_FW_LINK_ID_INVALID), + }; + + iwl_mvm_handle_missed_beacons_notif(mvm, &mb, pkt); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, @@ -1691,7 +1752,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, data = sb_v2->data; } else { - struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data; + struct iwl_stored_beacon_notif *sb_v3 = (void *)pkt->data; if (pkt_len < struct_size(sb_v3, data, size)) return; @@ -1843,7 +1904,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT * csa_vif->bss_conf.beacon_int)); - ieee80211_csa_finish(csa_vif); + ieee80211_csa_finish(csa_vif, 0); rcu_read_unlock(); @@ -1896,7 +1957,7 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, if (csa_err_mask & (CS_ERR_COUNT_ERROR | CS_ERR_LONG_DELAY_AFTER_CS | CS_ERR_TX_BLOCK_TIMER_EXPIRED)) - ieee80211_channel_switch_disconnect(vif, true); + ieee80211_channel_switch_disconnect(vif); rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 53e26c3c3a9a..af6644b7e95f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel 
Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -22,7 +22,7 @@ #include "mvm.h" #include "sta.h" #include "time-event.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-phy-db.h" #include "testmode.h" #include "fw/error-dump.h" @@ -30,21 +30,28 @@ #include "iwl-nvm-parse.h" #include "time-sync.h" +#define IWL_MVM_LIMITS(ap) \ + { \ + .max = 1, \ + .types = BIT(NL80211_IFTYPE_STATION), \ + }, \ + { \ + .max = 1, \ + .types = ap | \ + BIT(NL80211_IFTYPE_P2P_CLIENT) | \ + BIT(NL80211_IFTYPE_P2P_GO), \ + }, \ + { \ + .max = 1, \ + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \ + } + static const struct ieee80211_iface_limit iwl_mvm_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE), - }, + IWL_MVM_LIMITS(0) +}; + +static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = { + IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP)) }; static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { @@ -54,6 +61,12 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { .limits = iwl_mvm_limits, .n_limits = ARRAY_SIZE(iwl_mvm_limits), }, + { + .num_different_channels = 1, + .max_interfaces = 3, + .limits = iwl_mvm_limits_ap, + .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap), + }, }; static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { @@ -152,12 +165,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, mvm->lar_regdom_set = true; mvm->mcc_src = src_id; - /* Some kind of regulatory mess means we need to currently disallow - * puncturing in the US and Canada. Do that here, at least until we - * figure out the new chanctx APIs for puncturing. 
- */ - if (resp->mcc == cpu_to_le16(IWL_MCC_US) || - resp->mcc == cpu_to_le16(IWL_MCC_CANADA)) + if (!iwl_puncturing_is_allowed_in_bios(mvm->bios_enable_puncturing, + le16_to_cpu(resp->mcc))) ieee80211_hw_set(mvm->hw, DISALLOW_PUNCTURING); else __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mvm->hw->flags); @@ -263,6 +272,9 @@ static const u8 tm_if_types_ext_capa_sta[] = { __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \ __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY)) +#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \ + IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \ + IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = { { @@ -272,6 +284,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = { .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), /* relevant only if EHT is supported */ .eml_capabilities = IWL_MVM_EMLSR_CAPA, + .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS, }, { .iftype = NL80211_IFTYPE_STATION, @@ -280,6 +293,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = { .extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta), /* relevant only if EHT is supported */ .eml_capabilities = IWL_MVM_EMLSR_CAPA, + .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS, }, }; @@ -353,8 +367,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) /* Set this early since we need to have it for the check below */ if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable && !iwlwifi_mod_params.disable_11ax && - !iwlwifi_mod_params.disable_11be) + !iwlwifi_mod_params.disable_11be) { hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + /* we handle this already earlier, but need it for MLO */ + ieee80211_hw_set(hw, HANDLES_QUIET_CSA); + } /* With MLD FW API, it tracks timing by itself, * no need for any timing from the host @@ -362,12 +379,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (!mvm->mld_api_is_used) ieee80211_hw_set(hw, TIMING_BEACON_ONLY); - /* We should probably have this, but mac80211 - * currently doesn't support it for MLO. 
- */ - if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) - ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); - /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up, the command read from the FIFO @@ -490,6 +501,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM)) hw->wiphy->hw_timestamp_max_peers = 1; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT); + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | @@ -565,13 +581,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); - BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || - IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); + BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || + IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; + mvm->max_scans = IWL_MAX_UMAC_SCANS; else - mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; + mvm->max_scans = IWL_MAX_LMAC_SCANS; if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_2GHZ] = @@ -619,10 +635,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | - NL80211_FEATURE_DYNAMIC_SMPS | - NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; + /* when firmware supports RLC/SMPS offload, do not set these + * driver features, since it's no longer supported by driver. 
+ */ + if (!iwl_mvm_has_rlc_offload(mvm)) + hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS | + NL80211_FEATURE_DYNAMIC_SMPS; + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; @@ -640,7 +661,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, - IWL_FW_CMD_VER_UNKNOWN) == 3) + IWL_FW_CMD_VER_UNKNOWN) >= 3) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; if (fw_has_api(&mvm->fw->ucode_capa, @@ -695,12 +716,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } } + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, + TOF_RANGE_REQ_CMD), + IWL_FW_CMD_VER_UNKNOWN) >= 11) { + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_SECURE_LTF); + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; + ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ); + #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && - mvm->trans->ops->d3_suspend && - mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | @@ -795,7 +828,7 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } if (offchannel && - !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && + !test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; @@ -806,20 +839,10 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw, if (ieee80211_is_mgmt(hdr->frame_control)) sta = NULL; - /* If there is no sta, and it's not offchannel - send through AP */ + /* this shouldn't even happen: just drop */ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && - !offchannel) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(info->control.vif); - u8 ap_sta_id = READ_ONCE(mvmvif->deflink.ap_sta_id); - - if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { - /* mac80211 holds rcu read lock */ - sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); - if (IS_ERR_OR_NULL(sta)) - goto drop; - } - } + !offchannel) + goto drop; if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED && !ieee80211_is_probe_resp(hdr->frame_control)) { @@ -880,6 +903,8 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) &mvmtxq->state) && !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &mvmtxq->state) && + !test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, + &mvmtxq->state) && !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { skb = ieee80211_tx_dequeue(hw, txq); @@ -1074,15 +1099,23 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); - memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + + mvmvif->bf_enabled = false; + mvmvif->ba_enabled = false; mvmvif->ap_sta = NULL; + mvmvif->esr_active = false; + vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; + for_each_mvm_vif_valid_link(mvmvif, link_id) { - mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->link[link_id]->ap_sta_id = IWL_INVALID_STA; mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; mvmvif->link[link_id]->phy_ctxt = NULL; mvmvif->link[link_id]->active 
= 0; mvmvif->link[link_id]->igtk = NULL; + memset(&mvmvif->link[link_id]->bf_data, 0, + sizeof(mvmvif->link[link_id]->bf_data)); } probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, @@ -1092,6 +1125,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); } +static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = data; + struct iwl_mvm_sta *mvm_sta; + struct ieee80211_vif *vif; + int link_id; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + vif = mvm_sta->vif; + + if (!sta->valid_links) + return; + + for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) { + struct iwl_mvm_link_sta *mvm_link_sta; + + mvm_link_sta = + rcu_dereference_check(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (mvm_link_sta && !(vif->active_links & BIT(link_id))) { + /* + * We have a link STA but the link is inactive in + * mac80211. This will happen if we failed to + * deactivate the link but mac80211 roll back the + * deactivation of the link. + * Delete the stale data to avoid issues later on. + */ + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, + link_id); + } + } +} + static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { iwl_mvm_stop_device(mvm); @@ -1114,6 +1180,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) */ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); + /* cleanup stations as links may be gone after restart */ + ieee80211_iterate_stations_atomic(mvm->hw, + iwl_mvm_cleanup_sta_iterator, mvm); + mvm->p2p_device_vif = NULL; iwl_mvm_reset_phy_ctxts(mvm); @@ -1136,6 +1206,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) int __iwl_mvm_mac_start(struct iwl_mvm *mvm) { + bool fast_resume = false; int ret; lockdep_assert_held(&mvm->mutex); @@ -1161,6 +1232,31 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) mvm->nvm_data = NULL; } +#ifdef CONFIG_PM_SLEEP + /* fast_resume will be cleared by iwl_mvm_fast_resume */ + fast_resume = mvm->fast_resume; + + if (fast_resume) { + iwl_mvm_mei_device_state(mvm, true); + ret = iwl_mvm_fast_resume(mvm); + if (ret) { + iwl_mvm_stop_device(mvm); + /* iwl_mvm_up() will be called further down */ + } else { + /* + * We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon + * mac_down() so that debugfs will stop honoring + * requests after we flush all the workers. + * Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again + * now that we are back. This is a bit abusing the + * flag since the firmware wasn't really ever stopped, + * but this still serves the purpose. 
+ */ + set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + } + } +#endif /* CONFIG_PM_SLEEP */ + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART @@ -1171,7 +1267,10 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); } - ret = iwl_mvm_up(mvm); + + /* we also want to load the firmware if fast_resume failed */ + if (!fast_resume || ret) + ret = iwl_mvm_up(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, NULL); @@ -1201,23 +1300,16 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw) /* we are starting the mac not in error flow, and restart is enabled */ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && - iwlwifi_mod_params.fw_restart) { + iwlwifi_mod_params.fw_restart) max_retry = IWL_MAX_INIT_RETRY; - /* - * This will prevent mac80211 recovery flows to trigger during - * init failures - */ - set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); - } for (retry = 0; retry <= max_retry; retry++) { ret = __iwl_mvm_mac_start(mvm); - if (!ret || mvm->pldr_sync) + if (ret != -ETIMEDOUT) break; IWL_ERR(mvm, "mac start retry %d\n", retry); } - clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); mutex_unlock(&mvm->mutex); @@ -1230,7 +1322,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) { int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); @@ -1247,7 +1339,12 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) */ iwl_mvm_teardown_tdls_peers(mvm); - mutex_unlock(&mvm->mutex); + IWL_INFO(mvm, "restart completed\n"); + iwl_trans_finish_sw_reset(mvm->trans); + + /* no need to lock, adding in parallel would schedule too */ + if (!list_empty(&mvm->add_stream_txqs)) + schedule_work(&mvm->add_stream_wk); } void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, @@ -1264,7 +1361,7 @@ void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, } } -void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend) { lockdep_assert_held(&mvm->mutex); @@ -1280,7 +1377,14 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) if (!iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); - iwl_mvm_stop_device(mvm); + if (suspend && + mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + iwl_mvm_fast_suspend(mvm); + /* From this point on, we won't touch the device */ + iwl_mvm_mei_device_state(mvm, false); + } else { + iwl_mvm_stop_device(mvm); + } iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ @@ -1313,10 +1417,17 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) } } -void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + /* Stop internal MLO scan, if running */ + mutex_lock(&mvm->mutex); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false); + mutex_unlock(&mvm->mutex); + + wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk); + wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); @@ -1342,7 +1453,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw) iwl_mvm_mei_set_sw_rfkill_state(mvm); mutex_lock(&mvm->mutex); - __iwl_mvm_mac_stop(mvm); + __iwl_mvm_mac_stop(mvm, suspend); mutex_unlock(&mvm->mutex); /* @@ -1350,6 +1461,7 @@ 
void iwl_mvm_mac_stop(struct ieee80211_hw *hw) * discover that its list is now empty. */ cancel_work_sync(&mvm->async_handlers_wk); + wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk); } struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) @@ -1366,24 +1478,48 @@ struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) return NULL; } -int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, +int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, + struct ieee80211_bss_conf *link_conf, s16 tx_power) { u32 cmd_id = REDUCE_TX_POWER_CMD; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(link_conf->vif); + u32 mac_id = mvmvif->id; int len; - struct iwl_dev_tx_power_cmd cmd = { - .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), - .common.mac_context_id = - cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .common.pwr_restriction = cpu_to_le16(8 * tx_power), + struct iwl_dev_tx_power_cmd_v3_v8 cmd = { + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK), + .common.link_id = cpu_to_le32(mac_id), }; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); + struct iwl_dev_tx_power_cmd cmd_v9_v10; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3); + u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ? + IWL_DEV_MAX_TX_POWER : 8 * tx_power; + void *cmd_data = &cmd; - if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + cmd.common.pwr_restriction = cpu_to_le16(u_tx_power); - if (cmd_ver == 7) + if (cmd_ver > 8) { + u32 link_id; + + if (WARN_ON(!mvmvif->link[link_conf->link_id])) + return -ENODEV; + + link_id = mvmvif->link[link_conf->link_id]->fw_link_id; + + /* Those fields sit on the same place for v9 and v10 */ + cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK); + cmd_v9_v10.common.link_id = cpu_to_le32(link_id); + cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power); + cmd_data = &cmd_v9_v10; + } + + if (cmd_ver == 10) + len = sizeof(cmd_v9_v10.v10); + else if (cmd_ver == 9) + len = sizeof(cmd_v9_v10.v9); + else if (cmd_ver == 8) + len = sizeof(cmd.v8); + else if (cmd_ver == 7) len = sizeof(cmd.v7); else if (cmd_ver == 6) len = sizeof(cmd.v6); @@ -1396,10 +1532,28 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, else len = sizeof(cmd.v3); - /* all structs have the same common part, add it */ + /* all structs have the same common part, add its length */ len += sizeof(cmd.common); - return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); + if (cmd_ver < 9) + len += sizeof(cmd.per_band); + + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data); + +} + +static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta) +{ + struct ieee80211_hw *hw = data; + int i; + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + iwl_mvm_mac_itxq_xmit(hw, sta->txq[i]); + } } int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, @@ -1418,6 +1572,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id; mvmvif->csa_bcn_pending = false; + mvmvif->csa_blocks_tx = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); if (WARN_ON(!mvmsta)) { @@ -1433,12 +1588,24 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, if (!fw_has_capa(&mvm->fw->ucode_capa, 
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + ret = iwl_mvm_enable_beacon_filter(mvm, vif); if (ret) goto out_unlock; iwl_mvm_stop_session_protection(mvm, vif); } + } else if (vif->type == NL80211_IFTYPE_AP && mvmvif->csa_blocks_tx) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(vif->txq); + + clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + + local_bh_disable(); + iwl_mvm_mac_itxq_xmit(hw, vif->txq); + ieee80211_iterate_stations_atomic(hw, iwl_mvm_post_csa_tx, hw); + local_bh_enable(); + + mvmvif->csa_blocks_tx = false; } mvmvif->ps_disabled = false; @@ -1454,7 +1621,8 @@ out_unlock: } void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1547,6 +1715,80 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm, IWL_STA_MULTICAST); } +static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy, + struct wiphy_work *wk) +{ + struct iwl_mvm_vif *mvmvif = + container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work); + struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_vif *vif = + container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + + guard(mvm)(mvm); + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION); +} + +static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif, + mlo_int_scan_wk.work); + struct ieee80211_vif *vif = + container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + + guard(mvm)(mvmvif->mvm); + iwl_mvm_int_mlo_scan(mvmvif->mvm, vif); +} + +static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mvm_vif *mvmvif = + container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk); + struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_vif *vif = + container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + + guard(mvm)(mvm); + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT); +} + +static void iwl_mvm_unblock_esr_tmp_non_bss(struct wiphy *wiphy, + struct wiphy_work *wk) +{ + struct iwl_mvm_vif *mvmvif = + container_of(wk, struct iwl_mvm_vif, + unblock_esr_tmp_non_bss_wk.work); + struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_vif *vif = + container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + + mutex_lock(&mvm->mutex); + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TMP_NON_BSS); + mutex_unlock(&mvm->mutex); +} + +void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif) +{ + lockdep_assert_held(&mvm->mutex); + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + INIT_DELAYED_WORK(&mvmvif->csa_work, + iwl_mvm_channel_switch_disconnect_wk); + + wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk, + iwl_mvm_prevent_esr_done_wk); + + wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk, + iwl_mvm_mlo_int_scan_wk); + + wiphy_work_init(&mvmvif->unblock_esr_tpt_wk, + iwl_mvm_unblock_esr_tpt); + + wiphy_delayed_work_init(&mvmvif->unblock_esr_tmp_non_bss_wk, + iwl_mvm_unblock_esr_tmp_non_bss); +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1557,11 +1799,21 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + iwl_mvm_mac_init_mvmvif(mvm, mvmvif); + mvmvif->mvm = mvm; /* the first 
link always points to the default one */ + mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + mvmvif->deflink.active = 0; mvmvif->link[0] = &mvmvif->deflink; + vif->driver_flags = IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC; + + ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf); + if (ret) + goto out; + /* * Not much to do here. The stack will not allow interface * types or combinations that we didn't advertise, so we @@ -1617,7 +1869,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_remove_mac; /* beacon filtering */ - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + ret = iwl_mvm_disable_beacon_filter(mvm, vif); if (ret) goto out_remove_mac; @@ -1632,13 +1884,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvm->p2p_device_vif = vif; iwl_mvm_tcm_add_vif(mvm, vif); - INIT_DELAYED_WORK(&mvmvif->csa_work, - iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; mvm->monitor_p80 = - iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); + iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper); } if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) @@ -1671,6 +1921,8 @@ out: void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. @@ -1679,14 +1931,22 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, */ flush_work(&mvm->roc_done_wk); } + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->prevent_esr_done_wk); + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk); + + wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk); + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->unblock_esr_tmp_non_bss_wk); + + cancel_delayed_work_sync(&mvmvif->csa_work); } -/* This function is doing the common part of removing the interface for - * both - MLD and non-MLD modes. Returns true if removing the interface - * is done - */ -static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1734,21 +1994,10 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, mvm->noa_duration = 0; } #endif - return true; + goto out; } iwl_mvm_power_update_mac(mvm); - return false; -} - -static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (iwl_mvm_mac_remove_interface_common(hw, vif)) - goto out; /* Before the interface removal, mac80211 would cancel the ROC, and the * ROC worker would be scheduled if needed. 
The worker would be flushed @@ -1771,6 +2020,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->monitor_on = false; out: + iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf); if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta); @@ -1895,7 +2145,7 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* replace previous configuration */ kfree(mvm->mcast_filter_cmd); @@ -1912,7 +2162,6 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw, iwl_mvm_recalc_multicast(mvm); out: - mutex_unlock(&mvm->mutex); *total_flags = 0; } @@ -1932,9 +2181,8 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, !vif->p2p) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - mutex_unlock(&mvm->mutex); } int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -2518,6 +2766,7 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int link_id; /* The firmware tracks the MU-MIMO group on its own. * However, on HW restart we should restore this data. @@ -2533,7 +2782,8 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, iwl_mvm_recalc_multicast(mvm); /* reset rssi values */ - mvmvif->bf_data.ave_beacon_signal = 0; + for_each_mvm_vif_valid_link(mvmvif, link_id) + mvmvif->link[link_id]->bf_data.ave_beacon_signal = 0; iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT, @@ -2560,7 +2810,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif)); } if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | @@ -2574,14 +2824,18 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, } if (changes & BSS_CHANGED_CQM) { - IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); - /* reset cqm events tracking */ - mvmvif->bf_data.last_cqm_event = 0; - if (mvmvif->bf_data.bf_enabled) { + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[link_conf->link_id]; + + IWL_DEBUG_MAC80211(mvm, "CQM info_changed\n"); + if (link_info) + link_info->bf_data.last_cqm_event = 0; + + if (mvmvif->bf_enabled) { /* FIXME: need to update per link when FW API will * support it */ - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + ret = iwl_mvm_enable_beacon_filter(mvm, vif); if (ret) IWL_ERR(mvm, "failed to update CQM thresholds\n"); @@ -2590,6 +2844,13 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_BANDWIDTH) iwl_mvm_update_link_smps(vif, link_conf); + + if (changes & BSS_CHANGED_TPE) { + IWL_DEBUG_CALIB(mvm, "Changing TPE\n"); + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, + false); + } } static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, @@ -2608,9 +2869,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) { if ((vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) || - (vif->bss_conf.eht_support && - !iwlwifi_mod_params.disable_11be)) + !iwlwifi_mod_params.disable_11ax)) 
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); @@ -2619,10 +2878,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, /* Update MU EDCA params */ if (changes & BSS_CHANGED_QOS && mvmvif->associated && vif->cfg.assoc && - ((vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) || - (vif->bss_conf.eht_support && - !iwlwifi_mod_params.disable_11be))) + (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax)) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id); /* @@ -2644,6 +2900,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { + mvmvif->session_prot_connection_loss = false; + /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); for_each_mvm_vif_valid_link(mvmvif, i) @@ -2705,7 +2963,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC, 0); } - } else if (mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + } else if (mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) { iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated @@ -2717,33 +2975,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, &mvm->status), "Failed to update SF upon disassociation\n"); - /* - * If we get an assert during the connection (after the - * station has been added, but before the vif is set - * to associated), mac80211 will re-add the station and - * then configure the vif. Since the vif is not - * associated, we would remove the station here and - * this would fail the recovery. - */ - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, - &mvm->status)) { - /* first remove remaining keys */ - iwl_mvm_sec_key_remove_ap(mvm, vif, - &mvmvif->deflink, 0); - - /* - * Remove AP station now that - * the MAC is unassoc - */ - ret = iwl_mvm_rm_sta_id(mvm, vif, - mvmvif->deflink.ap_sta_id); - if (ret) - IWL_ERR(mvm, - "failed to remove AP station\n"); - - mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; - } - /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) @@ -2978,7 +3209,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_stop_ap_ibss_common(mvm, vif); @@ -3008,8 +3239,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); - - mutex_unlock(&mvm->mutex); } static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, @@ -3064,7 +3293,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); @@ -3089,27 +3318,21 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", bss_conf->txpower); - iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); + iwl_mvm_set_tx_power(mvm, bss_conf, bss_conf->txpower); } - - mutex_unlock(&mvm->mutex); } int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; if (hw_req->req.n_channels == 0 || hw_req->req.n_channels > 
mvm->fw->ucode_capa.n_scan_channels) return -EINVAL; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); } void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, @@ -3117,7 +3340,7 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* Due to a race condition, it's possible that mac80211 asks * us to stop a hw_scan when it's already stopped. This can @@ -3128,8 +3351,6 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, */ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - - mutex_unlock(&mvm->mutex); } void @@ -3208,7 +3429,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, */ break; case STA_NOTIFY_AWAKE: - if (WARN_ON(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(mvmsta->deflink.sta_id == IWL_INVALID_STA)) break; if (txqs) @@ -3288,6 +3509,8 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int link_id; + lockdep_assert_wiphy(mvm->hw->wiphy); + /* * This is called before mac80211 does RCU synchronisation, * so here we already invalidate our internal RCU-protected @@ -3298,7 +3521,7 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, * Since there's mvm->mutex here, no need to have RCU lock for * mvm_sta->link access. */ - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) { struct iwl_mvm_link_sta *link_sta; u32 sta_id; @@ -3315,7 +3538,6 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); } } - mutex_unlock(&mvm->mutex); } static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -3418,16 +3640,16 @@ iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, .tolerated = true, }; - if (WARN_ON_ONCE(!link_conf->chandef.chan || + if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan || !mvmvif->link[link_id])) return; - if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) { + if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) { mvmvif->link[link_id]->he_ru_2mhz_block = false; return; } - cfg80211_bss_iter(hw->wiphy, &link_conf->chandef, + cfg80211_bss_iter(hw->wiphy, &link_conf->chanreq.oper, iwl_mvm_check_he_obss_narrow_bw_ru_iter, &iter_data); @@ -3487,10 +3709,10 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, return; /* FIXME: MEI needs to be updated for MLO */ - if (!vif->bss_conf.chandef.chan) + if (!vif->bss_conf.chanreq.oper.chan) return; - conn_info.channel = vif->bss_conf.chandef.chan->hw_value; + conn_info.channel = vif->bss_conf.chanreq.oper.chan->hw_value; switch (mvm_sta->pairwise_cipher) { case WLAN_CIPHER_SUITE_TKIP: @@ -3591,8 +3813,6 @@ static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm, } } -#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16 - static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -3674,7 +3894,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, if (sta->tdls && (vif->p2p || - iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT || + iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_TDLS_STA_COUNT || iwl_mvm_phy_ctx_count(mvm) > 1)) { 
IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); return -EBUSY; @@ -3698,6 +3918,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) mvmvif->ap_sta = sta; + /* + * Initialize the rates here already - this really tells + * the firmware only what the supported legacy rates are + * (may be) since it's initialized already from what the + * AP advertised in the beacon/probe response. This will + * allow the firmware to send auth/assoc frames with one + * of the supported rates already, rather than having to + * use a mandatory rate. + * If we're the AP, we'll just assume mandatory rates at + * this point, but we know nothing about the STA anyway. + */ + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + return 0; } @@ -3724,10 +3957,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw, * the default bss_conf */ if (!mvm->mld_api_is_used && - ((vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) || - (vif->bss_conf.eht_support && - !iwlwifi_mod_params.disable_11be))) + (vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax)) iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id); } else if (vif->type == NL80211_IFTYPE_STATION) { iwl_mvm_vif_set_he_support(hw, vif, sta, true); @@ -3779,30 +4010,48 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, NL80211_TDLS_ENABLE_LINK); } else { /* enable beacon filtering */ - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif)); mvmvif->authorized = 1; + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + mvmvif->link_selection_res = vif->active_links; + mvmvif->link_selection_primary = + vif->active_links ? __ffs(vif->active_links) : 0; + } + callbacks->mac_ctxt_changed(mvm, vif, false); iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); + memset(&mvmvif->last_esr_exit, 0, + sizeof(mvmvif->last_esr_exit)); + + iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0); + + /* Block until FW notif will arrive */ + iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0); + /* when client is authorized (AP station marked as such), - * try to enable more links + * try to enable the best link(s). */ if (vif->type == NL80211_IFTYPE_STATION && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - iwl_mvm_mld_select_links(mvm, vif, false); + iwl_mvm_select_links(mvm, vif); } mvm_sta->authorized = true; - iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); - /* MFP is set by default before the station is authorized. * Clear it here in case it's not used. */ - if (!sta->mfp) - return callbacks->update_sta(mvm, vif, sta); + if (!sta->mfp) { + int ret = callbacks->update_sta(mvm, vif, sta); + + if (ret) + return ret; + } + + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); return 0; } @@ -3831,9 +4080,20 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm, * time. 
*/ mvmvif->authorized = 0; + mvmvif->link_selection_res = 0; /* disable beacon filtering */ - iwl_mvm_disable_beacon_filter(mvm, vif, 0); + iwl_mvm_disable_beacon_filter(mvm, vif); + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->prevent_esr_done_wk); + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk); + + wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk); + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->unblock_esr_tmp_non_bss_wk); } return 0; @@ -3991,8 +4251,9 @@ int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) } void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u32 changed) + struct ieee80211_link_sta *link_sta, u32 changed) { + struct ieee80211_sta *sta = link_sta->sta; struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (changed & (IEEE80211_RC_BW_CHANGED | @@ -4020,12 +4281,8 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, * The exception is P2P_DEVICE interface which needs immediate update. */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - int ret; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } return 0; } @@ -4034,11 +4291,14 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + if (info->was_assoc && !mvmvif->session_prot_connection_loss) + return; + + guard(mvm)(mvm); iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id); - mutex_unlock(&mvm->mutex); } void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, @@ -4051,9 +4311,8 @@ void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, if (info->success) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_stop_session_protection(mvm, vif); - mutex_unlock(&mvm->mutex); } int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, @@ -4063,20 +4322,12 @@ int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; + guard(mvm)(mvm); - mutex_lock(&mvm->mutex); - - if (!vif->cfg.idle) { - ret = -EBUSY; - goto out; - } - - ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); + if (!vif->cfg.idle) + return -EBUSY; -out: - mutex_unlock(&mvm->mutex); - return ret; + return iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); } int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, @@ -4354,13 +4605,9 @@ int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); } void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, @@ -4412,44 +4659,6 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, return true; } -#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) -#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) -#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) -#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) -#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) - -static void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, - 
u32 duration_ms, - u32 *duration_tu, - u32 *delay) -{ - u32 dtim_interval = vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int; - - *delay = AUX_ROC_MIN_DELAY; - *duration_tu = MSEC_TO_TU(duration_ms); - - /* - * If we are associated we want the delay time to be at least one - * dtim interval so that the FW can wait until after the DTIM and - * then start the time event, this will potentially allow us to - * remain off-channel for the max duration. - * Since we want to use almost a whole dtim interval we would also - * like the delay to be for 2-3 dtim intervals, in case there are - * other time events with higher priority. - */ - if (vif->cfg.assoc) { - *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); - /* We cannot remain off-channel longer than the DTIM interval */ - if (dtim_interval <= *duration_tu) { - *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER; - if (*duration_tu <= AUX_ROC_MIN_DURATION) - *duration_tu = dtim_interval - - AUX_ROC_MIN_SAFETY_BUFFER; - } - } -} - static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, @@ -4547,48 +4756,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, return res; } -static int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, - struct ieee80211_channel *channel, - struct ieee80211_vif *vif, - int duration, u32 activity) -{ - int res; - u32 duration_tu, delay; - struct iwl_roc_req roc_req = { - .action = cpu_to_le32(FW_CTXT_ACTION_ADD), - .activity = cpu_to_le32(activity), - .sta_id = cpu_to_le32(mvm->aux_sta.sta_id), - }; - - lockdep_assert_held(&mvm->mutex); - - /* Set the channel info data */ - iwl_mvm_set_chan_info(mvm, &roc_req.channel_info, - channel->hw_value, - iwl_mvm_phy_band_from_nl80211(channel->band), - IWL_PHY_CHANNEL_MODE20, 0); - - iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu, - &delay); - roc_req.duration = cpu_to_le32(duration_tu); - roc_req.max_delay = cpu_to_le32(delay); - - IWL_DEBUG_TE(mvm, - "\t(requested = %ums, max_delay = %ums)\n", - duration, delay); - IWL_DEBUG_TE(mvm, - "Requesting to remain on channel %u for %utu\n", - channel->hw_value, duration_tu); - - /* Set the node address */ - memcpy(roc_req.node_addr, vif->addr, ETH_ALEN); - - res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), - 0, sizeof(roc_req), &roc_req); - - return res; -} - static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id) { int ret = 0; @@ -4651,7 +4818,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm, if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) { ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); - } else if (fw_ver == 3) { + } else if (fw_ver >= 3) { ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, ROC_ACTIVITY_HOTSPOT); } else { @@ -4662,6 +4829,37 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm, return ret; } +static int iwl_mvm_roc_p2p(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration, + enum ieee80211_roc_type type) +{ + enum iwl_roc_activity activity; + int ret; + + lockdep_assert_held(&mvm->mutex); + + switch (type) { + case IEEE80211_ROC_TYPE_NORMAL: + activity = ROC_ACTIVITY_P2P_DISC; + break; + case IEEE80211_ROC_TYPE_MGMT_TX: + activity = ROC_ACTIVITY_P2P_NEG; + break; + default: + WARN_ONCE(1, "Got an invalid P2P ROC type\n"); + return -EINVAL; + } + + ret = iwl_mvm_mld_add_aux_sta(mvm, + iwl_mvm_get_lmac_id(mvm, channel->band)); + if (ret) + return ret; + + return iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, activity); +} + 
static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel *channel) @@ -4705,7 +4903,7 @@ static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm, cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, - &chandef, 1, 1); + &chandef, NULL, 1, 1); } /* Execute the common part for MLD and non-MLD modes */ @@ -4715,6 +4913,7 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct iwl_mvm_roc_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); u32 lmac_id; int ret; @@ -4727,7 +4926,14 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, */ flush_work(&mvm->roc_done_wk); - mutex_lock(&mvm->mutex); + if (!IS_ERR_OR_NULL(bss_vif)) { + ret = iwl_mvm_block_esr_sync(mvm, bss_vif, + IWL_MVM_ESR_BLOCKED_ROC); + if (ret) + return ret; + } + + guard(mvm)(mvm); switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -4737,30 +4943,29 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ret = ops->add_aux_sta_for_hs20(mvm, lmac_id); if (!ret) ret = iwl_mvm_roc_station(mvm, channel, vif, duration); - goto out_unlock; + return ret; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type); - ret = -EINVAL; - goto out_unlock; + return -EINVAL; } + if (iwl_mvm_has_p2p_over_aux(mvm)) { + ret = iwl_mvm_roc_p2p(mvm, channel, vif, duration, type); + return ret; + } ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel); if (ret) - goto out_unlock; + return ret; ret = ops->link(mvm, vif); if (ret) - goto out_unlock; + return ret; - ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); -out_unlock: - mutex_unlock(&mvm->mutex); - IWL_DEBUG_MAC80211(mvm, "leave\n"); - return ret; + return iwl_mvm_start_p2p_roc(mvm, vif, duration, type); } int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, @@ -4770,42 +4975,52 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "enter\n"); - mutex_lock(&mvm->mutex); iwl_mvm_stop_roc(mvm, vif); - mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return 0; } -struct iwl_mvm_ftm_responder_iter_data { - bool responder; +struct iwl_mvm_chanctx_usage_data { + struct iwl_mvm *mvm; struct ieee80211_chanctx_conf *ctx; + bool use_def; }; -static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_chanctx_usage_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) { - struct iwl_mvm_ftm_responder_iter_data *data = _data; + struct iwl_mvm_chanctx_usage_data *data = _data; + struct ieee80211_bss_conf *link_conf; + int link_id; - if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx && - vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) - data->responder = true; + for_each_vif_active_link(vif, link_conf, link_id) { + if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx) + continue; + + if (iwl_mvm_enable_fils(data->mvm, vif, data->ctx)) + data->use_def = true; + + if (vif->type == NL80211_IFTYPE_AP && link_conf->ftmr_params) + data->use_def = true; + } } -static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, - struct ieee80211_chanctx_conf *ctx) +struct cfg80211_chan_def * +iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { - struct iwl_mvm_ftm_responder_iter_data data = { - .responder = false, + struct 
iwl_mvm_chanctx_usage_data data = { + .mvm = mvm, .ctx = ctx, + .use_def = false, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_ftm_responder_chanctx_iter, - &data); - return data.responder; + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_chanctx_usage_iter, + &data); + + return data.use_def ? &ctx->def : &ctx->min_def; } static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, @@ -4813,9 +5028,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; - bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) || - iwl_mvm_enable_fils(mvm, ctx); - struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def; + struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx); int ret; lockdep_assert_held(&mvm->mutex); @@ -4828,7 +5041,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, goto out; } - ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, + ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, &ctx->ap, ctx->rx_chains_static, ctx->rx_chains_dynamic); if (ret) { @@ -4845,13 +5058,9 @@ int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_add_chanctx(mvm, ctx); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return __iwl_mvm_add_chanctx(mvm, ctx); } static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, @@ -4870,9 +5079,8 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); __iwl_mvm_remove_chanctx(mvm, ctx); - mutex_unlock(&mvm->mutex); } void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, @@ -4881,39 +5089,34 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) || - iwl_mvm_enable_fils(mvm, ctx); - struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def; + struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx); if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | IEEE80211_CHANCTX_CHANGE_RX_CHAINS | IEEE80211_CHANCTX_CHANGE_RADAR | - IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), + IEEE80211_CHANCTX_CHANGE_MIN_DEF)), "Cannot change PHY. 
Ref=%d, changed=0x%X\n", phy_ctxt->ref, changed)) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* we are only changing the min_width, may be a noop */ - if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { + if (changed == IEEE80211_CHANCTX_CHANGE_MIN_DEF) { if (phy_ctxt->width == def->width) - goto out_unlock; + return; /* we are just toggling between 20_NOHT and 20 */ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && def->width <= NL80211_CHAN_WIDTH_20) - goto out_unlock; + return; } iwl_mvm_bt_coex_vif_change(mvm); - iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, + iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap, ctx->rx_chains_static, ctx->rx_chains_dynamic); - -out_unlock: - mutex_unlock(&mvm->mutex); } /* @@ -5033,6 +5236,10 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, } iwl_mvm_update_quotas(mvm, false, NULL); + + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, + false); } goto out; @@ -5052,13 +5259,9 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); } /* @@ -5146,9 +5349,8 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); __iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false); - mutex_unlock(&mvm->mutex); } static int @@ -5158,7 +5360,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, { int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); @@ -5181,7 +5383,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); - goto out; + return 0; out_remove: __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); @@ -5198,15 +5400,11 @@ out_reassign: goto out_restart; } - goto out; + return ret; out_restart: /* things keep failing, better restart the hw */ - iwl_mvm_nic_restart(mvm, false); - -out: - mutex_unlock(&mvm->mutex); - + iwl_force_nmi(mvm->trans); return ret; } @@ -5217,7 +5415,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, { int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, vifs[0].old_ctx, true); @@ -5229,7 +5427,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, goto out_reassign; } - goto out; + return 0; out_reassign: if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, @@ -5238,15 +5436,11 @@ out_reassign: goto out_restart; } - goto out; + return ret; out_restart: /* things keep failing, better restart the hw */ - iwl_mvm_nic_restart(mvm, false); - -out: - mutex_unlock(&mvm->mutex); - + iwl_force_nmi(mvm->trans); return ret; } @@ -5361,8 +5555,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, return -EINVAL; if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) - return iwl_mvm_enable_beacon_filter(mvm, vif, 0); - return iwl_mvm_disable_beacon_filter(mvm, vif, 0); + return iwl_mvm_enable_beacon_filter(mvm, vif); + return iwl_mvm_disable_beacon_filter(mvm, vif); } return -EOPNOTSUPP; @@ -5373,13 +5567,9 @@ int iwl_mvm_mac_testmode_cmd(struct 
ieee80211_hw *hw, void *data, int len) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int err; - - mutex_lock(&mvm->mutex); - err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); - mutex_unlock(&mvm->mutex); - return err; + guard(mvm)(mvm); + return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); } #endif @@ -5445,8 +5635,8 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); - if (mvmvif->bf_data.bf_enabled) { - int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + if (mvmvif->bf_enabled) { + int ret = iwl_mvm_disable_beacon_filter(mvm, vif); if (ret) return ret; @@ -5458,19 +5648,32 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, return 0; } +static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + } +} + #define IWL_MAX_CSA_BLOCK_TX 1500 -int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, +int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_txq *mvmtxq; int ret; - mutex_lock(&mvm->mutex); + lockdep_assert_held(&mvm->mutex); mvmvif->csa_failed = false; + mvmvif->csa_blocks_tx = false; IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); @@ -5485,30 +5688,38 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active, - "Another CSA is already in progress")) { - ret = -EBUSY; - goto out_unlock; - } + "Another CSA is already in progress")) + return -EBUSY; /* we still didn't unblock tx. prevent new CS meanwhile */ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, - lockdep_is_held(&mvm->mutex))) { - ret = -EBUSY; - goto out_unlock; - } + lockdep_is_held(&mvm->mutex))) + return -EBUSY; rcu_assign_pointer(mvm->csa_vif, vif); if (WARN_ONCE(mvmvif->csa_countdown, - "Previous CSA countdown didn't complete")) { - ret = -EBUSY; - goto out_unlock; - } + "Previous CSA countdown didn't complete")) + return -EBUSY; mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; + if (!chsw->block_tx) + break; + /* don't need blocking in driver otherwise - mac80211 will do */ + if (!ieee80211_hw_check(mvm->hw, HANDLES_QUIET_CSA)) + break; + + mvmvif->csa_blocks_tx = true; + mvmtxq = iwl_mvm_txq_from_mac80211(vif->txq); + set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + ieee80211_iterate_stations_atomic(mvm->hw, + iwl_mvm_csa_block_txqs, + NULL); break; case NL80211_IFTYPE_STATION: + mvmvif->csa_blocks_tx = chsw->block_tx; + /* * In the new flow FW is in charge of timing the switch so there * is no need for all of this @@ -5523,10 +5734,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, * we don't know the dtim period. In this case, the firmware can't * track the beacons. 
*/ - if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) { - ret = -EBUSY; - goto out_unlock; - } + if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) + return -EBUSY; if (chsw->delay > IWL_MAX_CSA_BLOCK_TX && hweight16(vif->valid_links) <= 1) @@ -5548,7 +5757,7 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); if (ret) - goto out_unlock; + return ret; } else { iwl_mvm_schedule_client_csa(mvm, vif, chsw); } @@ -5564,17 +5773,24 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, ret = iwl_mvm_power_update_ps(mvm); if (ret) - goto out_unlock; + return ret; /* we won't be on this channel any longer */ iwl_mvm_teardown_tdls_peers(mvm); -out_unlock: - mutex_unlock(&mvm->mutex); - return ret; } +static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + guard(mvm)(mvm); + return iwl_mvm_pre_channel_switch(mvm, vif, chsw); +} + void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) @@ -5606,8 +5822,16 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { if (mvmvif->csa_misbehave) { + struct ieee80211_bss_conf *link_conf; + /* Second time, give up on this AP*/ - iwl_mvm_abort_channel_switch(hw, vif); + + link_conf = wiphy_dereference(hw->wiphy, + vif->link_conf[chsw->link_id]); + if (WARN_ON(!link_conf)) + return; + + iwl_mvm_abort_channel_switch(hw, vif, link_conf); ieee80211_chswitch_done(vif, false, 0); mvmvif->csa_misbehave = false; return; @@ -5616,16 +5840,14 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, } mvmvif->csa_count = chsw->count; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (mvmvif->csa_failed) - goto out_unlock; + return; WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); -out_unlock: - mutex_unlock(&mvm->mutex); } static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) @@ -5633,18 +5855,21 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) int i; if (!iwl_mvm_has_new_tx_api(mvm)) { + /* we can't ask the firmware anything if it is dead */ + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) + return; if (drop) { - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm) & queues); - mutex_unlock(&mvm->mutex); } else { iwl_trans_wait_tx_queues_empty(mvm->trans, queues); } return; } - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { struct ieee80211_sta *sta; @@ -5659,14 +5884,13 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) iwl_mvm_wait_sta_queues_empty(mvm, iwl_mvm_sta_from_mac80211(sta)); } - mutex_unlock(&mvm->mutex); } void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; struct ieee80211_sta *sta; bool ap_sta_done = false; @@ -5678,11 +5902,22 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; } + if (!drop && hweight16(vif->active_links) <= 1) { + int link_id = 
vif->active_links ? __ffs(vif->active_links) : 0; + struct ieee80211_bss_conf *link_conf; + + link_conf = wiphy_dereference(hw->wiphy, + vif->link_conf[link_id]); + if (WARN_ON(!link_conf)) + return; + if (link_conf->csa_active && mvmvif->csa_blocks_tx) + drop = true; + } + /* Make sure we're done with the deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); - mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { @@ -5717,8 +5952,11 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, /* this can take a while, and we may need/want other operations * to succeed while doing this, so do it without the mutex held + * If the firmware is dead, this can't work... */ - if (!drop && !iwl_mvm_has_new_tx_api(mvm)) + if (!drop && !iwl_mvm_has_new_tx_api(mvm) && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } @@ -5731,7 +5969,7 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta; int link_id; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); for_each_sta_active_link(vif, sta, link_sta, link_id) { mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id], lockdep_is_held(&mvm->mutex)); @@ -5742,14 +5980,71 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mvmsta->tfd_queue_msk)) IWL_ERR(mvm, "flush request fail\n"); } +} + +static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx, + struct survey_info *survey) +{ + int chan_idx; + enum nl80211_band band; + int ret; + + mutex_lock(&mvm->mutex); + + if (!mvm->acs_survey) { + ret = -ENOENT; + goto out; + } + + /* Find and return the next entry that has a non-zero active time */ + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband = + mvm->hw->wiphy->bands[band]; + + if (!sband) + continue; + + for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) { + struct iwl_mvm_acs_survey_channel *info = + &mvm->acs_survey->bands[band][chan_idx]; + + if (!info->time) + continue; + + /* Found (the next) channel to report */ + survey->channel = &sband->channels[chan_idx]; + survey->filled = SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX; + survey->time = info->time; + survey->time_busy = info->time_busy; + survey->time_rx = info->time_rx; + survey->time_tx = info->time_tx; + survey->noise = info->noise; + if (survey->noise < 0) + survey->filled |= SURVEY_INFO_NOISE_DBM; + + /* Clear time so that channel is only reported once */ + info->time = 0; + + ret = 0; + goto out; + } + } + + ret = -ENOENT; + +out: mutex_unlock(&mvm->mutex); + + return ret; } int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret = 0; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD), @@ -5757,20 +6052,25 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, memset(survey, 0, sizeof(*survey)); - /* only support global statistics right now */ - if (idx != 0) - return -ENOENT; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; - mutex_lock(&mvm->mutex); + /* + * Return the beacon stats at index zero and pass on following indices + * to the function returning the full survey, most likely for ACS + 
* (Automatic Channel Selection). + */ + if (idx > 0) + return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey); + + guard(mvm)(mvm); if (iwl_mvm_firmware_running(mvm)) { - ret = iwl_mvm_request_statistics(mvm, false); + int ret = iwl_mvm_request_statistics(mvm, false); + if (ret) - goto out; + return ret; } survey->filled = SURVEY_INFO_TIME_RX | @@ -5786,7 +6086,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, /* the new fw api doesn't support the following fields */ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) - goto out; + return 0; survey->filled |= SURVEY_INFO_TIME | SURVEY_INFO_TIME_SCAN; @@ -5798,9 +6098,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); - out: - mutex_unlock(&mvm->mutex); - return ret; + return 0; } static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) @@ -5967,13 +6265,13 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, if (!vif->cfg.assoc) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); - if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id) - goto unlock; + if (sta != mvmvif->ap_sta) + return; if (iwl_mvm_request_statistics(mvm, false)) - goto unlock; + return; sinfo->rx_beacon = 0; for_each_mvm_vif_valid_link(mvmvif, i) @@ -5987,8 +6285,6 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, mvmvif->deflink.beacon_stats.avg_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } - unlock: - mutex_unlock(&mvm->mutex); } static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, @@ -6108,6 +6404,7 @@ void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, } } +#define SYNC_RX_QUEUE_TIMEOUT (HZ) void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, @@ -6130,7 +6427,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, .len[0] = sizeof(cmd), .data[1] = data, .len[1] = size, - .flags = sync ? 0 : CMD_ASYNC, + .flags = CMD_SEND_IN_RFKILL | (sync ? 
0 : CMD_ASYNC), }; int ret; @@ -6155,12 +6452,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, if (sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, - READ_ONCE(mvm->queue_sync_state) == 0 || - iwl_mvm_is_radio_killed(mvm), - HZ); - WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), - "queue sync: failed to sync, state is 0x%lx\n", - mvm->queue_sync_state); + READ_ONCE(mvm->queue_sync_state) == 0, + SYNC_RX_QUEUE_TIMEOUT); + WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n", + mvm->queue_sync_state, + mvm->queue_sync_cookie); } out: @@ -6174,9 +6470,8 @@ void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); - mutex_unlock(&mvm->mutex); } int @@ -6212,13 +6507,9 @@ int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_ftm_start(mvm, vif, request); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_ftm_start(mvm, vif, request); } void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -6226,9 +6517,8 @@ void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_ftm_abort(mvm, request); - mutex_unlock(&mvm->mutex); } static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) @@ -6263,7 +6553,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 protocols = 0; - int ret; /* HW timestamping is only supported for a specific station */ if (!hwts->macaddr) @@ -6273,11 +6562,8 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, protocols = IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols); } const struct ieee80211_ops iwl_mvm_hw_ops = { @@ -6304,7 +6590,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, - .sta_rc_update = iwl_mvm_sta_rc_update, + .link_sta_rc_update = iwl_mvm_sta_rc_update, .conf_tx = iwl_mvm_mac_conf_tx, .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, @@ -6334,7 +6620,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .set_tim = iwl_mvm_set_tim, .channel_switch = iwl_mvm_channel_switch, - .pre_channel_switch = iwl_mvm_pre_channel_switch, + .pre_channel_switch = iwl_mvm_mac_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c index ea3e9e9c6e26..ef0be44207e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022 - 2023 
Intel Corporation + * Copyright (C) 2022 - 2024 Intel Corporation */ #include <linux/kernel.h> #include <net/mac80211.h> @@ -62,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, struct ieee80211_key_conf *keyconf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE; + bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5; u32 flags = 0; lockdep_assert_held(&mvm->mutex); - if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + if (!pairwise) flags |= IWL_SEC_KEY_FLAG_MCAST_KEY; switch (keyconf->cipher) { @@ -96,14 +98,19 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, if (!sta && vif->type == NL80211_IFTYPE_STATION) sta = mvmvif->ap_sta; - /* Set the MFP flag also for an AP interface where the key is an IGTK - * key as in such a case the station would always be NULL + /* + * If we are installing an iGTK (in AP or STA mode), we need to tell + * the firmware this key will en/decrypt MGMT frames. + * Same goes if we are installing a pairwise key for an MFP station. + * In case we're installing a groupwise key (which is not an iGTK), + * then, we will not use this key for MGMT frames. */ - if ((!IS_ERR_OR_NULL(sta) && sta->mfp) || - (vif->type == NL80211_IFTYPE_AP && - (keyconf->keyidx == 4 || keyconf->keyidx == 5))) + if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk) flags |= IWL_SEC_KEY_FLAG_MFP; + if (keyconf->flags & IEEE80211_KEY_FLAG_SPP_AMSDU) + flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU; + return flags; } @@ -137,7 +144,7 @@ static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw, if (sta != data->sta || key->link_id >= 0) return; - err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cmd), &cmd); + err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); if (err) data->err = err; @@ -155,8 +162,8 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, .new_sta_mask = new_sta_mask, }; - ieee80211_iter_keys_rcu(mvm->hw, vif, iwl_mvm_mld_update_sta_key, - &data); + ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key, + &data); return data.err; } @@ -335,6 +342,21 @@ static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm, return ret; } +int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 sta_mask, + struct ieee80211_key_conf *keyconf) +{ + u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) | + IWL_SEC_KEY_FLAG_MFP; + + if (WARN_ON(!sta_mask)) + return -EINVAL; + + return __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx, + 0); +} + int iwl_mvm_sec_key_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -374,13 +396,13 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm, u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION || - link->ap_sta_id == IWL_MVM_INVALID_STA)) + link->ap_sta_id == IWL_INVALID_STA)) return; if (!sec_key_ver) return; - ieee80211_iter_keys_rcu(mvm->hw, vif, - iwl_mvm_sec_key_remove_ap_iter, - (void *)(uintptr_t)link_id); + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_sec_key_remove_ap_iter, + (void *)(uintptr_t)link_id); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c index f313a8d771e4..bb7851042177 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022 - 2023 Intel Corporation + * 
Copyright (C) 2022 - 2024 Intel Corporation */ #include "mvm.h" @@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC | - MAC_FILTER_IN_CONTROL_AND_MGMT | + MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT | MAC_CFG_FILTER_ACCEPT_BEACON | MAC_CFG_FILTER_ACCEPT_PROBE_REQ | MAC_CFG_FILTER_ACCEPT_GRP); @@ -205,8 +205,11 @@ static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, cmd.p2p_dev.is_disc_extended = iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif); - /* Override the filter flags to accept only probe requests */ - cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ); + /* Override the filter flags to accept all management frames. This is + * needed to support both P2P device discovery using probe requests and + * P2P service discovery using action frames + */ + cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT); return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 893b69fc841b..341a2a7a49ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022-2023 Intel Corporation + * Copyright (C) 2022-2024 Intel Corporation */ #include "mvm.h" @@ -12,10 +12,14 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, int ret; int i; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); + + iwl_mvm_mac_init_mvmvif(mvm, mvmvif); mvmvif->mvm = mvm; + vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC; + /* Not much to do here. The stack will not allow interface * types or combinations that we didn't advertise, so we * don't really have to check the types. @@ -30,7 +34,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) - goto out_unlock; + return ret; rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); @@ -39,15 +43,13 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, /* reset deflink MLO parameters */ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; mvmvif->deflink.active = 0; - /* the first link always points to the default one */ - mvmvif->link[0] = &mvmvif->deflink; ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif); if (ret) - goto out_unlock; + return ret; /* beacon filtering */ - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + ret = iwl_mvm_disable_beacon_filter(mvm, vif); if (ret) goto out_remove_mac; @@ -58,9 +60,19 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); - if (ret) - goto out_free_bf; + /* We want link[0] to point to the default link, unless we have MLO and + * in this case this will be modified later by .change_vif_links() + * If we are in the restart flow with an MLD connection, we will wait + * to .change_vif_links() to setup the links. 
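The reworked MFP decision earlier in this patch (iwl_mvm_get_sec_flags() in mld-key.c) reads as a compact predicate: an IGTK (key index 4 or 5) always en/decrypts management frames whether installed on the AP or the STA side, a pairwise key does so only when the peer negotiated management frame protection, and a non-IGTK group key never does. Restated standalone, with illustrative names and ignoring the NULL/error checks on the station pointer:

    #include <linux/types.h>

    static bool key_used_for_mgmt_frames(bool sta_mfp, bool pairwise,
                                         int keyidx)
    {
        bool igtk = keyidx == 4 || keyidx == 5;

        return igtk || (sta_mfp && pairwise);
    }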
+ */ + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + !ieee80211_vif_is_mld(vif)) { + mvmvif->link[0] = &mvmvif->deflink; + + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_free_bf; + } /* Save a pointer to p2p device vif, so it can later be used to * update the p2p device MAC when a GO is started/stopped @@ -73,8 +85,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, goto out_free_bf; iwl_mvm_tcm_add_vif(mvm, vif); - INIT_DELAYED_WORK(&mvmvif->csa_work, - iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; @@ -92,7 +102,10 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, mvm->csme_vif = vif; } - goto out_unlock; + if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5) + vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW; + + return 0; out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { @@ -103,9 +116,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, out_remove_mac: mvmvif->link[0] = NULL; iwl_mvm_mld_mac_ctxt_remove(mvm, vif); - out_unlock: - mutex_unlock(&mvm->mutex); - return ret; } @@ -122,7 +132,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, vif->type == NL80211_IFTYPE_ADHOC)) iwl_mvm_tcm_rm_vif(mvm, vif); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (vif == mvm->csme_vif) { iwl_mei_set_netdev(NULL); @@ -185,21 +195,15 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, mvm->monitor_on = false; __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); } - - mutex_unlock(&mvm->mutex); } -static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif) +static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif) { unsigned int n_active = 0; int i; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { - struct ieee80211_bss_conf *link_conf; - - link_conf = link_conf_dereference_protected(vif, i); - if (link_conf && - rcu_access_pointer(link_conf->chanctx_conf)) + if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt) n_active++; } @@ -233,6 +237,15 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, link->phy_ctxt->rlc_disabled = true; } + if (vif->active_links == mvmvif->link_selection_res && + !WARN_ON(!(vif->active_links & BIT(mvmvif->link_selection_primary)))) + mvmvif->primary_link = mvmvif->link_selection_primary; + else + mvmvif->primary_link = __ffs(vif->active_links); + + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP, + NULL); + return ret; } @@ -245,21 +258,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - unsigned int n_active = iwl_mvm_mld_count_active_links(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif); unsigned int link_id = link_conf->link_id; int ret; - /* if the assigned one was not counted yet, count it now */ - if (!rcu_access_pointer(link_conf->chanctx_conf)) - n_active++; - - if (n_active > iwl_mvm_max_active_links(mvm, vif)) - return -EOPNOTSUPP; - if (WARN_ON_ONCE(!mvmvif->link[link_id])) return -EINVAL; + /* if the assigned one was not counted yet, count it now */ + if (!mvmvif->link[link_id]->phy_ctxt) + n_active++; + /* mac parameters such as HE support can change at this stage * For sta, need first to configure correct state from drv_sta_state * and only after that update mac 
config. @@ -294,37 +304,24 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, if (ret) goto out; - /* Initialize rate control for the AP station, since we might be - * doing a link switch here - we cannot initialize it before since - * this needs the phy context assigned (and in FW?), and we cannot - * do it later because it needs to be initialized as soon as we're - * able to TX on the link, i.e. when active. - * - * Firmware restart isn't quite correct yet for MLO, but we don't - * need to do it in that case anyway since it will happen from the - * normal station state callback. + /* + * if link switching (link not active yet) we'll activate it in + * firmware later on link-info change, which mac80211 guarantees + * for link switch after the stations are set up */ - if (mvmvif->ap_sta && - !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - struct ieee80211_link_sta *link_sta; - - rcu_read_lock(); - link_sta = rcu_dereference(mvmvif->ap_sta->link[link_id]); - - if (!WARN_ON_ONCE(!link_sta)) - iwl_mvm_rs_rate_init(mvm, vif, mvmvif->ap_sta, - link_conf, link_sta, - phy_ctxt->channel->band); - rcu_read_unlock(); + if (ieee80211_vif_link_active(vif, link_conf->link_id)) { + ret = iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_RATES_INFO, + true); + if (ret) + goto out; } - /* then activate */ - ret = iwl_mvm_link_changed(mvm, vif, link_conf, - LINK_CONTEXT_MODIFY_ACTIVE | - LINK_CONTEXT_MODIFY_RATES_INFO, - true); - if (ret) - goto out; + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, + false); /* * Power state must be updated before quotas, @@ -355,13 +352,23 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); - mutex_unlock(&mvm->mutex); + /* update EMLSR mode */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + int ret; - return ret; + ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, + true); + /* + * Don't activate this link if failed to exit EMLSR in + * the BSS interface + */ + if (ret) + return ret; + } + + guard(mvm)(mvm); + return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); } static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, @@ -407,6 +414,9 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, break; } + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN, + NULL); + return ret; } @@ -419,7 +429,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif); unsigned int link_id = link_conf->link_id; /* shouldn't happen, but verify link_id is valid before accessing */ @@ -475,6 +485,87 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, iwl_mvm_add_link(mvm, vif, link_conf); } mutex_unlock(&mvm->mutex); + + /* update EMLSR mode */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false); +} + +static void +iwl_mvm_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd, + const struct ieee80211_bss_conf *bss_info) +{ + u8 i; + + /* + * NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT, + * we fully 
ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE + */ + + BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) != + ARRAY_SIZE(bss_info->tpe.psd_local[0].power)); + + /* if not valid, mac80211 puts default (max value) */ + for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++) + cmd->psd_pwr[i] = min(bss_info->tpe.psd_local[0].power[i], + bss_info->tpe.psd_reg_client[0].power[i]); + + BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) != + ARRAY_SIZE(bss_info->tpe.max_local[0].power)); + + for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++) + cmd->eirp_pwr[i] = min(bss_info->tpe.max_local[0].power[i], + bss_info->tpe.max_reg_client[0].power[i]); +} + +void +iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + bool is_ap) +{ + struct iwl_txpower_constraints_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[bss_conf->link_id]; + u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, AP_TX_POWER_CONSTRAINTS_CMD); + u32 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) + return; + + if (!link_info->active || + link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) + return; + + if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ) + return; + + cmd.link_id = cpu_to_le16(link_info->fw_link_id); + memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr)); + memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr)); + + if (is_ap) { + cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP); + } else if (bss_conf->power_type == IEEE80211_REG_UNSET_AP) { + return; + } else { + cmd.ap_type = cpu_to_le16(bss_conf->power_type - 1); + iwl_mvm_tpe_sta_cmd_data(&cmd, bss_conf); + } + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(PHY_OPS_GROUP, + AP_TX_POWER_CONSTRAINTS_CMD), + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, + "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n", + ret); } static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, @@ -485,11 +576,16 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, true); + /* Send the beacon template */ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); if (ret) - goto out_unlock; + return ret; /* the link should be already activated when assigning chan context */ ret = iwl_mvm_link_changed(mvm, vif, link_conf, @@ -497,11 +593,11 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, ~LINK_CONTEXT_MODIFY_ACTIVE, true); if (ret) - goto out_unlock; + return ret; ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf); if (ret) - goto out_unlock; + return ret; /* Send the bcast station. 
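iwl_mvm_tpe_sta_cmd_data() above takes, element by element, the minimum of the AP's local transmit-power envelope and the regulatory-client envelope, with BUILD_BUG_ON() pinning the firmware command arrays to mac80211's array sizes at compile time. The core operation in isolation (a sketch; the element type is assumed to match the s8 power values mac80211 uses):

    #include <linux/minmax.h>
    #include <linux/types.h>

    static void clamp_tx_power_envelope(s8 *out, const s8 *local,
                                        const s8 *reg_client, int n)
    {
        /* honor whichever envelope is more restrictive per entry */
        for (int i = 0; i < n; i++)
            out[i] = min(local[i], reg_client[i]);
    }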
At this stage the TBTT and DTIM time * events are added and applied to the scheduler @@ -525,7 +621,7 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_ftm_restart_responder(mvm, vif, link_conf); - goto out_unlock; + return 0; out_failed: iwl_mvm_power_update_mac(mvm); @@ -533,8 +629,6 @@ out_failed: iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf); out_rm_mcast: iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); -out_unlock: - mutex_unlock(&mvm->mutex); return ret; } @@ -557,7 +651,7 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_stop_ap_ibss_common(mvm, vif); @@ -571,7 +665,6 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); iwl_mvm_power_update_mac(mvm); - mutex_unlock(&mvm->mutex); } static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw, @@ -604,124 +697,23 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw, &callbacks); } -struct iwl_mvm_link_sel_data { - u8 link_id; - enum nl80211_band band; - bool active; -}; - -static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a, - struct iwl_mvm_link_sel_data *b) +static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { - return a->band != b->band; -} - -void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool valid_links_changed) -{ - struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; - unsigned long usable_links = ieee80211_vif_usable_links(vif); - u32 max_active_links = iwl_mvm_max_active_links(mvm, vif); - u16 new_active_links; - u8 link_id, n_data = 0, i, j; - - if (!IWL_MVM_AUTO_EML_ENABLE) - return; + struct ieee80211_bss_conf *other_link; + int link_id; - if (!ieee80211_vif_is_mld(vif) || usable_links == 1) - return; - - /* The logic below is a simple version that doesn't suit more than 2 - * links - */ - WARN_ON_ONCE(max_active_links > 2); - - /* if only a single active link is supported, assume that the one - * selected by higher layer for connection establishment is the best. - */ - if (max_active_links == 1 && !valid_links_changed) - return; - - /* If we are already using the maximal number of active links, don't do - * any change. This can later be optimized to pick a 'better' link pair. 
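Note how iwl_mvm_mld_start_ap_ibss() above keeps its goto-based unwinding even after the guard() conversion: only the unlock became implicit, while resources are still released in reverse order of acquisition on failure. The skeleton of that idiom, with hypothetical stand-in helpers:

    static int add_mcast(void) { return 0; }    /* hypothetical steps */
    static int add_bcast(void) { return 0; }
    static int update_power(void) { return 0; }
    static void rm_bcast(void) { }
    static void rm_mcast(void) { }

    static int start_op(void)
    {
        int ret;

        ret = add_mcast();
        if (ret)
            return ret;             /* nothing to unwind yet */

        ret = add_bcast();
        if (ret)
            goto out_rm_mcast;

        ret = update_power();
        if (ret)
            goto out_rm_bcast;

        return 0;

    out_rm_bcast:
        rm_bcast();                 /* unwind in reverse order */
    out_rm_mcast:
        rm_mcast();
        return ret;
    }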
- */ - if (hweight16(vif->active_links) == max_active_links) - return; - - rcu_read_lock(); - - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_bss_conf *link_conf = - rcu_dereference(vif->link_conf[link_id]); - - if (WARN_ON_ONCE(!link_conf)) + /* Exit EMLSR if links don't have equal bandwidths */ + for_each_vif_active_link(vif, other_link, link_id) { + if (link_id == link_conf->link_id) continue; - - data[n_data].link_id = link_id; - data[n_data].band = link_conf->chandef.chan->band; - data[n_data].active = vif->active_links & BIT(link_id); - n_data++; - } - - rcu_read_unlock(); - - /* this is expected to be the current active link */ - if (n_data == 1) - return; - - new_active_links = 0; - - /* Assume that after association only a single link is active, thus, - * select only the 2nd link - */ - if (!valid_links_changed) { - for (i = 0; i < n_data; i++) { - if (data[i].active) - break; - } - - if (WARN_ON_ONCE(i == n_data)) - return; - - for (j = 0; j < n_data; j++) { - if (i == j) - continue; - - if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j])) - break; - } - - if (j != n_data) - new_active_links = BIT(data[i].link_id) | - BIT(data[j].link_id); - } else { - /* Try to find a valid link pair for EMLSR operation. If a pair - * is not found continue using the current active link. - */ - for (i = 0; i < n_data; i++) { - for (j = 0; j < n_data; j++) { - if (i == j) - continue; - - if (iwl_mvm_mld_valid_link_pair(&data[i], - &data[j])) - break; - } - - /* found a valid pair for EMLSR, use it */ - if (j != n_data) { - new_active_links = BIT(data[i].link_id) | - BIT(data[j].link_id); - break; - } - } + if (link_conf->chanreq.oper.width == + other_link->chanreq.oper.width) + return true; } - if (!new_active_links) - return; - - if (vif->active_links != new_active_links) - ieee80211_set_active_links_async(vif, new_active_links); + return false; } static void @@ -738,6 +730,11 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, if (WARN_ON_ONCE(!mvmvif->link[link_conf->link_id])) return; + /* not yet marked active in vif means during link switch */ + if (!ieee80211_vif_link_active(vif, link_conf->link_id) && + vif->cfg.assoc && mvmvif->link[link_conf->link_id]->phy_ctxt) + link_changes |= LINK_CONTEXT_MODIFY_ACTIVE; + has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax; has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be; @@ -753,8 +750,16 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; } - /* Update EHT Puncturing info */ - if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc) + if ((changes & BSS_CHANGED_BANDWIDTH) && + ieee80211_vif_link_active(vif, link_conf->link_id) && + mvmvif->esr_active && + !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf)) + iwl_mvm_exit_esr(mvm, vif, + IWL_MVM_ESR_EXIT_BANDWIDTH, + iwl_mvm_get_primary_link(vif)); + + /* if associated, maybe puncturing changed - we'll check later */ + if (vif->cfg.assoc) link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS; if (link_changes) { @@ -768,9 +773,6 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); - if (changes & BSS_CHANGED_MLD_VALID_LINKS) - iwl_mvm_mld_select_links(mvm, vif, true); - memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid, ETH_ALEN); @@ -782,37 +784,13 @@ static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif) int i; for_each_mvm_vif_valid_link(mvmvif, i) { - if 
(mvmvif->link[i]->ap_sta_id != IWL_MVM_INVALID_STA) + if (mvmvif->link[i]->ap_sta_id != IWL_INVALID_STA) return true; } return false; } -static void iwl_mvm_mld_vif_delete_all_stas(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int i, ret; - - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - return; - - for_each_mvm_vif_valid_link(mvmvif, i) { - struct iwl_mvm_vif_link_info *link = mvmvif->link[i]; - - if (!link) - continue; - - iwl_mvm_sec_key_remove_ap(mvm, vif, link, i); - ret = iwl_mvm_mld_rm_sta_id(mvm, link->ap_sta_id); - if (ret) - IWL_ERR(mvm, "failed to remove AP station\n"); - - link->ap_sta_id = IWL_MVM_INVALID_STA; - } -} - static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u64 changes) @@ -837,8 +815,15 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { - /* clear statistics to get clean beacon counter */ + mvmvif->session_prot_connection_loss = false; + + /* + * Clear statistics to get clean beacon counter, and ask for + * periodic statistics, as they are needed for link + * selection and RX OMI decisions. + */ iwl_mvm_request_statistics(mvm, true); + iwl_mvm_request_periodic_system_statistics(mvm, true); iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); @@ -886,6 +871,8 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { iwl_mvm_mei_host_disassociated(mvm); + iwl_mvm_request_periodic_system_statistics(mvm, false); + /* If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ @@ -894,15 +881,6 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status), "Failed to update SF upon disassociation\n"); - - /* If we get an assert during the connection (after the - * station has been added, but before the vif is set - * to associated), mac80211 will re-add the station and - * then configure the vif. Since the vif is not - * associated, we would remove the station here and - * this would fail the recovery. 
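The IWL_MVM_INVALID_STA to IWL_INVALID_STA rename running through these hunks is purely mechanical, but the sentinel pattern it serves is worth spelling out: per-link AP station ids are compared against the invalid marker to decide whether any link still owns a firmware station. A hedged restatement of iwl_mvm_mld_vif_have_valid_ap_sta() (the sentinel value and array form here are made up for the sketch):

    #include <linux/types.h>

    #define EXAMPLE_INVALID_STA 0xff    /* stands in for IWL_INVALID_STA */

    static bool have_valid_ap_sta(const u8 *link_ap_sta_id, int n_links)
    {
        for (int i = 0; i < n_links; i++)
            if (link_ap_sta_id[i] != EXAMPLE_INVALID_STA)
                return true;

        return false;
    }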
- */ - iwl_mvm_mld_vif_delete_all_stas(mvm, vif); } iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); @@ -913,6 +891,11 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } + + if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) && + ieee80211_vif_is_mld(vif) && mvmvif->authorized) + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk, 0); } static void @@ -965,7 +948,7 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -989,10 +972,8 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw, if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", link_conf->txpower); - iwl_mvm_set_tx_power(mvm, vif, link_conf->txpower); + iwl_mvm_set_tx_power(mvm, link_conf, link_conf->txpower); } - - mutex_unlock(&mvm->mutex); } static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw, @@ -1001,15 +982,13 @@ static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); if (vif->type == NL80211_IFTYPE_STATION) iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes); - - mutex_unlock(&mvm->mutex); } static int @@ -1042,9 +1021,8 @@ static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw, !vif->p2p) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); - mutex_unlock(&mvm->mutex); } static int @@ -1066,14 +1044,10 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw, * The exception is P2P_DEVICE interface which needs immediate update. 
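The new MLO internal scan above is kicked via wiphy_delayed_work_queue() with a zero delay, and the same API appears later in this patch with a real timeout for the temporary EMLSR block. Handlers queued this way run with the wiphy mutex held, which is why no extra locking appears around the work bodies. A minimal usage sketch (the handler body is hypothetical):

    #include <net/cfg80211.h>

    static struct wiphy_delayed_work example_wk;

    static void example_wk_fn(struct wiphy *wiphy, struct wiphy_work *work)
    {
        /* runs under the wiphy mutex */
    }

    static void example_arm(struct wiphy *wiphy)
    {
        /* init is one-time in real code; shown inline for brevity */
        wiphy_delayed_work_init(&example_wk, example_wk_fn);

        /* delay 0 means "as soon as the wiphy mutex can be taken" */
        wiphy_delayed_work_queue(wiphy, &example_wk, 0);
    }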
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - int ret; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_QOS_PARAMS, - true); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_QOS_PARAMS, + true); } return 0; } @@ -1122,20 +1096,13 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) { struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {}; - unsigned int n_active = iwl_mvm_mld_count_active_links(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 removed = old_links & ~new_links; u16 added = new_links & ~old_links; int err, i; - if (hweight16(new_links) > 1 && - n_active > iwl_mvm_max_active_links(mvm, vif)) - return -EOPNOTSUPP; - for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { - int r; - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) break; @@ -1147,19 +1114,17 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, goto free; } - new_link[i]->bcast_sta.sta_id = IWL_MVM_INVALID_STA; - new_link[i]->mcast_sta.sta_id = IWL_MVM_INVALID_STA; - new_link[i]->ap_sta_id = IWL_MVM_INVALID_STA; new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; - - for (r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++) - new_link[i]->smps_requests[r] = - IEEE80211_SMPS_AUTOMATIC; + iwl_mvm_init_link(new_link[i]); } mutex_lock(&mvm->mutex); - if (old_links == 0) { + /* If we're in RESTART flow, the default link wasn't added in + * drv_add_interface(), and link[0] doesn't point to it. + */ + if (old_links == 0 && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)) { err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); if (err) goto out_err; @@ -1196,6 +1161,14 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, if (new_links == 0) { mvmvif->link[0] = &mvmvif->deflink; err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (err == 0) + mvmvif->primary_link = 0; + } else if (!(new_links & BIT(mvmvif->primary_link))) { + /* + * Ensure we always have a valid primary_link, the real + * decision happens later when PHY is activated. 
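iwl_mvm_mld_change_vif_links() above derives the removed and added sets from the old/new u16 link bitmaps, and the surrounding code repeatedly picks a default link with the __ffs() ternary, needed because __ffs(0) is undefined and non-MLO vifs carry an empty bitmap. Both bit tricks in isolation:

    #include <linux/bitops.h>
    #include <linux/types.h>

    static void link_delta(u16 old_links, u16 new_links,
                           u16 *removed, u16 *added)
    {
        *removed = old_links & ~new_links;
        *added = new_links & ~old_links;
    }

    /* lowest set bit, falling back to link 0 for an empty bitmap */
    static int first_link(u16 links)
    {
        return links ? __ffs(links) : 0;
    }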
+ */ + mvmvif->primary_link = __ffs(new_links); } out_err: @@ -1215,15 +1188,154 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw, u16 old_links, u16 new_links) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + guard(mvm)(mvm); + return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links); +} + +bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + const struct wiphy_iftype_ext_capab *ext_capa; + + lockdep_assert_held(&mvm->mutex); + + if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc || + hweight16(ieee80211_vif_usable_links(vif)) == 1) + return false; + + if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP)) + return false; + + ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy, + ieee80211_vif_type_p2p(vif)); + return (ext_capa && + (ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP)); +} + +static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 desired_links) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int n_links = hweight16(desired_links); + + if (n_links <= 1) + return true; + + guard(mvm)(mvm); + + /* Check if HW supports the wanted number of links */ + if (n_links > iwl_mvm_max_active_links(mvm, vif)) + return false; + + /* If it is an eSR device, check that we can enter eSR */ + return iwl_mvm_is_esr_supported(mvm->fwrt.trans) && + iwl_mvm_vif_has_esr_cap(mvm, vif); +} + +static enum ieee80211_neg_ttlm_res +iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_neg_ttlm *neg_ttlm) +{ + u16 map; + u8 i; + + /* Verify all TIDs are mapped to the same links set */ + map = neg_ttlm->downlink[0]; + for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) { + if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] || + neg_ttlm->uplink[i] != map) + return NEG_TTLM_RES_REJECT; + } + + return NEG_TTLM_RES_ACCEPT; +} + +static int +iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); - ret = iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links); + if (mvmvif->esr_active) { + u8 primary = iwl_mvm_get_primary_link(vif); + int selected; + + /* prefer primary unless quiet CSA on it */ + if (chsw->link_id == primary && chsw->block_tx) + selected = iwl_mvm_get_other_link(vif, primary); + else + selected = primary; + + /* + * remembers to tell the firmware that this link can't tx + * Note that this logic seems to be unrelated to esr, but it + * really is needed only when esr is active. When we have a + * single link, the firmware will handle all this on its own. + * In multi-link scenarios, we can learn about the CSA from + * another link and this logic is too complex for the firmware + * to track. + * Since we want to de-activate the link that got a CSA, we + * need to tell the firmware not to send any frame on that link + * as the firmware may not be aware that link is under a CSA + * with mode=1 (no Tx allowed). + */ + if (chsw->block_tx && mvmvif->link[chsw->link_id]) + mvmvif->link[chsw->link_id]->csa_block_tx = true; + + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected); + mutex_unlock(&mvm->mutex); + + /* + * If we've not kept the link active that's doing the CSA + * then we don't need to do anything else, just return. 
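iwl_mvm_mld_can_neg_ttlm() above accepts a negotiated TID-to-link mapping only when every TID maps to one and the same link set in both directions, presumably because the firmware cannot track per-TID link sets. The check as a standalone predicate (the TID count mirrors mac80211's IEEE80211_TTLM_NUM_TIDS):

    #include <linux/types.h>

    #define EXAMPLE_NUM_TIDS 8

    static bool ttlm_is_uniform(const u16 *downlink, const u16 *uplink)
    {
        u16 map = downlink[0];

        for (int i = 0; i < EXAMPLE_NUM_TIDS; i++)
            if (downlink[i] != uplink[i] || uplink[i] != map)
                return false;

        return true;
    }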
+ */ + if (selected != chsw->link_id) + return 0; + + mutex_lock(&mvm->mutex); + } + + ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw); mutex_unlock(&mvm->mutex); return ret; } +#define IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT (5 * HZ) + +static void iwl_mvm_mld_prep_add_interface(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); + struct iwl_mvm_vif *mvmvif; + int ret; + + IWL_DEBUG_MAC80211(mvm, "prep_add_interface: type=%u\n", + type); + + if (IS_ERR_OR_NULL(bss_vif) || + !(type == NL80211_IFTYPE_AP || + type == NL80211_IFTYPE_P2P_GO || + type == NL80211_IFTYPE_P2P_CLIENT)) + return; + + mvmvif = iwl_mvm_vif_from_mac80211(bss_vif); + ret = iwl_mvm_block_esr_sync(mvm, bss_vif, + IWL_MVM_ESR_BLOCKED_TMP_NON_BSS); + if (ret) + return; + + wiphy_delayed_work_queue(mvmvif->mvm->hw->wiphy, + &mvmvif->unblock_esr_tmp_non_bss_wk, + IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT); +} + const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .tx = iwl_mvm_mac_tx, .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, @@ -1249,7 +1361,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, - .sta_rc_update = iwl_mvm_sta_rc_update, + .link_sta_rc_update = iwl_mvm_sta_rc_update, .conf_tx = iwl_mvm_mld_mac_conf_tx, .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, @@ -1277,7 +1389,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .tx_last_beacon = iwl_mvm_tx_last_beacon, .channel_switch = iwl_mvm_channel_switch, - .pre_channel_switch = iwl_mvm_pre_channel_switch, + .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, @@ -1318,4 +1430,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .change_vif_links = iwl_mvm_mld_change_vif_links, .change_sta_links = iwl_mvm_mld_change_sta_links, + .can_activate_links = iwl_mvm_mld_can_activate_links, + .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm, + .prep_add_interface = iwl_mvm_mld_prep_add_interface, }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 1628bf55458f..2f159024eeb8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022-2023 Intel Corporation + * Copyright (C) 2022-2024 Intel Corporation */ #include "mvm.h" #include "time-sync.h" @@ -9,7 +9,9 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int filter_link_id) { + struct ieee80211_link_sta *link_sta; struct iwl_mvm_sta *mvmsta; + struct ieee80211_vif *vif; unsigned int link_id; u32 result = 0; @@ -17,33 +19,34 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return 0; mvmsta = iwl_mvm_sta_from_mac80211(sta); + vif = mvmsta->vif; /* it's easy when the STA is not an MLD */ if (!sta->valid_links) return BIT(mvmsta->deflink.sta_id); /* but if it is an MLD, get the mask of all the FW STAs it has ... 
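The EMLSR pre-CSA logic above boils down to one choice: stay on the primary link unless the quiet CSA (block_tx) is happening on that very link, in which case survive on the other one. As a pure function, with illustrative names:

    #include <linux/types.h>

    static int pick_esr_surviving_link(int primary, int other,
                                       int csa_link_id, bool csa_blocks_tx)
    {
        if (csa_link_id == primary && csa_blocks_tx)
            return other;       /* primary is about to go quiet */

        return primary;
    }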
*/ - for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) { - struct iwl_mvm_link_sta *link_sta; + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_link_sta *mvm_link_sta; /* unless we have a specific link in mind */ if (filter_link_id >= 0 && link_id != filter_link_id) continue; - link_sta = + mvm_link_sta = rcu_dereference_check(mvmsta->link[link_id], lockdep_is_held(&mvm->mutex)); - if (!link_sta) + if (!mvm_link_sta) continue; - result |= BIT(link_sta->sta_id); + result |= BIT(mvm_link_sta->sta_id); } return result; } static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm, - struct iwl_mvm_sta_cfg_cmd *cmd) + struct iwl_sta_cfg_cmd *cmd) { int ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), @@ -60,7 +63,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, const u8 *addr, int link_id) { - struct iwl_mvm_sta_cfg_cmd cmd; + struct iwl_sta_cfg_cmd cmd; lockdep_assert_held(&mvm->mutex); @@ -91,7 +94,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm, */ static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id) { - struct iwl_mvm_remove_sta_cmd rm_sta_cmd = { + struct iwl_remove_sta_cmd rm_sta_cmd = { .sta_id = cpu_to_le32(sta_id), }; int ret; @@ -143,7 +146,7 @@ int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm, unsigned int wdg_timeout = _wdg_timeout ? *_wdg_timeout : mvm->trans->trans_cfg->base_params->wd_timeout; - if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA)) return -ENOSPC; if (sta->type == STATION_TYPE_AUX) @@ -213,7 +216,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + iwl_mvm_get_wd_timeout(mvm, vif); u16 *queue; lockdep_assert_held(&mvm->mutex); @@ -238,7 +241,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_MAX_TID_COUNT, &wdg_timeout); } -/* Allocate a new station entry for the broadcast station to the given vif, +/* Allocate a new station entry for the multicast station to the given vif, * and send it to the FW. * Note that each AP/GO mac should have its own multicast station. 
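iwl_mvm_sta_fw_id_mask() above folds the per-link firmware station ids of one MLD station into a u32 bitmask, which later hunks feed to flush and BAID commands. The construction in isolation, using a plain array instead of the RCU-protected link pointers:

    #include <linux/bits.h>
    #include <linux/types.h>

    static u32 build_fw_sta_mask(const int *link_sta_id, int n_links)
    {
        u32 mask = 0;

        for (int i = 0; i < n_links; i++)
            if (link_sta_id[i] >= 0)    /* < 0: no station on link */
                mask |= BIT(link_sta_id[i]);

        return mask;
    }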
*/ @@ -251,7 +254,7 @@ int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta; static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; - unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif); lockdep_assert_held(&mvm->mutex); @@ -343,7 +346,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(int_sta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(int_sta->sta_id == IWL_INVALID_STA)) return -EINVAL; if (flush) @@ -435,7 +438,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif_link_info *link_info = mvm_vif->link[link_conf->link_id]; - struct iwl_mvm_sta_cfg_cmd cmd = { + struct iwl_sta_cfg_cmd cmd = { .sta_id = cpu_to_le32(mvm_link_sta->sta_id), .station_type = cpu_to_le32(mvm_sta->sta_type), }; @@ -467,7 +470,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, break; } - switch (sta->deflink.smps_mode) { + switch (link_sta->smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); @@ -512,14 +515,15 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return iwl_mvm_mld_send_sta_cmd(mvm, &cmd); } -static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, - struct iwl_mvm_link_sta *mvm_sta_link, - unsigned int link_id, - bool is_in_fw) +void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id) { - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], - is_in_fw ? 
ERR_PTR(-EINVAL) : NULL); + lockdep_assert_wiphy(mvm->hw->wiphy); + lockdep_assert_held(&mvm->mutex); + + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], NULL); RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL); RCU_INIT_POINTER(mvm_sta->link[link_id], NULL); @@ -540,7 +544,7 @@ static void iwl_mvm_mld_sta_rm_all_sta_links(struct iwl_mvm *mvm, if (!link) continue; - iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id, false); + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id); } } @@ -556,7 +560,10 @@ static int iwl_mvm_mld_alloc_sta_link(struct iwl_mvm *mvm, u32 sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif)); - if (sta_id == IWL_MVM_INVALID_STA) + lockdep_assert_wiphy(mvm->hw->wiphy); + lockdep_assert_held(&mvm->mutex); + + if (sta_id == IWL_INVALID_STA) return -ENOSPC; if (rcu_access_pointer(sta->link[link_id]) == &sta->deflink) { @@ -582,14 +589,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; unsigned int link_id; int ret; lockdep_assert_held(&mvm->mutex); - for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { - if (!rcu_access_pointer(sta->link[link_id]) || - mvm_sta->link[link_id]) + for_each_sta_active_link(vif, sta, link_sta, link_id) { + if (WARN_ON(mvm_sta->link[link_id])) continue; ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); @@ -609,16 +616,13 @@ static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta, struct iwl_mvm_link_sta *sta_link) { if (!sta->tdls) { - WARN_ON(vif_link->ap_sta_id != IWL_MVM_INVALID_STA); + WARN_ON(vif_link->ap_sta_id != IWL_INVALID_STA); vif_link->ap_sta_id = sta_link->sta_id; } else { - WARN_ON(vif_link->ap_sta_id == IWL_MVM_INVALID_STA); + WARN_ON(vif_link->ap_sta_id == IWL_INVALID_STA); } } -/* FIXME: consider waiting for mac80211 to add the STA instead of allocating - * queues here - */ static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -631,6 +635,9 @@ static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm, int ret = -EINVAL; int sta_id; + lockdep_assert_wiphy(mvm->hw->wiphy); + lockdep_assert_held(&mvm->mutex); + /* First add an empty station since allocating a queue requires * a valid station. Since we need a link_id to allocate a station, * pick up the first valid one. 
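Several MLD station helpers above now assert both the wiphy mutex and the driver mutex, documenting a transition period in which mac80211 callbacks arrive under the wiphy lock while the driver still serializes on mvm->mutex. The pattern, reduced to its essentials:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <net/cfg80211.h>

    static void assert_both_locks(struct wiphy *wiphy,
                                  struct mutex *drv_mutex)
    {
        lockdep_assert_wiphy(wiphy);    /* caller holds the wiphy mutex */
        lockdep_assert_held(drv_mutex); /* and the driver's own mutex */
    }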
@@ -686,7 +693,7 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, spin_lock_init(&mvm_sta->lock); - ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA, + ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_INVALID_STA, STATION_TYPE_PEER); } else { ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); @@ -723,7 +730,6 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id], mvm_link_sta); } - return 0; err: @@ -836,29 +842,30 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_link_sta *mvm_link_sta = rcu_dereference_protected(mvm_sta->link[link_id], lockdep_is_held(&mvm->mutex)); - bool stay_in_fw; + iwl_mvm_sta_del(mvm, vif, sta, link_sta); - stay_in_fw = iwl_mvm_sta_del(mvm, vif, sta, link_sta, &ret); - if (ret) - break; + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id); - if (!stay_in_fw) - ret = iwl_mvm_mld_rm_sta_from_fw(mvm, - mvm_link_sta->sta_id); - - iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, - link_id, stay_in_fw); + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id); } + kfree(mvm_sta->mpdu_counters); + mvm_sta->mpdu_counters = NULL; return ret; } int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id) { - int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id); + int ret; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); + if (WARN_ON(sta_id == IWL_INVALID_STA)) + return 0; + + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); return ret; @@ -984,6 +991,10 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); int baid; + /* mac80211 will remove sessions later, but we ignore all that */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return 0; + BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) { @@ -1004,7 +1015,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, cmd.modify.tid = cpu_to_le32(data->tid); - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL, + sizeof(cmd), &cmd); data->sta_mask = new_sta_mask; if (ret) return ret; @@ -1053,6 +1065,7 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, unsigned int link_id; int ret; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); for_each_set_bit(link_id, &old_links_long, @@ -1098,10 +1111,9 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, goto err; if (vif->type == NL80211_IFTYPE_STATION) - mvm_vif_link->ap_sta_id = IWL_MVM_INVALID_STA; + mvm_vif_link->ap_sta_id = IWL_INVALID_STA; - iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id, - false); + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id); } for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { @@ -1117,10 +1129,21 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - if (WARN_ON(!mvm_sta->link[link_id])) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + u32 sta_id; + + if (WARN_ON(!mvm_link_sta)) { ret = -EINVAL; goto err; } + + sta_id = mvm_link_sta->sta_id; + + 
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); + rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id], + link_sta); } else { if (WARN_ON(mvm_sta->link[link_id])) { ret = -EINVAL; @@ -1160,6 +1183,9 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, link_sta_added_to_fw |= BIT(link_id); iwl_mvm_rs_add_sta_link(mvm, mvm_sta_link); + + iwl_mvm_rs_rate_init(mvm, vif, sta, link_conf, link_sta, + link_conf->chanreq.oper.chan->band); } if (sta_mask_added) { @@ -1191,8 +1217,7 @@ err: rcu_dereference_protected(mvm_sta->link[link_id], lockdep_is_held(&mvm->mutex)); - iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id, - false); + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id); } return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 81dbef6947f5..ee769da72e68 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -9,6 +9,7 @@ #include <linux/list.h> #include <linux/spinlock.h> +#include <linux/cleanup.h> #include <linux/leds.h> #include <linux/in6.h> @@ -23,7 +24,7 @@ #include "iwl-op-mode.h" #include "iwl-trans.h" #include "fw/notif-wait.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "fw/file.h" #include "iwl-config.h" #include "sta.h" @@ -40,8 +41,9 @@ #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 +#define IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD 4 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 -#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16 +#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 19 /* A TimeUnit is 1024 microsecond */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) @@ -81,14 +83,9 @@ extern const struct ieee80211_ops iwl_mvm_mld_hw_ops; /** * struct iwl_mvm_mod_params - module parameters for iwlmvm - * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. - * We will register to mac80211 to have testmode working. The NIC must not - * be up'ed after the INIT fw asserted. This is useful to be able to use - * proprietary tools over testmode to debug the INIT fw. 
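The restart path above re-publishes existing station entries into the firmware-id lookup tables with rcu_assign_pointer(), which orders all prior writes to the object before the pointer store so that lockless readers (for instance the RX path) never observe a half-initialized entry. The minimal publish pattern, with illustrative types:

    #include <linux/rcupdate.h>

    struct example_entry {
        int sta_id;
    };

    static struct example_entry __rcu *example_table[32];

    static void example_publish(struct example_entry *e, int idx)
    {
        e->sta_id = idx;    /* ordered before the store below */
        rcu_assign_pointer(example_table[idx], e);
    }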
* @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { - bool init_dbg; int power_scheme; }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; @@ -105,6 +102,8 @@ struct iwl_mvm_phy_ctxt { /* track for RLC config command */ u32 center_freq1; bool rlc_disabled; + u32 channel_load_by_us; + u32 channel_load_not_by_us; }; struct iwl_mvm_time_event_data { @@ -121,7 +120,7 @@ struct iwl_mvm_time_event_data { * if the te is in the time event list or not (when id == TE_MAX) */ u32 id; - u8 link_id; + s8 link_id; }; /* Power management */ @@ -253,18 +252,14 @@ enum iwl_mvm_low_latency_cause { }; /** -* struct iwl_mvm_vif_bf_data - beacon filtering related data -* @bf_enabled: indicates if beacon filtering is enabled -* @ba_enabled: indicated if beacon abort is enabled +* struct iwl_mvm_link_bf_data - beacon filtering related data * @ave_beacon_signal: average beacon signal * @last_cqm_event: rssi of the last cqm event * @bt_coex_min_thold: minimum threshold for BT coex * @bt_coex_max_thold: maximum threshold for BT coex * @last_bt_coex_event: rssi of the last BT coex event */ -struct iwl_mvm_vif_bf_data { - bool bf_enabled; - bool ba_enabled; +struct iwl_mvm_link_bf_data { int ave_beacon_signal; int last_cqm_event; int bt_coex_min_thold; @@ -305,8 +300,10 @@ struct iwl_probe_resp_data { * @active: indicates the link is active in FW (for sanity checking) * @cab_queue: content-after-beacon (multicast) queue * @listen_lmac: indicates this link is allocated to the listen LMAC + * @csa_block_tx: we got CSA with mode=1 * @mcast_sta: multicast station * @phy_ctxt: phy context allocated to this link, if any + * @bf_data: beacon filtering data */ struct iwl_mvm_vif_link_info { u8 bssid[ETH_ALEN]; @@ -329,6 +326,7 @@ struct iwl_mvm_vif_link_info { bool he_ru_2mhz_block; bool active; bool listen_lmac; + bool csa_block_tx; u16 cab_queue; /* Assigned while mac80211 has the link in a channel context, @@ -342,6 +340,72 @@ struct iwl_mvm_vif_link_info { struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; u16 mgmt_queue; + + struct iwl_mvm_link_bf_data bf_data; +}; + +/** + * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or + * blocked. + * The low 16 bits are used for blocking reasons, and the 16 higher bits + * are used for exit reasons. + * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit + * reasons - use iwl_mvm_exit_esr(). + * + * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs) + * + * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting + * in a loop. + * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR + * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic + * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend EMLSR, or forced an exit from it + * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is + * preventing EMLSR + * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR + * @IWL_MVM_ESR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's link + * is preventing EMLSR. This is a temporary blocking that is set when there + * is an indication that a non-BSS interface is to be added. + * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons + * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR + * due to low RSSI. + * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR + * due to BT Coex. + * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links + * preventing the enablement of EMLSR + * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR + * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link + * @IWL_MVM_ESR_EXIT_FAIL_ENTRY: Exit EMLSR due to entry failure + */ +enum iwl_mvm_esr_state { + IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1, + IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2, + IWL_MVM_ESR_BLOCKED_TPT = 0x4, + IWL_MVM_ESR_BLOCKED_FW = 0x8, + IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10, + IWL_MVM_ESR_BLOCKED_ROC = 0x20, + IWL_MVM_ESR_BLOCKED_TMP_NON_BSS = 0x40, + IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000, + IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000, + IWL_MVM_ESR_EXIT_COEX = 0x40000, + IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000, + IWL_MVM_ESR_EXIT_CSA = 0x100000, + IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000, + IWL_MVM_ESR_EXIT_FAIL_ENTRY = 0x400000, +}; + +#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
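The enum above folds two kinds of state into one bitmap: blocking reasons occupy the low 16 bits (covered by IWL_MVM_BLOCK_ESR_REASONS) and exit reasons sit above them, so a single esr_disable_reason word can be tested with one mask. A minimal standalone sketch of that convention follows; only the bit values and the mask come from the patch, the helper name and its use are illustrative:

#include <stdio.h>

#define IWL_MVM_BLOCK_ESR_REASONS 0xffff

/* illustrative subset of the enum above */
enum esr_state {
	ESR_BLOCKED_WOWLAN = 0x2,         /* low 16 bits: blocking reasons */
	ESR_EXIT_MISSED_BEACON = 0x10000, /* higher bits: exit reasons */
};

/* EMLSR may only be (re)entered while no blocking bit is set */
static int esr_can_enter(unsigned int esr_disable_reason)
{
	return !(esr_disable_reason & IWL_MVM_BLOCK_ESR_REASONS);
}

int main(void)
{
	unsigned int reason = ESR_EXIT_MISSED_BEACON;

	printf("can enter: %d\n", esr_can_enter(reason)); /* 1: exit bits don't block */

	reason |= ESR_BLOCKED_WOWLAN;
	printf("can enter: %d\n", esr_can_enter(reason)); /* 0 until unblocked */
	return 0;
}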
+ +const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state); + +/** + * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode. + * @reason: The reason for the last exit from EMLSR; a bitmap of + * &enum iwl_mvm_esr_state. Will be 0 before exiting EMLSR. + * @ts: the time stamp of the last time we exited EMLSR. + */ +struct iwl_mvm_esr_exit { + unsigned long ts; + enum iwl_mvm_esr_state reason; }; /** @@ -368,14 +432,67 @@ struct iwl_mvm_vif_link_info { * @csa_countdown: indicates that CSA countdown may be started * @csa_failed: CSA failed to schedule time event, report an error later * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel + * @csa_blocks_tx: CSA is blocking TX * @features: hw features active for this vif + * @max_tx_op: max TXOP in usecs for all ACs, zero for no limit. * @ap_beacon_time: AP beacon time for synchronisation (on older FW) + * @bf_enabled: indicates if beacon filtering is enabled + * @ba_enabled: indicates if beacon abort is enabled * @bcn_prot: beacon protection data (keys; FIXME: needs to be per link) - * @bf_data: beacon filtering data * @deflink: default link data for use in non-MLO * @link: link data for each link in MLO * @esr_active: indicates eSR mode is active + * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state * @pm_enabled: indicates powersave is enabled + * @link_selection_res: bitmap of active links as it was decided in the last + * link selection. Valid only for a MLO vif after assoc. 0 if there wasn't + * any link selection yet. + * @link_selection_primary: primary link selected by link selection + * @primary_link: primary link in eSR. Valid only for an associated MLD vif, + * and in eSR mode. Valid only for a STA. + * @last_esr_exit: Details of the last exit from EMLSR. + * @exit_same_reason_count: The number of times we exited due to the specified + * @last_esr_exit::reason, only counting exits due to + * &IWL_MVM_ESR_PREVENT_REASONS. + * @prevent_esr_done_wk: work that should be done when esr prevention ends. + * @mlo_int_scan_wk: work for the internal MLO scan. + * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough. + * @unblock_esr_tmp_non_bss_wk: work for removing the + * IWL_MVM_ESR_BLOCKED_TMP_NON_BSS blocking for EMLSR. + * @roc_activity: currently running ROC activity for this vif (or + * ROC_NUM_ACTIVITIES if no activity is running).
+ * @session_prot_connection_loss: the connection was lost due to session + * protection ending without receiving a beacon, so we now need to + * protect the deauth separately + * @ap_early_keys: The firmware cannot install keys before stations etc., + * but higher layers work differently, so we store the keys here for + * later installation. + * @ap_sta: pointer to the AP STA data structure + * @csa_count: CSA counter (old CSA implementation w/o firmware) + * @csa_misbehave: CSA AP misbehaviour flag (old implementation) + * @csa_target_freq: CSA target channel frequency (old implementation) + * @csa_work: CSA work (old implementation) + * @dbgfs_bf: beacon filtering debugfs data + * @dbgfs_dir: debugfs directory for this vif + * @dbgfs_pm: power management debugfs data + * @dbgfs_quota_min: debugfs value for minimal quota + * @dbgfs_slink: debugfs symlink for this interface + * @ftm_unprotected: unprotected FTM debugfs override + * @hs_time_event_data: hotspot/AUX ROC time event data + * @mac_pwr_cmd: debugfs override for MAC power command + * @target_ipv6_addrs: IPv6 addresses on this interface for offload + * @num_target_ipv6_addrs: number of @target_ipv6_addrs + * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs + * @rekey_data: rekeying data for WoWLAN GTK rekey offload + * @seqno: storage for seqno for older firmware D0/D3 transition + * @seqno_valid: indicates @seqno is valid + * @time_event_data: session protection time event data + * @tsf_id: the TSF resource ID assigned in firmware (for firmware needing that) + * @tx_key_idx: WEP transmit key index for D3 + * @uapsd_misbehaving_ap_addr: MLD address/BSSID of U-APSD misbehaving AP, to + * not use U-APSD on reconnection + * @uapsd_nonagg_detected_wk: worker for handling detection of no aggregation + * in U-APSD */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -389,6 +506,7 @@ struct iwl_mvm_vif { bool pm_enabled; bool monitor_active; bool esr_active; + bool session_prot_connection_loss; u8 low_latency: 6; u8 low_latency_actual: 1; @@ -396,10 +514,12 @@ struct iwl_mvm_vif { u8 authorized:1; bool ps_disabled; + u32 esr_disable_reason; u32 ap_beacon_time; - struct iwl_mvm_vif_bf_data bf_data; + bool bf_enabled; + bool ba_enabled; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /* WoWLAN GTK rekey data */ struct { u8 kck[NL80211_KCK_EXT_LEN]; @@ -431,6 +551,7 @@ struct iwl_mvm_vif { struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; int dbgfs_quota_min; + bool ftm_unprotected; #endif /* FW identified misbehaving AP */ @@ -440,6 +561,7 @@ struct iwl_mvm_vif { bool csa_countdown; bool csa_failed; bool csa_bcn_pending; + bool csa_blocks_tx; u16 csa_target_freq; u16 csa_count; u16 csa_misbehave; @@ -449,6 +571,7 @@ struct iwl_mvm_vif { struct iwl_mvm_time_event_data time_event_data; struct iwl_mvm_time_event_data hs_time_event_data; + enum iwl_roc_activity roc_activity; /* TCP Checksum Offload */ netdev_features_t features; @@ -462,6 +585,18 @@ struct iwl_mvm_vif { struct ieee80211_key_conf __rcu *keys[2]; } bcn_prot; + u16 max_tx_op; + + u16 link_selection_res; + u8 link_selection_primary; + u8 primary_link; + struct iwl_mvm_esr_exit last_esr_exit; + u8 exit_same_reason_count; + struct wiphy_delayed_work prevent_esr_done_wk; + struct wiphy_delayed_work mlo_int_scan_wk; + struct wiphy_work unblock_esr_tpt_wk; + struct wiphy_delayed_work unblock_esr_tmp_non_bss_wk; + struct iwl_mvm_vif_link_info deflink; struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS]; }; @@ -486,10 +621,12 @@ enum 
iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), IWL_MVM_SCAN_NETDETECT = BIT(2), + IWL_MVM_SCAN_INT_MLO = BIT(3), IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), + IWL_MVM_SCAN_STOPPING_INT_MLO = BIT(11), IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR, @@ -497,6 +634,8 @@ enum iwl_scan_status { IWL_MVM_SCAN_STOPPING_SCHED, IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, + IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO | + IWL_MVM_SCAN_STOPPING_INT_MLO, IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, IWL_MVM_SCAN_MASK = 0xff, @@ -518,7 +657,7 @@ enum iwl_mvm_sched_scan_pass_all_states { }; /** - * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure + * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure * @ct_kill_exit: worker to exit thermal kill * @dynamic_smps: Is thermal throttling enabled dynamic_smps? * @tx_backoff: The current thremal throttling tx backoff in uSec. @@ -537,14 +676,12 @@ struct iwl_mvm_tt_mgmt { #ifdef CONFIG_THERMAL /** - *struct iwl_mvm_thermal_device - thermal zone related data - * @temp_trips: temperature thresholds for report - * @fw_trips_index: keep indexes to original array - temp_trips + * struct iwl_mvm_thermal_device - thermal zone related data + * @trips: temperature thresholds for report * @tzone: thermal zone device data */ struct iwl_mvm_thermal_device { struct thermal_trip trips[IWL_MAX_DTS_TRIPS]; - u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; struct thermal_zone_device *tzone; }; @@ -643,24 +780,16 @@ struct iwl_mvm_tcm { * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer - * @buf_size: the reorder buffer size as set by the last addba request * @queue: queue of this reorder buffer - * @last_amsdu: track last ASMDU SN for duplication detection - * @last_sub_index: track ASMDU sub frame index for duplication detection * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state - * @mvm: mvm pointer, needed for frame timer context */ struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; - u16 buf_size; int queue; - u16 last_amsdu; - u8 last_sub_index; bool valid; spinlock_t lock; - struct iwl_mvm *mvm; } ____cacheline_aligned_in_smp; /** @@ -682,6 +811,7 @@ __aligned(roundup_pow_of_two(sizeof(struct sk_buff_head))) * @tid: tid of the session * @baid: baid of the session * @timeout: the timeout set in the addba request + * @buf_size: the reorder buffer size as set by the last addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update @@ -698,13 +828,14 @@ struct iwl_mvm_baid_data { u8 tid; u8 baid; u16 timeout; + u16 buf_size; u16 entries_per_queue; unsigned long last_rx; struct timer_list session_timer; struct iwl_mvm_baid_data __rcu **rcu_ptr; struct iwl_mvm *mvm; struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; - struct iwl_mvm_reorder_buf_entry entries[]; + struct iwl_mvm_reorder_buf_entry entries[] ____cacheline_aligned_in_smp; }; static inline struct iwl_mvm_baid_data * @@ -752,9 +883,10 @@ struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; -#define IWL_MVM_TXQ_STATE_STOP_FULL 0 -#define 
IWL_MVM_TXQ_STATE_STOP_REDIRECT 1 -#define IWL_MVM_TXQ_STATE_READY 2 +#define IWL_MVM_TXQ_STATE_READY 0 +#define IWL_MVM_TXQ_STATE_STOP_FULL 1 +#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 2 +#define IWL_MVM_TXQ_STATE_STOP_AP_CSA 3 unsigned long state; }; @@ -832,6 +964,35 @@ struct iwl_mei_scan_filter { struct work_struct scan_work; }; +/** + * struct iwl_mvm_acs_survey_channel - per-channel survey information + * + * Stripped down version of &struct survey_info. + * + * @time: time in ms the radio was on the channel + * @time_busy: time in ms the channel was sensed busy + * @time_tx: time in ms spent transmitting data + * @time_rx: time in ms spent receiving data + * @noise: channel noise in dBm + */ +struct iwl_mvm_acs_survey_channel { + u32 time; + u32 time_busy; + u32 time_tx; + u32 time_rx; + s8 noise; +}; + +struct iwl_mvm_acs_survey { + struct iwl_mvm_acs_survey_channel *bands[NUM_NL80211_BANDS]; + + /* Overall number of channels */ + int n_channels; + + /* Storage space for per-channel information follows */ + struct iwl_mvm_acs_survey_channel channels[] __counted_by(n_channels); +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -848,6 +1009,11 @@ struct iwl_mvm { spinlock_t async_handlers_lock; struct work_struct async_handlers_wk; + /* For async rx handlers that require the wiphy lock */ + struct wiphy_work async_handlers_wiphy_wk; + + struct wiphy_work trig_link_selection_wk; + struct work_struct roc_done_wk; unsigned long init_status; @@ -916,9 +1082,9 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; - struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX]; - struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX]; - unsigned long fw_link_ids_map; + struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_STATION_COUNT_MAX]; + /* note: fw_id_to_link_sta must be protected by wiphy and mvm mutexes */ + struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -940,7 +1106,7 @@ struct iwl_mvm { unsigned int max_scans; /* UMAC scan tracking */ - u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; + u32 scan_uid_status[IWL_MAX_UMAC_SCANS]; /* start time of last scan in TSF of the mac that requested the scan */ u64 scan_start; @@ -1007,10 +1173,8 @@ struct iwl_mvm { struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; - struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_MVM_FW_MAX_LINK_ID + 1]; + struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_FW_MAX_LINK_ID + 1]; - /* -1 for always, 0 for never, >0 for that many times */ - s8 fw_restart; u8 *error_recovery_buf; #ifdef CONFIG_IWLWIFI_LEDS @@ -1019,7 +1183,7 @@ struct iwl_mvm { struct ieee80211_vif *p2p_device_vif; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; @@ -1030,6 +1194,7 @@ struct iwl_mvm { struct ieee80211_channel **nd_channels; int n_nd_channels; bool net_detect; + bool fast_resume; u8 offload_tid; #ifdef CONFIG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; @@ -1042,8 +1207,11 @@ struct iwl_mvm { wait_queue_head_t rx_sync_waitq; - /* BT-Coex */ - struct iwl_bt_coex_profile_notif last_bt_notif; + /* BT-Coex - only one of those will be used */ + union { + struct iwl_bt_coex_prof_old_notif last_bt_notif; + struct iwl_bt_coex_profile_notif last_bt_wifi_loss; + }; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; u8 bt_tx_prio; @@ -1184,19 +1352,30 @@ struct iwl_mvm { struct 
iwl_phy_specific_cfg phy_filters; #endif + /* report rx timestamp in ptp clock time */ + bool rx_ts_ptp; + unsigned long last_6ghz_passive_scan_jiffies; unsigned long last_reset_or_resume_time_jiffies; bool sta_remove_requires_queue_remove; bool mld_api_is_used; - bool pldr_sync; + /* + * Indicates that firmware will do a product reset (and will + * therefore fail to load) when we start it (due to OTP burn); + * if so, don't dump errors etc. since this is expected. + */ + bool fw_product_reset; struct iwl_time_sync_data time_sync; struct iwl_mei_scan_filter mei_scan_filter; + struct iwl_mvm_acs_survey *acs_survey; + bool statistics_clear; + u32 bios_enable_puncturing; }; /* Extract MVM priv from op_mode and _hw */ @@ -1206,34 +1385,32 @@ struct iwl_mvm { #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) +DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex)) + /** * enum iwl_mvm_status - MVM status bits * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active - * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running + * @IWL_MVM_STATUS_ROC_P2P_RUNNING: remain-on-channel on P2P is running (when + * P2P is not over AUX) * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running - * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log * if this is set, when intentionally triggered - * @IWL_MVM_STATUS_STARTING: starting mac, - * used to disable restart flow while in STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_CTKILL, - IWL_MVM_STATUS_ROC_RUNNING, + IWL_MVM_STATUS_ROC_P2P_RUNNING, IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_FIRMWARE_RUNNING, - IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, - IWL_MVM_STATUS_STARTING, }; struct iwl_mvm_csme_conn_info { @@ -1317,7 +1494,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) static inline struct ieee80211_bss_conf * iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu) { - if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) + if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf), + "erroneous FW link ID: %d\n", link_id)) return NULL; if (rcu) @@ -1408,8 +1586,7 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && - !IWL_MVM_HW_CSUM_DISABLE; + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) @@ -1530,9 +1707,9 @@ static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { if (iwl_mvm_has_new_tx_api(mvm)) - return &((struct iwl_mvm_tx_resp *)tx_resp)->status; + return &((struct iwl_tx_resp *)tx_resp)->status; else - return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status; + return ((struct iwl_tx_resp_v3 *)tx_resp)->status; } static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) @@ -1555,12 +1732,19 @@ static inline bool 
iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans) { - if ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) && - !CSR_HW_RFID_IS_CDB(trans->hw_rf_id)) + if (CSR_HW_RFID_IS_CDB(trans->hw_rf_id)) + return false; + + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + case IWL_CFG_RF_TYPE_FM: /* Step A doesn't support eSR */ return CSR_HW_RFID_STEP(trans->hw_rf_id); - - return false; + case IWL_CFG_RF_TYPE_WH: + case IWL_CFG_RF_TYPE_PE: + return true; + default: + return false; + } } static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, @@ -1571,10 +1755,11 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, if (vif->type == NL80211_IFTYPE_AP) return mvm->fw->ucode_capa.num_beacons; + /* Check if HW supports eSR or STR */ if (iwl_mvm_is_esr_supported(trans) || (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && CSR_HW_RFID_IS_CDB(trans->hw_rf_id))) - return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM; + return IWL_FW_MAX_ACTIVE_LINKS_NUM; return 1; } @@ -1593,6 +1778,13 @@ static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, return iwl_mvm_ac_to_tx_fifo[ac]; } +static inline bool iwl_mvm_has_rlc_offload(struct iwl_mvm *mvm) +{ + return iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), + 0) >= 3; +} + struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ @@ -1601,7 +1793,7 @@ struct iwl_rate_info { u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ }; -void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend); int __iwl_mvm_mac_start(struct iwl_mvm *mvm); /****************** @@ -1713,6 +1905,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); +int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, + bool enable); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ @@ -1734,10 +1928,10 @@ static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { - u8 rx_ant = mvm->fw->valid_tx_ant; + u8 rx_ant = mvm->fw->valid_rx_ant; if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant) - rx_ant &= mvm->nvm_data->valid_tx_ant; + rx_ant &= mvm->nvm_data->valid_rx_ant; if (mvm->set_rx_ant) rx_ant &= mvm->set_rx_ant; @@ -1769,6 +1963,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); +void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif); + /* * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_<NAME OF THE CMD> @@ -1805,18 +2001,20 @@ void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, /* MVM PHY */ struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm); int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *ap, u8 chains_static, u8 chains_dynamic); int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *ap, u8 chains_static, u8 chains_dynamic); void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); void 
iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); -u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); -u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); +u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef); +u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef); int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, u8 chains_static, u8 chains_dynamic); @@ -1825,7 +2023,7 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, + struct iwl_mvm_vif_link_info *link_info, __le32 *cck_rates, __le32 *ofdm_rates); void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -1884,6 +2082,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, @@ -1907,16 +2107,41 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band); /* Links */ +void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link); +int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf); int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, u32 changes, bool active); +int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf); int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf); +void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif); +u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id); + +struct iwl_mvm_link_sel_data { + u8 link_id; + const struct cfg80211_chan_def *chandef; + s32 signal; + u16 grade; +}; + +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf); +bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *a, + const struct iwl_mvm_link_sel_data *b); + +s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif); +#endif + /* AP and IBSS */ bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *ret); @@ -1992,9 +2217,13 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_ies *ies); size_t iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); + int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); void 
iwl_mvm_scan_timeout_wk(struct work_struct *work); +int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, @@ -2085,25 +2314,39 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_fast_suspend(struct iwl_mvm *mvm); +int iwl_mvm_fast_resume(struct iwl_mvm *mvm); #else static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } + +static inline void iwl_mvm_fast_suspend(struct iwl_mvm *mvm) +{ +} + +static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm) +{ + return 0; +} #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd *cmd); + struct iwl_wowlan_config_cmd_v6 *cmd); int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, - u32 cmd_flags); + u32 cmd_flags, + u8 sta_id); /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); +void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -2133,11 +2376,9 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, {} #endif int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags); + struct ieee80211_vif *vif); int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags); + struct ieee80211_vif *vif); /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, @@ -2352,10 +2593,8 @@ void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); -void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool tdls, bool cmd_q); + struct ieee80211_vif *vif); void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, @@ -2370,7 +2609,7 @@ u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time); int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); -void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm); +void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -2391,6 +2630,10 @@ int iwl_mvm_sec_key_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf); +int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 sta_mask, + struct ieee80211_key_conf *keyconf); void iwl_mvm_sec_key_remove_ap(struct iwl_mvm 
*mvm, struct ieee80211_vif *vif, struct iwl_mvm_vif_link_info *link, @@ -2429,6 +2672,21 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) } } +static inline u8 iwl_mvm_nl80211_band_from_phy(u8 phy_band) +{ + switch (phy_band) { + case PHY_BAND_24: + return NL80211_BAND_2GHZ; + case PHY_BAND_5: + return NL80211_BAND_5GHZ; + case PHY_BAND_6: + return NL80211_BAND_6GHZ; + default: + WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); + return NL80211_BAND_5GHZ; + } +} + /* Channel Switch */ void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk); int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, @@ -2515,7 +2773,7 @@ static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm, static inline void iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, - struct cfg80211_chan_def *chandef) + const struct cfg80211_chan_def *chandef) { enum nl80211_band band = chandef->chan->band; @@ -2586,6 +2844,13 @@ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) sw_rfkill); } +static inline bool iwl_mvm_has_p2p_over_aux(struct iwl_mvm *mvm) +{ + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); + + return iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) >= 4; +} + static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm, struct sk_buff *skb) { @@ -2605,7 +2870,6 @@ static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm, void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool forbidden); -bool iwl_mvm_is_vendor_in_approved_list(void); /* Callbacks for ieee80211_ops */ void iwl_mvm_mac_tx(struct ieee80211_hw *hw, @@ -2621,7 +2885,7 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int iwl_mvm_mac_start(struct ieee80211_hw *hw); void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); -void iwl_mvm_mac_stop(struct ieee80211_hw *hw); +void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend); static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) { return 0; @@ -2657,7 +2921,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, bool more_data); int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value); void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u32 changed); + struct ieee80211_link_sta *link_sta, u32 changed); void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info); @@ -2691,11 +2955,12 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw); void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw); -int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, +int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw); void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw); @@ -2724,14 +2989,58 @@ void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1, struct iwl_mvm_vif *vif2); bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif); -int 
iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, +int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, + struct ieee80211_bss_conf *bss_conf, s16 tx_power); int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_set_hw_timestamp *hwts); int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif); bool iwl_mvm_enable_fils(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx); -void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool valid_links_changed); + +struct cfg80211_chan_def * +iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx); + +void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, + u32 duration_ms, + u32 *duration_tu, + u32 *delay); +int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration, enum iwl_roc_activity activity); + +/* EMLSR */ +bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep); +int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason); +void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason); +void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep); +s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm, + const struct cfg80211_chan_def *chandef, + bool low); +void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int link_id); +bool +iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + s32 link_rssi, + bool primary); +int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + unsigned int link_id, bool active); + +void +iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + bool is_ap); #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index c0dd441e800e..80ec59c58ae4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -9,8 +9,7 @@ #include "iwl-trans.h" #include "iwl-csr.h" #include "mvm.h" -#include "iwl-eeprom-parse.h" -#include "iwl-eeprom-read.h" +#include "iwl-nvm-utils.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "fw/acpi.h" @@ -590,7 +589,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) return -EIO; if (iwl_mvm_is_wifi_mcc_supported(mvm) && - !iwl_acpi_get_mcc(mvm->dev, mcc)) { + !iwl_bios_get_mcc(&mvm->fwrt, mcc)) { kfree(regd); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, MCC_SOURCE_BIOS, NULL); @@ -612,6 +611,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, char mcc[3]; struct ieee80211_regdomain *regd; int wgds_tbl_idx; + bool changed = false; lockdep_assert_held(&mvm->mutex); @@ -631,10 +631,15 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, IWL_DEBUG_LAR(mvm, "RX: 
received chub update mcc cmd (mcc '%s' src %d)\n", mcc, src); - regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, &changed); if (IS_ERR_OR_NULL(regd)) return; + if (!changed) { + IWL_DEBUG_LAR(mvm, "RX: No change in the regulatory data\n"); + goto out; + } + wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (wgds_tbl_idx < 1) IWL_DEBUG_INFO(mvm, @@ -645,5 +650,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, wgds_tbl_idx); regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); + +out: kfree(regd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c index dfb16ca5b438..15d4369678a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2021-2022 Intel Corporation + * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -10,7 +10,7 @@ #include "mvm.h" void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd *cmd) + struct iwl_wowlan_config_cmd_v6 *cmd) { int i; @@ -30,7 +30,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, - u32 cmd_flags) + u32 cmd_flags, + u8 sta_id) { union { struct iwl_proto_offload_cmd_v1 v1; @@ -205,6 +206,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, if (!disable_offloading) common->enabled = cpu_to_le32(enabled); + if (ver >= 4) + cmd.v4.sta_id = cpu_to_le32(sta_id); + hcmd.len[0] = size; return iwl_mvm_send_cmd(mvm, &hcmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index adbbe19aeae5..984f407f7027 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -18,7 +18,7 @@ #include "iwl-modparams.h" #include "mvm.h" #include "iwl-phy-db.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-csr.h" #include "iwl-io.h" #include "iwl-prph.h" @@ -34,19 +34,15 @@ #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_LICENSE("GPL"); -MODULE_IMPORT_NS(IWLWIFI); +MODULE_IMPORT_NS("IWLWIFI"); static const struct iwl_op_mode_ops iwl_mvm_ops; static const struct iwl_op_mode_ops iwl_mvm_ops_mq; struct iwl_mvm_mod_params iwlmvm_mod_params = { .power_scheme = IWL_POWER_SCHEME_BPS, - /* rest of fields are 0 by default */ }; -module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444); -MODULE_PARM_DESC(init_dbg, - "set to true to debug an ASSERT in INIT fw (default: false"); module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444); MODULE_PARM_DESC(power_scheme, "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); @@ -145,6 +141,61 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } +static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm, + struct 
iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + + /* FW recommendations are only for entering EMLSR */ + if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active) + return; + + if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER) + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW); + else + iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, + iwl_mvm_get_primary_link(vif)); +} + +static void iwl_mvm_rx_esr_trans_fail_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_esr_trans_fail_notif *notif = (void *)pkt->data; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + u8 fw_link_id = le32_to_cpu(notif->link_id); + struct ieee80211_bss_conf *bss_conf; + + if (IS_ERR_OR_NULL(vif)) + return; + + IWL_DEBUG_INFO(mvm, "Failed to %s eSR on link %d, reason %d\n", + le32_to_cpu(notif->activation) ? "enter" : "exit", + le32_to_cpu(notif->link_id), + le32_to_cpu(notif->err_code)); + + /* we couldn't go back to single link, disconnect */ + if (!le32_to_cpu(notif->activation)) { + iwl_mvm_connection_loss(mvm, vif, "emlsr exit failed"); + return; + } + + bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id, false); + if (IWL_FW_CHECK(mvm, !bss_conf, + "FW reported failure to activate EMLSR on a non-existing link: %d\n", + fw_link_id)) + return; + + /* + * We failed to activate the second link and enter EMLSR, so we need to + * go back to single link. + */ + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_FAIL_ENTRY, + bss_conf->link_id); +} + static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { @@ -157,13 +208,14 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA)) return; - vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id); + /* FIXME: should fetch the link and not the vif */ + vif = iwl_mvm_get_vif_by_macid(mvm, notif->link_id); if (!vif || vif->type != NL80211_IFTYPE_STATION) return; - if (!vif->bss_conf.chandef.chan || - vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ || - vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40) + if (!vif->bss_conf.chanreq.oper.chan || + vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ || + vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40) return; if (!vif->cfg.assoc) @@ -219,7 +271,7 @@ void iwl_mvm_update_link_smps(struct ieee80211_vif *vif, return; if (mvm->fw_static_smps_request && - link_conf->chandef.width == NL80211_CHAN_WIDTH_160 && + link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 && link_conf->he_support) mode = IEEE80211_SMPS_STATIC; @@ -247,6 +299,12 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_thermal_dual_chain_request *req = (void *)pkt->data; + /* firmware is expected to handle that in RLC offload mode */ + if (IWL_FW_CHECK(mvm, iwl_mvm_has_rlc_offload(mvm), + "Got THERMAL_DUAL_CHAIN_REQUEST (0x%x) in RLC offload mode\n", + req->event)) + return; + /* * We could pass it to the iterator data, but also need to remember * it for new interfaces that are added while in this state. 
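iwl_mvm_rx_esr_trans_fail_notif() above, like iwl_mvm_rcu_fw_link_id_to_link_conf() in mvm.h, range-checks a firmware-supplied link ID via IWL_FW_CHECK() before using it, rather than trusting the device. A self-contained sketch of that defensive pattern; the names below are hypothetical, since IWL_FW_CHECK() itself is part of the driver's debug infrastructure:

#include <stdio.h>

#define MAX_LINKS 8

static const char *link_conf[MAX_LINKS]; /* stand-in for the driver's table */

/* refuse (and log) out-of-range IDs instead of indexing blindly */
static const char *fw_link_id_to_conf(unsigned int fw_link_id)
{
	if (fw_link_id >= MAX_LINKS) {
		fprintf(stderr, "erroneous FW link ID: %u\n", fw_link_id);
		return NULL;
	}
	return link_conf[fw_link_id];
}

int main(void)
{
	link_conf[2] = "link 2";

	printf("%s\n", fw_link_id_to_conf(2)); /* valid: prints "link 2" */
	if (!fw_link_id_to_conf(42))           /* invalid: logged, NULL */
		printf("rejected\n");
	return 0;
}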
@@ -259,7 +317,7 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, } /** - * enum iwl_rx_handler_context context for Rx handler + * enum iwl_rx_handler_context: context for Rx handler * @RX_HANDLER_SYNC : this means that it will be called in the Rx path * which can't acquire mvm->mutex. * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex @@ -267,15 +325,19 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, * it will be called from a worker with mvm->mutex held. * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the * mutex itself, it will be called from a worker without mvm->mutex held. + * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock + * and mvm->mutex. Will be handled with the wiphy_work queue infra + * instead of the regular work queue. */ enum iwl_rx_handler_context { RX_HANDLER_SYNC, RX_HANDLER_ASYNC_LOCKED, RX_HANDLER_ASYNC_UNLOCKED, + RX_HANDLER_ASYNC_LOCKED_WIPHY, }; /** - * struct iwl_rx_handlers handler for FW notification + * struct iwl_rx_handlers: handler for FW notification * @cmd_id: command id * @min_size: minimum size to expect for the notification * @context: see &iwl_rx_handler_context @@ -307,7 +369,7 @@ struct iwl_rx_handlers { */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC, - struct iwl_mvm_tx_resp), + struct iwl_tx_resp), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC, struct iwl_mvm_ba_notif), @@ -315,8 +377,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC, struct iwl_tlc_update_notif), - RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, - RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif), + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_bt_coex_prof_old_notif), + RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_bt_coex_profile_notif), RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, @@ -324,7 +390,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF, iwl_mvm_handle_rx_system_oper_stats, - RX_HANDLER_ASYNC_LOCKED, + RX_HANDLER_ASYNC_LOCKED_WIPHY, struct iwl_system_statistics_notif_oper), RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF, iwl_mvm_handle_rx_system_oper_part1_stats, @@ -343,7 +409,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_SYNC, struct iwl_time_event_notif), RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF, iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC, - struct iwl_mvm_session_prot_notif), + struct iwl_session_prot_notif), RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif), @@ -360,14 +426,21 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_rx_scan_match_found, RX_HANDLER_SYNC), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, - RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete), + RX_HANDLER_ASYNC_LOCKED, + struct iwl_umac_scan_complete), RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_umac_scan_iter_complete_notif), - RX_HANDLER(MISSED_BEACONS_NOTIFICATION, 
iwl_mvm_rx_missed_beacons_notif, - RX_HANDLER_SYNC, struct iwl_missed_beacons_notif), + RX_HANDLER(MISSED_BEACONS_NOTIFICATION, + iwl_mvm_rx_missed_beacons_notif_legacy, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_missed_beacons_notif_v4), + RX_HANDLER_GRP(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF, + iwl_mvm_rx_missed_beacons_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_missed_beacons_notif), RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC, struct iwl_error_resp), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, @@ -418,6 +491,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_channel_switch_error_notif, RX_HANDLER_ASYNC_UNLOCKED, struct iwl_channel_switch_error_notif), + + RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF, + iwl_mvm_rx_esr_mode_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_mvm_esr_mode_notif), + RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_datapath_monitor_notif), @@ -440,8 +519,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC, struct iwl_time_msmt_cfm_notify), RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF, - iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC, + iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_roc_notif), + RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF, + iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED, + struct iwl_umac_scan_channel_survey_notif), + RX_HANDLER_GRP(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF, + iwl_mvm_rx_esr_trans_fail_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_esr_trans_fail_notif), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -538,6 +624,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(D0I3_END_CMD), HCMD_NAME(LTR_CONFIG), HCMD_NAME(LDBG_CONFIG_CMD), + HCMD_NAME(DEBUG_LOG_MSG), }; /* Please keep this array *SORTED* by hex value. 
@@ -545,6 +632,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), + HCMD_NAME(SOC_CONFIGURATION_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), HCMD_NAME(RFI_CONFIG_CMD), @@ -559,8 +647,10 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { + HCMD_NAME(LOW_LATENCY_CMD), HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(SESSION_PROTECTION_CMD), + HCMD_NAME(CANCEL_CHANNEL_SWITCH_CMD), HCMD_NAME(MAC_CONFIG_CMD), HCMD_NAME(LINK_CONFIG_CMD), HCMD_NAME(STA_CONFIG_CMD), @@ -568,8 +658,12 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(STA_REMOVE_CMD), HCMD_NAME(STA_DISABLE_TX_CMD), HCMD_NAME(ROC_CMD), + HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF), HCMD_NAME(ROC_NOTIF), + HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF), + HCMD_NAME(MISSED_VAP_NOTIF), HCMD_NAME(SESSION_PROTECTION_NOTIF), + HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF), HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; @@ -581,6 +675,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), + HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; @@ -592,6 +687,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(RLC_CONFIG_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), @@ -599,6 +696,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(SCD_QUEUE_CONFIG_CMD), HCMD_NAME(SEC_KEY_CMD), + HCMD_NAME(ESR_MODE_NOTIF), HCMD_NAME(MONITOR_NOTIF), HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST), HCMD_NAME(STA_PM_NOTIF), @@ -617,7 +715,23 @@ static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = { /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ +static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { + HCMD_NAME(LMAC_RD_WR), + HCMD_NAME(UMAC_RD_WR), + HCMD_NAME(HOST_EVENT_CFG), + HCMD_NAME(DBGC_SUSPEND_RESUME), + HCMD_NAME(BUFFER_ALLOCATION), + HCMD_NAME(GET_TAS_STATUS), + HCMD_NAME(FW_DUMP_COMPLETE_CMD), + HCMD_NAME(FW_CLEAR_BUFFER), + HCMD_NAME(MFU_ASSERT_DUMP_NTF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ static const struct iwl_hcmd_names iwl_mvm_scan_names[] = { + HCMD_NAME(CHANNEL_SURVEY_NOTIF), HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF), }; @@ -656,6 +770,13 @@ static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = { HCMD_NAME(TAS_CONFIG), }; +/* Please keep this array *SORTED* by hex value. 
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = { + HCMD_NAME(PROFILE_NOTIF), +}; + static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), @@ -665,14 +786,18 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names), [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), + [BT_COEX_GROUP] = HCMD_ARR(iwl_mvm_bt_coex_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), + [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names), [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names), }; /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); +static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy, + struct wiphy_work *work); static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) { @@ -682,7 +807,7 @@ static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) if (!backoff) return 0; - dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev); + iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit); while (backoff->pwr) { if (dflt_pwr_limit >= backoff->pwr) @@ -701,20 +826,18 @@ static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) struct ieee80211_vif *tx_blocked_vif; struct iwl_mvm_vif *mvmvif; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex)); if (!tx_blocked_vif) - goto unlock; + return; mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); -unlock: - mutex_unlock(&mvm->mutex); } static void iwl_mvm_fwrt_dump_start(void *ctx) @@ -731,21 +854,12 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) mutex_unlock(&mvm->mutex); } -static bool iwl_mvm_fwrt_fw_running(void *ctx) -{ - return iwl_mvm_firmware_running(ctx); -} - static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) { struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; - int ret; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd(mvm, host_cmd); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return iwl_mvm_send_cmd(mvm, host_cmd); } static bool iwl_mvm_d3_debug_enable(void *ctx) @@ -756,7 +870,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx) static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, - .fw_running = iwl_mvm_fwrt_fw_running, .send_hcmd = iwl_mvm_fwrt_send_hcmd, .d3_debug_enable = iwl_mvm_d3_debug_enable, }; @@ -812,8 +925,7 @@ get_nvm_from_fw: ret = iwl_mvm_init_mcc(mvm); } - if (!iwlmvm_mod_params.init_dbg || !ret) - iwl_mvm_stop_device(mvm); + iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); wiphy_unlock(mvm->hw->wiphy); @@ -823,7 +935,7 @@ get_nvm_from_fw: IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); /* no longer need this regardless of failure or not */ - mvm->pldr_sync = false; + mvm->fw_product_reset = false; return ret; } @@ -1136,6 +1248,29 @@ static const struct iwl_mei_ops mei_ops = { .nic_stolen = iwl_mvm_mei_nic_stolen, }; +static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if 
(ieee80211_vif_is_mld(vif) && mvmvif->authorized) + iwl_mvm_select_links(mvmvif->mvm, vif); +} + +static void iwl_mvm_trig_link_selection(struct wiphy *wiphy, + struct wiphy_work *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, trig_link_selection_wk); + + mutex_lock(&mvm->mutex); + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_find_link_selection_vif, + NULL); + mutex_unlock(&mvm->mutex); +} + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -1151,14 +1286,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, size_t scan_size; u32 min_backoff; struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; + int err; /* - * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station + * We use IWL_STATION_COUNT_MAX to check the validity of the station * index all over the driver - check that its value corresponds to the * array size. */ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != - IWL_MVM_STATION_COUNT_MAX); + IWL_STATION_COUNT_MAX); /******************************** * 1. Allocating and configuring HW data @@ -1168,7 +1304,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_has_mld_api(fw) ? &iwl_mvm_mld_hw_ops : &iwl_mvm_hw_ops); if (!hw) - return NULL; + return ERR_PTR(-ENOMEM); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) max_agg = 512; @@ -1194,9 +1330,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, &iwl_mvm_sanitize_ops, mvm, dbgfs_dir); - iwl_mvm_get_acpi_tables(mvm); + iwl_mvm_get_bios_tables(mvm); iwl_uefi_get_sgom_table(trans, &mvm->fwrt); iwl_uefi_get_step_table(trans); + iwl_bios_setup_step(trans, &mvm->fwrt); mvm->init_status = 0; @@ -1212,11 +1349,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); - if (WARN_ON(trans->num_rx_queues > 1)) + if (WARN_ON(trans->num_rx_queues > 1)) { + err = -EINVAL; goto out_free; + } } - mvm->fw_restart = iwlwifi_mod_params.fw_restart ? 
-1 : 0; + mvm->bios_enable_puncturing = iwl_uefi_get_puncturing(&mvm->fwrt); if (iwl_mvm_has_new_tx_api(mvm)) { /* @@ -1265,6 +1404,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_LIST_HEAD(&mvm->add_stream_txqs); spin_lock_init(&mvm->add_stream_lock); + wiphy_work_init(&mvm->async_handlers_wiphy_wk, + iwl_mvm_async_handlers_wiphy_wk); + + wiphy_work_init(&mvm->trig_link_selection_wk, + iwl_mvm_trig_link_selection); + init_waitqueue_head(&mvm->rx_sync_waitq); mvm->queue_sync_state = 0; @@ -1283,8 +1428,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, 5); /* we only support up to version 9 */ - if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) + if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) { + err = -EINVAL; goto out_free; + } /* * Populate the state variables that the transport layer needs @@ -1294,24 +1441,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - switch (iwlwifi_mod_params.amsdu_size) { - case IWL_AMSDU_DEF: - trans_cfg.rx_buf_size = IWL_AMSDU_4K; - break; - case IWL_AMSDU_4K: - trans_cfg.rx_buf_size = IWL_AMSDU_4K; - break; - case IWL_AMSDU_8K: - trans_cfg.rx_buf_size = IWL_AMSDU_8K; - break; - case IWL_AMSDU_12K: - trans_cfg.rx_buf_size = IWL_AMSDU_12K; - break; - default: - pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, - iwlwifi_mod_params.amsdu_size); - trans_cfg.rx_buf_size = IWL_AMSDU_4K; - } + trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size(); trans->wide_cmd_header = true; trans_cfg.bc_table_dword = @@ -1327,10 +1457,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, driver_data[2]); - /* Set a short watchdog for the command queue */ - trans_cfg.cmd_q_wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, NULL, false, true); - snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), "%.31s", fw->fw_version); @@ -1354,9 +1480,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; - memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv, - sizeof(trans->dbg.conf_tlv)); - trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; @@ -1368,12 +1491,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->phy_db = iwl_phy_db_init(trans); if (!mvm->phy_db) { IWL_ERR(mvm, "Cannot init phy_db\n"); + err = -ENOMEM; goto out_free; } - IWL_INFO(mvm, "Detected %s, REV=0x%X\n", - mvm->trans->name, mvm->trans->hw_rev); - if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; else @@ -1383,13 +1504,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); - if (!mvm->scan_cmd) + if (!mvm->scan_cmd) { + err = -ENOMEM; goto out_free; + } mvm->scan_cmd_size = scan_size; /* invalidate ids to prevent accidental removal of sta_id 0 */ - mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; - mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; + mvm->aux_sta.sta_id = IWL_INVALID_STA; + mvm->snif_sta.sta_id = IWL_INVALID_STA; /* Set EBS as successful as long as not stated 
otherwise by the FW. */ mvm->last_ebs_successful = true; @@ -1413,7 +1536,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter); - if (iwl_mvm_start_get_nvm(mvm)) { + err = iwl_mvm_start_get_nvm(mvm); + if (err) { /* * Getting NVM failed while CSME is the owner, but we are * registered to MEI, we'll get the NVM later when it'll be @@ -1426,7 +1550,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } - if (iwl_mvm_start_post_nvm(mvm)) + err = iwl_mvm_start_post_nvm(mvm); + if (err) goto out_thermal_exit; return op_mode; @@ -1441,14 +1566,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); - if (iwlmvm_mod_params.init_dbg) - return op_mode; iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); iwl_trans_op_mode_leave(trans); ieee80211_free_hw(mvm->hw); - return NULL; + return ERR_PTR(err); } void iwl_mvm_stop_device(struct iwl_mvm *mvm) @@ -1459,6 +1582,8 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm) clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + iwl_mvm_pause_tcm(mvm, false); + iwl_fw_dbg_stop_sync(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); @@ -1519,6 +1644,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) kfree(mvm->temp_nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); + kfree(mvm->acs_survey); cancel_delayed_work_sync(&mvm->tcm.work); @@ -1551,35 +1677,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) spin_unlock_bh(&mvm->async_handlers_lock); } -static void iwl_mvm_async_handlers_wk(struct work_struct *wk) +/* + * This function receives a bitmap of rx async handler contexts + * (&iwl_rx_handler_context) to handle, and runs only them + */ +static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm, + u8 contexts) { - struct iwl_mvm *mvm = - container_of(wk, struct iwl_mvm, async_handlers_wk); struct iwl_async_handler_entry *entry, *tmp; LIST_HEAD(local_list); - /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ - /* - * Sync with Rx path with a lock. Remove all the entries from this list, - * add them to a local one (lock free), and then handle them. + * Sync with Rx path with a lock. Remove all the entries of the + * wanted contexts from this list, add them to a local one (lock free), + * and then handle them. 
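Two modernizations recur through this file. First, the mutex_lock()/goto-unlock pairs above are replaced by guard(mvm)(mvm), the scope-based cleanup helper from linux/cleanup.h: the lock is taken when the guard is instantiated and released automatically on every exit path. A minimal sketch of how such a guard class is declared and used (the driver's actual DEFINE_GUARD for "mvm" is added elsewhere in the series, presumably in mvm.h; the struct below is a placeholder):

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex mutex;
};

/* declare a guard class: lock on instantiation, unlock at end of scope */
DEFINE_GUARD(my_dev, struct my_dev *,
	     mutex_lock(&_T->mutex),
	     mutex_unlock(&_T->mutex))

static void my_dev_work(struct my_dev *dev, bool bail_early)
{
	guard(my_dev)(dev);	/* mutex held from this point on */

	if (bail_early)
		return;		/* unlock runs automatically - no goto label */

	/* ... locked section ... */
}				/* unlock also runs on normal return */

Second, iwl_op_mode_mvm_start() now reports why it failed: each failure path records a specific errno and the function returns ERR_PTR(err) instead of a bare NULL, so callers can use the standard error-pointer helpers. The convention, reduced to a sketch with placeholder names:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return f;
}

static int foo_use(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* recover the negative errno */

	kfree(f);
	return 0;
}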
*/ spin_lock_bh(&mvm->async_handlers_lock); - list_splice_init(&mvm->async_handlers_list, &local_list); + list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { + if (!(BIT(entry->context) & contexts)) + continue; + list_del(&entry->list); + list_add_tail(&entry->list, &local_list); + } spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { - if (entry->context == RX_HANDLER_ASYNC_LOCKED) + if (entry->context != RX_HANDLER_ASYNC_UNLOCKED) mutex_lock(&mvm->mutex); entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); - if (entry->context == RX_HANDLER_ASYNC_LOCKED) + if (entry->context != RX_HANDLER_ASYNC_UNLOCKED) mutex_unlock(&mvm->mutex); kfree(entry); } } +static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy, + struct wiphy_work *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk); + u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY); + + iwl_mvm_async_handlers_by_context(mvm, contexts); +} + +static void iwl_mvm_async_handlers_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, async_handlers_wk); + u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) | + BIT(RX_HANDLER_ASYNC_UNLOCKED); + + iwl_mvm_async_handlers_by_context(mvm, contexts); +} + static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -1659,7 +1812,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, spin_lock(&mvm->async_handlers_lock); list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); - schedule_work(&mvm->async_handlers_wk); + if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY) + wiphy_work_queue(mvm->hw->wiphy, + &mvm->async_handlers_wiphy_wk); + else + schedule_work(&mvm->async_handlers_wk); break; } } @@ -1788,12 +1945,8 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) { - bool state = iwl_mvm_is_radio_killed(mvm); - - if (state) - wake_up(&mvm->rx_sync_waitq); - - wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, + iwl_mvm_is_radio_killed(mvm)); } void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) @@ -1853,27 +2006,62 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) ieee80211_free_txskb(mvm->hw, skb); } -struct iwl_mvm_reprobe { - struct device *dev; - struct work_struct work; -}; +static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type) +{ + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + iwl_abort_notification_waits(&mvm->notif_wait); + iwl_dbg_tlv_del_timers(mvm->trans); + + if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL) + IWL_ERR(mvm, "Command queue full!\n"); + else if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && + !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, + &mvm->status)) + iwl_mvm_dump_nic_error_log(mvm); -static void iwl_mvm_reprobe_wk(struct work_struct *wk) + /* + * This should be first thing before trying to collect any + * data to avoid endless loops if any HW error happens while + * collecting debug data. + * It might not actually be true that we'll restart, but the + * setting of the bit doesn't matter if we're going to be + * unbound either. 
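The new iwl_mvm_async_handlers_by_context() dispatcher above filters the pending-handler list with BIT(entry->context), which lets one list feed two different workers: the regular worker keeps draining the mutex-locked and unlocked contexts, while handlers that need mac80211's wiphy mutex are drained from a wiphy_work, where that lock is already held. A sketch of the assumed context enum and of the wiphy_work idiom (the enum values are inferred from the usage here, not quoted from the patch):

#include <net/cfg80211.h>

/* assumed contexts behind the BIT(entry->context) test */
enum rx_handler_context {
	RX_HANDLER_SYNC,		/* run inline in the RX path */
	RX_HANDLER_ASYNC_LOCKED,	/* worker takes mvm->mutex */
	RX_HANDLER_ASYNC_UNLOCKED,	/* worker runs without mvm->mutex */
	RX_HANDLER_ASYNC_LOCKED_WIPHY,	/* worker runs under the wiphy mutex */
};

/* a wiphy_work callback runs with the wiphy mutex held */
static void my_handlers_wk(struct wiphy *wiphy, struct wiphy_work *work)
{
	/* safe to call wiphy-locked cfg80211/mac80211 APIs here */
}

static void my_setup_and_kick(struct wiphy *wiphy, struct wiphy_work *wk)
{
	wiphy_work_init(wk, my_handlers_wk);	/* once, at init time */
	wiphy_work_queue(wiphy, wk);		/* whenever work is pending */
}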
+ */ + if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT) + set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); +} + +static void iwl_mvm_dump_error(struct iwl_op_mode *op_mode, + struct iwl_fw_error_dump_mode *mode) { - struct iwl_mvm_reprobe *reprobe; - - reprobe = container_of(wk, struct iwl_mvm_reprobe, work); - if (device_reprobe(reprobe->dev)) - dev_err(reprobe->dev, "reprobe failed!\n"); - put_device(reprobe->dev); - kfree(reprobe); - module_put(THIS_MODULE); + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + /* if we come in from opmode we have the mutex held */ + if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) { + lockdep_assert_held(&mvm->mutex); + iwl_fw_error_collect(&mvm->fwrt); + } else { + mutex_lock(&mvm->mutex); + if (mode->context != IWL_ERR_CONTEXT_ABORT) + iwl_fw_error_collect(&mvm->fwrt); + mutex_unlock(&mvm->mutex); + } } -void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) +static bool iwl_mvm_sw_reset(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type) { - iwl_abort_notification_waits(&mvm->notif_wait); - iwl_dbg_tlv_del_timers(mvm->trans); + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + + /* + * If the firmware crashes while we're already considering it + * to be dead then don't ask for a restart, that cannot do + * anything useful anyway. + */ + if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) + return false; /* * This is a bit racy, but worst case we tell mac80211 about @@ -1888,52 +2076,11 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) iwl_mvm_report_scan_aborted(mvm); /* - * If we're restarting already, don't cycle restarts. * If INIT fw asserted, it will likely fail again. * If WoWLAN fw asserted, don't restart either, mac80211 * can't recover this since we're already half suspended. */ - if (!mvm->fw_restart && fw_error) { - iwl_fw_error_collect(&mvm->fwrt, false); - } else if (test_bit(IWL_MVM_STATUS_STARTING, - &mvm->status)) { - IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); - } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - struct iwl_mvm_reprobe *reprobe; - - IWL_ERR(mvm, - "Firmware error during reconfiguration - reprobe!\n"); - - /* - * get a module reference to avoid doing this while unloading - * anyway and to avoid scheduling a work with code that's - * being removed. - */ - if (!try_module_get(THIS_MODULE)) { - IWL_ERR(mvm, "Module is being unloaded - abort\n"); - return; - } - - reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); - if (!reprobe) { - module_put(THIS_MODULE); - return; - } - reprobe->dev = get_device(mvm->trans->dev); - INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); - schedule_work(&reprobe->work); - } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status)) { - IWL_ERR(mvm, "HW restart already requested, but not started\n"); - } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && - mvm->hw_registered && - !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { - /* This should be first thing before trying to collect any - * data to avoid endless loops if any HW error happens while - * collecting debug data. 
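The deleted iwl_mvm_nic_restart() used to log, dump and restart in one place; the hunks above and below split those duties across three op-mode hooks: nic_error() logs the error and marks IWL_MVM_STATUS_HW_RESTART_REQUESTED, dump_error() collects firmware debug data with whatever locking the calling context requires, and sw_reset() decides whether a restart can help. A rough sketch of the presumed calling sequence from the transport's error path (the driving function is an illustration under that assumption, not code from this patch):

/* assumed, simplified: how the transport might drive the three hooks */
static void transport_handle_fw_error(struct iwl_op_mode *op_mode,
				      const struct iwl_op_mode_ops *ops,
				      enum iwl_fw_error_type type,
				      struct iwl_fw_error_dump_mode *mode)
{
	ops->nic_error(op_mode, type);		/* log, request restart */
	ops->dump_error(op_mode, mode);		/* collect debug data */

	if (!ops->sw_reset(op_mode, type)) {
		/* restart refused (fw not running, WoWLAN, INIT image):
		 * the device stays down until something else recovers it
		 */
	}
}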
- */ - set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); - + if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered) { if (mvm->fw->ucode_capa.error_log_size) { u32 src_size = mvm->fw->ucode_capa.error_log_size; u32 src_addr = mvm->fw->ucode_capa.error_log_addr; @@ -1948,67 +2095,45 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } } - iwl_fw_error_collect(&mvm->fwrt, false); - - if (fw_error && mvm->fw_restart > 0) { - mvm->fw_restart--; - ieee80211_restart_hw(mvm->hw); - } else if (mvm->fwrt.trans->dbg.restart_required) { + if (mvm->fwrt.trans->dbg.restart_required) { IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n"); - mvm->fwrt.trans->dbg.restart_required = FALSE; + mvm->fwrt.trans->dbg.restart_required = false; ieee80211_restart_hw(mvm->hw); + return true; } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { ieee80211_restart_hw(mvm->hw); + return true; } } -} -static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && - !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, - &mvm->status)) - iwl_mvm_dump_nic_error_log(mvm); - - if (sync) { - iwl_fw_error_collect(&mvm->fwrt, true); - /* - * Currently, the only case for sync=true is during - * shutdown, so just stop in this case. If/when that - * changes, we need to be a bit smarter here. - */ - return; - } - - /* - * If the firmware crashes while we're already considering it - * to be dead then don't ask for a restart, that cannot do - * anything useful anyway. - */ - if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) - return; - - iwl_mvm_nic_restart(mvm, false); + return false; } -static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) +static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - WARN_ON(1); - iwl_mvm_nic_restart(mvm, true); + iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); } -static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode, - enum iwl_fw_ini_time_point tp_id, - union iwl_dbg_tlv_tp_data *tp_data) +#ifdef CONFIG_PM_SLEEP +static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); + mutex_lock(&mvm->mutex); + clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + iwl_mvm_stop_device(mvm); + mvm->fast_resume = false; + mutex_unlock(&mvm->mutex); } +#else +static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) +{} +#endif #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ @@ -2017,12 +2142,14 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode, .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ .free_skb = iwl_mvm_free_skb, \ .nic_error = iwl_mvm_nic_error, \ - .cmd_queue_full = iwl_mvm_cmd_queue_full, \ + .dump_error = iwl_mvm_dump_error, \ + .sw_reset = iwl_mvm_sw_reset, \ .nic_config = iwl_mvm_nic_config, \ /* as we only register one, these MUST be common! 
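The IWL_MVM_COMMON_OPS macro whose definition continues just below exists because the driver builds more than one ops table from it (historically a legacy-RX and a multi-queue-RX variant) while the shared hooks must stay identical across them, as the in-line comment stresses. The pattern in miniature, with placeholder types and names:

/* placeholder ops table for illustration */
struct ops_table {
	void (*start)(void);
	void (*rx)(void);
};

static void op_start(void) { }
static void op_rx(void) { }
static void op_rx_mq(void) { }

/* members every variant must share live in one macro */
#define COMMON_OPS \
	.start = op_start

static const struct ops_table ops = {
	COMMON_OPS,
	.rx = op_rx,		/* single-queue RX entry point */
};

static const struct ops_table ops_mq = {
	COMMON_OPS,
	.rx = op_rx_mq,		/* multi-queue RX entry point */
};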
*/ \ .start = iwl_op_mode_mvm_start, \ .stop = iwl_op_mode_mvm_stop, \ - .time_point = iwl_op_mode_mvm_time_point + .time_point = iwl_op_mode_mvm_time_point, \ + .device_powered_off = iwl_op_mode_mvm_device_powered_off static const struct iwl_op_mode_ops iwl_mvm_ops = { IWL_MVM_COMMON_OPS, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 334d1f59f6e4..5e7e2926be0c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -9,7 +9,7 @@ #include "mvm.h" /* Maps the driver specific channel width definition to the fw values */ -u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) +u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: @@ -31,9 +31,9 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) /* * Maps the driver specific control channel position (relative to the center - * freq) definitions to the the fw values + * freq) definitions to the fw values */ -u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) +u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef) { int offs = chandef->chan->center_freq - chandef->center_freq1; int abs_offs = abs(offs); @@ -116,7 +116,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm, static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct iwl_phy_context_cmd_v1 *cmd, - struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { struct iwl_phy_context_cmd_tail *tail = @@ -137,7 +137,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm, static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct iwl_phy_context_cmd *cmd, - struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm, @@ -159,7 +159,11 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, .phy_id = cpu_to_le32(ctxt->id), }; - if (ctxt->rlc_disabled) + /* From version 3, RLC is offloaded to firmware, so the driver no + * longer needs to send cmd.rlc, note that we are not using any + * other fields in the command - don't send it. 
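The RLC comment being added here (it closes just below) captures a recurring iwlwifi idiom: optional behaviour is keyed off the command version the loaded firmware advertises, queried with iwl_fw_lookup_cmd_ver(). A sketch of what the iwl_mvm_has_rlc_offload() helper is assumed to look like under that idiom (its actual implementation is not part of this excerpt):

/* assumption: firmware owns RLC configuration from version 3 onwards */
static bool iwl_mvm_has_rlc_offload(struct iwl_mvm *mvm)
{
	return iwl_fw_lookup_cmd_ver(mvm->fw,
				     WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD),
				     0) >= 3;
}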
+ */ + if (iwl_mvm_has_rlc_offload(mvm) || ctxt->rlc_disabled) return 0; if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, @@ -197,14 +201,18 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, */ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *ap, u8 chains_static, u8 chains_dynamic, u32 action) { int ret; int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1); - if (ver == 3 || ver == 4) { + if (ver < 5 || !ap || !ap->chan) + ap = NULL; + + if (ver >= 3 && ver <= 6) { struct iwl_phy_context_cmd cmd = {}; /* Set the command header fields */ @@ -215,6 +223,14 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, chains_static, chains_dynamic); + if (ap) { + cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap); + cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap); + } + + if (ver == 6) + cmd.puncture_mask = cpu_to_le16(chandef->punctured); + ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd); } else if (ver < 3) { @@ -254,7 +270,8 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, * Send a command to add a PHY context based on the current HW configuration. */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *ap, u8 chains_static, u8 chains_dynamic) { int ret; @@ -267,7 +284,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; - ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap, chains_static, chains_dynamic, FW_CTXT_ACTION_ADD); @@ -300,7 +317,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) * changed. */ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, - struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *chandef, + const struct cfg80211_chan_def *ap, u8 chains_static, u8 chains_dynamic) { enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY; @@ -324,7 +342,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, int ret; /* ... 
remove it here ...*/ - ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL, chains_static, chains_dynamic, FW_CTXT_ACTION_REMOVE); if (ret) @@ -338,7 +356,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap, chains_static, chains_dynamic, action); } @@ -358,7 +376,7 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT); - iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1, + iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1, FW_CTXT_ACTION_REMOVE); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 1b9b06e0443f..a386b315e52f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -20,8 +20,7 @@ static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, - struct iwl_beacon_filter_cmd *cmd, - u32 flags) + struct iwl_beacon_filter_cmd *cmd) { u16 len; @@ -62,7 +61,7 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, len = offsetof(struct iwl_beacon_filter_cmd, bf_threshold_absolute_low); - return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags, + return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, 0, len, cmd); } @@ -80,7 +79,7 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, cmd->bf_roaming_state = cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); } - cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); + cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->ba_enabled); } static void iwl_mvm_power_log(struct iwl_mvm *mvm, @@ -212,19 +211,37 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } -static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +struct iwl_allow_uapsd_iface_iterator_data { + struct ieee80211_vif *current_vif; + bool allow_uapsd; +}; + +static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) { - bool *is_p2p_standalone = _data; + struct iwl_allow_uapsd_iface_iterator_data *data = _data; + struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif *curr_mvmvif = + iwl_mvm_vif_from_mac80211(data->current_vif); - switch (ieee80211_vif_type_p2p(vif)) { - case NL80211_IFTYPE_P2P_GO: + /* exclude the given vif */ + if (vif == data->current_vif) + return; + + switch (vif->type) { case NL80211_IFTYPE_AP: - *is_p2p_standalone = false; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_NAN: + data->allow_uapsd = false; break; case NL80211_IFTYPE_STATION: - if (vif->cfg.assoc) - *is_p2p_standalone = false; + /* allow UAPSD if P2P interface and BSS station interface share + * the same channel. 
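The reworked U-APSD check above drops the old "P2P must be standalone" rule in favour of a per-interface walk: any AP, IBSS or NAN vif vetoes U-APSD outright, and an associated station vif vetoes it only when it sits on a different PHY context (channel) than the vif under evaluation. The mac80211 iteration idiom it is built on, reduced to a skeleton (the data struct and veto condition are placeholders):

#include <net/mac80211.h>

struct allow_iter_data {
	struct ieee80211_vif *current_vif;
	bool allow;
};

static void allow_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct allow_iter_data *data = _data;

	if (vif == data->current_vif)
		return;			/* skip the vif being evaluated */

	if (vif->type == NL80211_IFTYPE_AP)
		data->allow = false;	/* example veto condition */
}

static bool check_allowed(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct allow_iter_data data = {
		.current_vif = vif,
		.allow = true,
	};

	/* _atomic variant: runs under RCU, the callback must not sleep */
	ieee80211_iterate_active_interfaces_atomic(hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   allow_iter, &data);
	return data.allow;
}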
+ */ + if (vif->cfg.assoc && other_mvmvif->deflink.phy_ctxt && + curr_mvmvif->deflink.phy_ctxt && + other_mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id) + data->allow_uapsd = false; break; default: @@ -236,6 +253,10 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_allow_uapsd_iface_iterator_data data = { + .current_vif = vif, + .allow_uapsd = true, + }; if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr, vif->cfg.ap_addr)) @@ -250,88 +271,75 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, IEEE80211_P2P_OPPPS_ENABLE_BIT)) return false; - /* - * Avoid using uAPSD if client is in DCM - - * low latency issue in Miracast - */ - if (iwl_mvm_phy_ctx_count(mvm) >= 2) + if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) return false; - if (vif->p2p) { - /* Allow U-APSD only if p2p is stand alone */ - bool is_p2p_standalone = true; - - if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) - return false; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_p2p_standalone_iterator, - &is_p2p_standalone); - - if (!is_p2p_standalone) - return false; - } + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_allow_uapsd_iterator, + &data); - return true; + return data.allow_uapsd; } -static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) +static bool iwl_mvm_power_is_radar(struct ieee80211_bss_conf *link_conf) { struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_bss_conf *link_conf; - bool radar_detect = false; - unsigned int link_id; - rcu_read_lock(); - for_each_vif_active_link(vif, link_conf, link_id) { - chanctx_conf = rcu_dereference(link_conf->chanctx_conf); - /* this happens on link switching, just ignore inactive ones */ - if (!chanctx_conf) - continue; + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); - radar_detect = !!(chanctx_conf->def.chan->flags & - IEEE80211_CHAN_RADAR); - if (radar_detect) - goto out; - } + /* this happens on link switching, just ignore inactive ones */ + if (!chanctx_conf) + return false; -out: - rcu_read_unlock(); - return radar_detect; + return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR; } static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { - int dtimper = vif->bss_conf.dtim_period ?: 1; - int skip; + struct ieee80211_bss_conf *link_conf; + unsigned int min_link_skip = ~0; + unsigned int link_id; /* disable, in case we're supposed to override */ cmd->skip_dtim_periods = 0; cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); - if (iwl_mvm_power_is_radar(vif)) + if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) { + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) + return; + cmd->skip_dtim_periods = 2; + cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); return; + } - if (dtimper >= 10) - return; + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + unsigned int dtimper = link_conf->dtim_period ?: 1; + unsigned int dtimper_tu = dtimper * link_conf->beacon_int; + unsigned int skip; - if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) { - if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) + if (dtimper >= 10 || iwl_mvm_power_is_radar(link_conf)) { + rcu_read_unlock(); return; - skip = 2; - } else { - int dtimper_tu = dtimper * vif->bss_conf.beacon_int; + } if 
(WARN_ON(!dtimper_tu)) - return; + continue; + /* configure skip over dtim up to 900 TU DTIM interval */ - skip = max_t(u8, 1, 900 / dtimper_tu); + skip = max_t(int, 1, 900 / dtimper_tu); + min_link_skip = min(min_link_skip, skip); } + rcu_read_unlock(); + + /* no WARN_ON, can only happen with WARN_ON above */ + if (min_link_skip == ~0) + return; - cmd->skip_dtim_periods = skip; + cmd->skip_dtim_periods = min_link_skip; cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); } @@ -559,7 +567,7 @@ struct iwl_power_vifs { bool monitor_active; }; -static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, +static void iwl_mvm_power_disable_pm_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -567,7 +575,7 @@ static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, mvmvif->pm_enabled = false; } -static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, +static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -813,8 +821,7 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_beacon_filter_cmd *cmd, - u32 cmd_flags) + struct iwl_beacon_filter_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; @@ -825,29 +832,27 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd); iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); - ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); + ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd); if (!ret) - mvmvif->bf_data.bf_enabled = true; + mvmvif->bf_enabled = true; return ret; } int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) + struct ieee80211_vif *vif) { struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(1), }; - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags); + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd); } static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) + struct ieee80211_vif *vif) { struct iwl_beacon_filter_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -856,19 +861,18 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; - ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); + ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); if (!ret) - mvmvif->bf_data.bf_enabled = false; + mvmvif->bf_enabled = false; return ret; } int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) + struct ieee80211_vif *vif) { - return _iwl_mvm_disable_beacon_filter(mvm, vif, flags); + return _iwl_mvm_disable_beacon_filter(mvm, vif); } static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) @@ -908,18 +912,18 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, .bf_enable_beacon_filter = cpu_to_le32(1), }; - if (!mvmvif->bf_data.bf_enabled) + if (!mvmvif->bf_enabled) return 0; if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || - mvm->ps_disabled || - !vif->cfg.ps || - iwl_mvm_vif_low_latency(mvmvif)); + mvmvif->ba_enabled = 
!(!mvmvif->pm_enabled || + mvm->ps_disabled || + !vif->cfg.ps || + iwl_mvm_vif_low_latency(mvmvif)); - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0); + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index 2ecd32bed752..045c862a8fc4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -132,14 +132,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm) if (ret) return ERR_PTR(ret); - if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size)) + if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != + resp_size)) { + iwl_free_resp(&cmd); return ERR_PTR(-EIO); + } resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL); + iwl_free_resp(&cmd); + if (!resp) return ERR_PTR(-ENOMEM); - iwl_free_resp(&cmd); return resp; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 6cba8a353b53..de5ac000272e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include "rs.h" #include "fw-api.h" @@ -440,12 +440,6 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (!mvmsta) { - IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", - notif->sta_id); - goto out; - } - flags = le32_to_cpu(notif->flags); mvm_link_sta = rcu_dereference(mvmsta->link[link_sta->link_id]); @@ -479,9 +473,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, } if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) { + u32 enabled = le32_to_cpu(notif->amsdu_enabled); u16 size = le32_to_cpu(notif->amsdu_size); int i; + if (size < 2000) { + size = 0; + enabled = 0; + } + if (link_sta->agg.max_amsdu_len < size) { /* * In debug link_sta->agg.max_amsdu_len < size @@ -492,7 +492,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, goto out; } - mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); + mvmsta->amsdu_enabled = enabled; mvmsta->max_amsdu_len = size; link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len; @@ -508,6 +508,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, link_sta->agg.max_tid_amsdu_len[i] = 1; } + ieee80211_sta_recalc_aggregates(sta); + IWL_DEBUG_RATE(mvm, "AMSDU update. 
AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", le32_to_cpu(notif->amsdu_size), size, @@ -525,10 +527,10 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta, const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap; - if (WARN_ON_ONCE(!link_conf->chandef.chan)) + if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan)) return IEEE80211_MAX_MPDU_LEN_VHT_3895; - if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) { + if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) { switch (le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: @@ -538,7 +540,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta, default: return IEEE80211_MAX_MPDU_LEN_VHT_3895; } - } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ && + } else if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ && eht_cap->has_eht) { switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0], IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) { @@ -603,14 +605,12 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, cpu_to_le16(max_amsdu_len) : 0, }; unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); int cmd_ver; int ret; - /* Enable external EHT LTF only for GL device and if there's - * mutual support by AP and client - */ - if (CSR_HW_REV_TYPE(mvm->trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && - sband_eht_cap && + /* Enable extra EHT LTF if there's mutual support by AP and client */ + if (sband_eht_cap && sband_eht_cap->eht_cap_elem.phy_cap_info[5] & IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF && link_sta->eht_cap.has_eht && @@ -646,12 +646,12 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, * since TLC offload works with one mode we can assume * that only vht/ht is used and also set it as station max amsdu */ - sta->deflink.agg.max_amsdu_len = max_amsdu_len; + link_sta->agg.max_amsdu_len = max_amsdu_len; + ieee80211_sta_recalc_aggregates(sta); + + cfg_cmd.max_tx_op = cpu_to_le16(mvmvif->max_tx_op); - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(DATA_PATH_GROUP, - TLC_MNG_CONFIG_CMD), - 0); + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n", cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n", @@ -687,9 +687,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, u16 cmd_size = sizeof(cfg_cmd_v3); /* In old versions of the API the struct is 4 bytes smaller */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(DATA_PATH_GROUP, - TLC_MNG_CONFIG_CMD), 0) < 3) + if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) < 3) cmd_size -= 4; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 481d68cbbbd8..a8c4e354e2ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -4161,6 +4161,8 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, * @mvm: The mvm component * @mvmsta: The station * @enable: Enable Tx protection? 
+ * + * Returns: an error code */ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 376b23b409dc..ea81cb236d5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -3,7 +3,7 @@ * * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2003 - 2014, 2018 - 2023 Intel Corporation + * Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation *****************************************************************************/ #ifndef __rs_h__ @@ -122,13 +122,8 @@ enum { #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -/* - * FIXME - various places in firmware API still use u8, - * e.g. LQ command and SCD config command. - * This should be 256 instead. - */ -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ @@ -203,11 +198,12 @@ struct rs_rate { /** * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW * @last_rate_n_flags: last rate reported by FW + * @pers: persistent fields * @pers.sta_id: the id of the station - * @chains: bitmask of chains reported in %chain_signal - * @chain_signal: per chain signal strength - * @last_rssi: last rssi reported - * @drv: pointer back to the driver data + * @pers.chains: bitmask of chains reported in %chain_signal + * @pers.chain_signal: per chain signal strength + * @pers.last_rssi: last rssi reported + * @pers.drv: pointer back to the driver data */ struct iwl_lq_sta_rs_fw { /* last tx rate_n_flags */ @@ -218,11 +214,11 @@ struct iwl_lq_sta_rs_fw { u32 sta_id; #ifdef CONFIG_MAC80211_DEBUGFS /** - * @dbg_fixed_rate: for debug, use fixed rate if not 0 + * @pers.dbg_fixed_rate: for debug, use fixed rate if not 0 */ u32 dbg_fixed_rate; /** - * @dbg_agg_frame_count_lim: for debug, max number of + * @pers.dbg_agg_frame_count_lim: for debug, max number of * frames in A-MPDU */ u16 dbg_agg_frame_count_lim; @@ -407,7 +403,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, struct ieee80211_tx_info *info, bool ndp); /** - * iwl_rate_control_register - Register the rate control algorithm callbacks + * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks * * Since the rate control algorithm is hardware specific, there is no need * or reason to place it as a stand alone module. The driver can call @@ -419,7 +415,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int iwl_mvm_rate_control_register(void); /** - * iwl_rate_control_unregister - Unregister the rate control callbacks + * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks * * This should be called after calling ieee80211_unregister_hw, but before * the driver is unloaded. 
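The rs.h and rs.c hunks above are kernel-doc repairs rather than functional changes: nested struct members gain their full "@pers." path, comment titles are renamed to match iwl_mvm_rate_control_register()/_unregister(), and iwl_mvm_tx_protection() gets the Returns: line the tooling expects. The format those fixes target, in miniature (all names here are placeholders):

#include <linux/types.h>

/**
 * struct example_state - rate-scaling state (illustrative)
 * @last_rate: last rate reported by firmware
 * @pers: persistent fields
 * @pers.sta_id: nested members are documented with their full path
 */
struct example_state {
	u32 last_rate;
	struct {
		u32 sta_id;
	} pers;
};

/**
 * example_apply - apply the state to the hardware (illustrative)
 * @state: state to apply
 *
 * Returns: 0 on success, a negative errno otherwise
 */
int example_apply(struct example_state *state);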
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 8caa971770c6..2dbef7b46355 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include "iwl-trans.h" @@ -556,36 +556,39 @@ struct iwl_mvm_stat_data_all_macs { struct iwl_stats_ntfy_per_mac *per_mac; }; -static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) +static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig, + struct iwl_mvm_vif_link_info *link_info, + struct ieee80211_bss_conf *bss_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; - int thold = vif->bss_conf.cqm_rssi_thold; - int hyst = vif->bss_conf.cqm_rssi_hyst; + int thold = bss_conf->cqm_rssi_thold; + int hyst = bss_conf->cqm_rssi_hyst; int last_event; + s8 exit_esr_thresh; if (sig == 0) { IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); return; } - mvmvif->bf_data.ave_beacon_signal = sig; + link_info->bf_data.ave_beacon_signal = sig; /* BT Coex */ - if (mvmvif->bf_data.bt_coex_min_thold != - mvmvif->bf_data.bt_coex_max_thold) { - last_event = mvmvif->bf_data.last_bt_coex_event; - if (sig > mvmvif->bf_data.bt_coex_max_thold && - (last_event <= mvmvif->bf_data.bt_coex_min_thold || + if (link_info->bf_data.bt_coex_min_thold != + link_info->bf_data.bt_coex_max_thold) { + last_event = link_info->bf_data.last_bt_coex_event; + if (sig > link_info->bf_data.bt_coex_max_thold && + (last_event <= link_info->bf_data.bt_coex_min_thold || last_event == 0)) { - mvmvif->bf_data.last_bt_coex_event = sig; + link_info->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH); - } else if (sig < mvmvif->bf_data.bt_coex_min_thold && - (last_event >= mvmvif->bf_data.bt_coex_max_thold || + } else if (sig < link_info->bf_data.bt_coex_min_thold && + (last_event >= link_info->bf_data.bt_coex_max_thold || last_event == 0)) { - mvmvif->bf_data.last_bt_coex_event = sig; + link_info->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW); @@ -596,10 +599,10 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) return; /* CQM Notification */ - last_event = mvmvif->bf_data.last_cqm_event; + last_event = link_info->bf_data.last_cqm_event; if (thold && sig < thold && (last_event == 0 || sig < last_event - hyst)) { - mvmvif->bf_data.last_cqm_event = sig; + link_info->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", sig); ieee80211_cqm_rssi_notify( @@ -609,7 +612,7 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { - mvmvif->bf_data.last_cqm_event = sig; + link_info->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", sig); ieee80211_cqm_rssi_notify( @@ -618,6 +621,27 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) sig, 
GFP_KERNEL); } + + /* ESR recalculation */ + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + return; + + /* We're not in EMLSR and our signal is bad, try to switch link maybe */ + if (sig < IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH && !mvmvif->esr_active) { + iwl_mvm_int_mlo_scan(mvm, vif); + return; + } + + /* We are in EMLSR, check if we need to exit */ + exit_esr_thresh = + iwl_mvm_get_esr_rssi_thresh(mvm, + &bss_conf->chanreq.oper, + true); + + if (sig < exit_esr_thresh) + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI, + iwl_mvm_get_other_link(vif, + bss_conf->link_id)); } static void iwl_mvm_stat_iterator(void *_data, u8 *mac, @@ -651,7 +675,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, mvmvif->deflink.beacon_stats.accu_num_beacons += mvmvif->deflink.beacon_stats.num_beacons; - iwl_mvm_update_vif_sig(vif, sig); + /* This is used in pre-MLO API so use deflink */ + iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf); } static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, @@ -684,7 +709,9 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, mvmvif->deflink.beacon_stats.num_beacons; sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy); - iwl_mvm_update_vif_sig(vif, sig); + + /* This is used in pre-MLO API so use deflink */ + iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf); } static inline void @@ -719,8 +746,8 @@ static void iwl_mvm_stats_energy_iter(void *_data, u8 *energy = _data; u32 sta_id = mvmsta->deflink.sta_id; - if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d", - sta_id, IWL_MVM_STATION_COUNT_MAX)) + if (WARN_ONCE(sta_id >= IWL_STATION_COUNT_MAX, "sta_id %d >= %d", + sta_id, IWL_STATION_COUNT_MAX)) return; if (energy[sta_id]) @@ -752,6 +779,21 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le, spin_unlock(&mvm->tcm.lock); } +static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm, + struct iwl_stats_ntfy_per_phy *per_phy) +{ + int i; + + for (i = 0; i < NUM_PHY_CTX; i++) { + if (!mvm->phy_ctxts[i].ref) + continue; + mvm->phy_ctxts[i].channel_load_by_us = + le32_to_cpu(per_phy[i].channel_load_by_us); + mvm->phy_ctxts[i].channel_load_not_by_us = + le32_to_cpu(per_phy[i].channel_load_not_by_us); + } +} + static void iwl_mvm_stats_ver_15(struct iwl_mvm *mvm, struct iwl_statistics_operational_ntfy *stats) @@ -766,6 +808,7 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator_all_macs, &data); + iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy); } static void @@ -841,6 +884,7 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, struct iwl_stats_ntfy_per_link *link_stats; struct ieee80211_bss_conf *bss_conf; struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_vif_link_info *link_info; int link_id; int sig; @@ -857,20 +901,26 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, continue; mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif); - if (!mvmvif || !mvmvif->link[link_id]) + link_info = mvmvif->link[link_id]; + if (!link_info) continue; link_stats = &per_link[fw_link_id]; - mvmvif->link[link_id]->beacon_stats.num_beacons = + link_info->beacon_stats.num_beacons = le32_to_cpu(link_stats->beacon_counter); /* we basically just use the u8 to store 8 bits and then treat * it as a s8 whenever we take it out to a different type. 
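The per-link signal tracking introduced above keeps mac80211's CQM hysteresis semantics: an event is reported only when the signal is on the wrong side of the threshold and has also moved at least hyst dB since the last reported value, which prevents flapping around the threshold. A worked example of the two conditions (the helper names and the numbers are made up for illustration):

	/* fire "low" when dropping through the threshold... */
	if (thold && sig < thold &&
	    (last_event == 0 || sig < last_event - hyst))
		report_low(sig);
	/* ...and "high" only after climbing hyst dB above the last event */
	else if (sig > thold &&
		 (last_event == 0 || sig > last_event + hyst))
		report_high(sig);

	/* e.g. thold = -70 dBm, hyst = 3 dB:
	 *   sig = -72  ->  low fires (no prior event); last_event = -72
	 *   sig = -71  ->  quiet: not below -72 - 3 = -75
	 *   sig = -76  ->  low fires again (-76 < -75); last_event = -76
	 *   sig = -68  ->  high fires (-68 > -70 and -68 > -76 + 3)
	 */

The EMLSR throughput accounting added a bit further below applies a similar guard-band idea to link usage: the secondary link's share is only judged once absolute traffic clears a floor. With the constants from that hunk, total_tx = 5000 and sec_link_tx = 300 give a 6% share, below SEC_LINK_MIN_PERC (10), so EMLSR is exited; at total_tx = 2000 the TX test is skipped entirely because it is under SEC_LINK_MIN_TX (3000), and only the RX side is considered.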
*/ - mvmvif->link[link_id]->beacon_stats.avg_signal = + link_info->beacon_stats.avg_signal = -le32_to_cpu(link_stats->beacon_average_energy); + if (link_info->phy_ctxt && + link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ) + iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif, + link_id); + /* make sure that beacon statistics don't go backwards with TCM * request to clear statistics */ @@ -879,7 +929,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, mvmvif->link[link_id]->beacon_stats.num_beacons; sig = -le32_to_cpu(link_stats->beacon_filter_average_energy); - iwl_mvm_update_vif_sig(bss_conf->vif, sig); + iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info, + bss_conf); if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX, "invalid mvmvif id: %d", mvmvif->id)) @@ -909,10 +960,115 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, } } +#define SEC_LINK_MIN_PERC 10 +#define SEC_LINK_MIN_TX 3000 +#define SEC_LINK_MIN_RX 400 + +/* Accept a ~20% short window to avoid issues due to jitter */ +#define IWL_MVM_TPT_MIN_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5) + +static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) +{ + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); + struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_sta *mvmsta; + unsigned long total_tx = 0, total_rx = 0; + unsigned long sec_link_tx = 0, sec_link_rx = 0; + u8 sec_link_tx_perc, sec_link_rx_perc; + u8 sec_link; + bool skip = false; + + lockdep_assert_held(&mvm->mutex); + + if (IS_ERR_OR_NULL(bss_vif)) + return; + + mvmvif = iwl_mvm_vif_from_mac80211(bss_vif); + + if (!mvmvif->esr_active || !mvmvif->ap_sta) + return; + + mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta); + /* We only count for the AP sta in a MLO connection */ + if (!mvmsta->mpdu_counters) + return; + + /* Get the FW ID of the secondary link */ + sec_link = iwl_mvm_get_other_link(bss_vif, + iwl_mvm_get_primary_link(bss_vif)); + if (WARN_ON(!mvmvif->link[sec_link])) + return; + sec_link = mvmvif->link[sec_link]->fw_link_id; + + /* Sum up RX and TX MPDUs from the different queues/links */ + for (int q = 0; q < mvm->trans->num_rx_queues; q++) { + spin_lock_bh(&mvmsta->mpdu_counters[q].lock); + + /* The link IDs that doesn't exist will contain 0 */ + for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) { + total_tx += mvmsta->mpdu_counters[q].per_link[link].tx; + total_rx += mvmsta->mpdu_counters[q].per_link[link].rx; + } + + sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx; + sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx; + + /* + * In EMLSR we have statistics every 5 seconds, so we can reset + * the counters upon every statistics notification. + * The FW sends the notification regularly, but it will be + * misaligned at the start. Skipping the measurement if it is + * short will synchronize us. + */ + if (jiffies - mvmsta->mpdu_counters[q].window_start < + IWL_MVM_TPT_MIN_COUNT_WINDOW) + skip = true; + mvmsta->mpdu_counters[q].window_start = jiffies; + memset(mvmsta->mpdu_counters[q].per_link, 0, + sizeof(mvmsta->mpdu_counters[q].per_link)); + + spin_unlock_bh(&mvmsta->mpdu_counters[q].lock); + } + + if (skip) { + IWL_DEBUG_INFO(mvm, "MPDU statistics window was short\n"); + return; + } + + IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. 
total Rx MPDUs: %ld\n", + total_tx, total_rx); + + /* If we don't have enough MPDUs - exit EMLSR */ + if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH && + total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) { + iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT, + iwl_mvm_get_primary_link(bss_vif)); + return; + } + + IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n", + sec_link, sec_link_tx, sec_link_rx); + + /* Calculate the percentage of the secondary link TX/RX */ + sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0; + sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0; + + /* + * The TX/RX percentage is checked only if it exceeds the required + * minimum. In addition, RX is checked only if the TX check failed. + */ + if ((total_tx > SEC_LINK_MIN_TX && + sec_link_tx_perc < SEC_LINK_MIN_PERC) || + (total_rx > SEC_LINK_MIN_RX && + sec_link_rx_perc < SEC_LINK_MIN_PERC)) + iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE, + iwl_mvm_get_primary_link(bss_vif)); +} + void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { - u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 average_energy[IWL_STATION_COUNT_MAX]; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_system_statistics_notif_oper *stats; int i; @@ -935,6 +1091,9 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, average_energy); + iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy); + + iwl_mvm_update_esr_mode_tpt(mvm); } void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm, @@ -968,7 +1127,7 @@ static void iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + u8 average_energy[IWL_STATION_COUNT_MAX]; __le32 air_time[MAC_INDEX_AUX]; __le32 rx_bytes[MAC_INDEX_AUX]; __le32 flags = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index af15d470c69b..14ea89f931bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -236,21 +236,13 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, int queue, - struct ieee80211_sta *sta, - struct ieee80211_link_sta *link_sta) + struct ieee80211_sta *sta) { if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) { kfree_skb(skb); return; } - if (sta && sta->valid_links && link_sta) { - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - - rx_status->link_valid = 1; - rx_status->link_id = link_sta->link_id; - } - ieee80211_rx_napi(mvm->hw, sta, skb, napi); } @@ -282,6 +274,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, u32 status, struct ieee80211_rx_status *stats) { + struct wireless_dev *wdev; struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; u8 keyid; @@ -303,9 +296,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, if (!ieee80211_is_beacon(hdr->frame_control)) return 0; + if (!sta) + return -1; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = 
iwl_mvm_vif_from_mac80211(mvmsta->vif); + /* key mismatch - will also report !MIC_OK but we shouldn't count it */ if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID)) - return -1; + goto report; /* good cases */ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK && @@ -314,13 +313,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, return 0; } - if (!sta) - return -1; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - /* * both keys will have the same cipher and MIC length, use * whichever one is available @@ -329,11 +321,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, if (!key) { key = rcu_dereference(mvmvif->bcn_prot.keys[1]); if (!key) - return -1; + goto report; } if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2) - return -1; + goto report; /* get the real key ID */ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2]; @@ -347,7 +339,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, return -1; key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]); if (!key) - return -1; + goto report; } /* Report status to mac80211 */ @@ -355,6 +347,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, ieee80211_key_mic_failure(key); else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR) ieee80211_key_replay(key); +report: + wdev = ieee80211_vif_to_wdev(mvmsta->vif); + if (wdev->netdev) + cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len); return -1; } @@ -397,8 +393,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, case IWL_RX_MPDU_STATUS_SEC_GCM: BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN); /* alg is CCM: check MIC only */ - if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) + if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) { + IWL_DEBUG_DROP(mvm, + "Dropping packet, bad MIC (CCM/GCM)\n"); return -1; + } stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED; *crypt_len = IEEE80211_CCMP_HDR_LEN; @@ -516,11 +515,9 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") */ if (ieee80211_is_ctl(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control) || - is_multicast_ether_addr(hdr->addr1)) { - rx_status->flag |= RX_FLAG_DUP_VALIDATED; + ieee80211_is_any_nullfunc(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) return false; - } if (ieee80211_is_data_qos(hdr->frame_control)) { /* frame has qos control */ @@ -569,7 +566,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, lockdep_assert_held(&reorder_buf->lock); while (ieee80211_sn_less(ssn, nssn)) { - int index = ssn % reorder_buf->buf_size; + int index = ssn % baid_data->buf_size; struct sk_buff_head *skb_list = &entries[index].frames; struct sk_buff *skb; @@ -583,7 +580,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, - sta, NULL /* FIXME */); + sta); reorder_buf->num_stored--; } } @@ -620,7 +617,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, ieee80211_sn_add(reorder_buf->head_sn, - reorder_buf->buf_size)); + ba_data->buf_size)); spin_unlock_bh(&reorder_buf->lock); out: @@ -639,15 +636,19 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n", baid, nssn); - if (WARN_ON_ONCE(baid 
== IWL_RX_REORDER_DATA_INVALID_BAID || - baid >= ARRAY_SIZE(mvm->baid_map))) + if (IWL_FW_CHECK(mvm, + baid == IWL_RX_REORDER_DATA_INVALID_BAID || + baid >= ARRAY_SIZE(mvm->baid_map), + "invalid BAID from FW: %d\n", baid)) return; rcu_read_lock(); ba_data = rcu_dereference(mvm->baid_map[baid]); if (!ba_data) { - WARN(true, "BAID %d not found in map\n", baid); + IWL_DEBUG_RX(mvm, + "Got valid BAID %d but not allocated, invalid frame release!\n", + baid); goto out; } @@ -685,11 +686,11 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, return; len -= sizeof(*notif) + sizeof(*internal_notif); - if (internal_notif->sync && - mvm->queue_sync_cookie != internal_notif->cookie) { - WARN_ONCE(1, "Received expired RX queue sync message\n"); + if (WARN_ONCE(internal_notif->sync && + mvm->queue_sync_cookie != internal_notif->cookie, + "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n", + internal_notif->cookie, mvm->queue_sync_cookie, queue)) return; - } switch (internal_notif->type) { case IWL_MVM_RXQ_EMPTY: @@ -734,8 +735,6 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; u8 tid = ieee80211_get_tid(hdr); - u8 sub_frame_idx = desc->amsdu_info & - IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; struct iwl_mvm_reorder_buf_entry *entries; u32 sta_mask; int index; @@ -780,9 +779,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, return false; } - rcu_read_lock(); sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); - rcu_read_unlock(); if (IWL_FW_CHECK(mvm, tid != baid_data->tid || @@ -821,7 +818,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { if (!amsdu || last_subframe) buffer->head_sn = nssn; - /* No need to update AMSDU last SN - we are moving the head */ + spin_unlock_bh(&buffer->lock); return false; } @@ -838,21 +835,15 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, if (!amsdu || last_subframe) buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); - /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; } /* put in reorder buffer */ - index = sn % buffer->buf_size; + index = sn % baid_data->buf_size; __skb_queue_tail(&entries[index].frames, skb); buffer->num_stored++; - if (amsdu) { - buffer->last_amsdu = sn; - buffer->last_sub_index = sub_frame_idx; - } - /* * We cannot trust NSSN for AMSDU sub-frames that are not the last. 
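For reference, the reorder-buffer release logic above is plain 12-bit sequence-number arithmetic: a stored frame lives at slot sn % buf_size, and everything from head_sn up to (but excluding) NSSN is flushed, wrapping across the 0xfff -> 0x000 boundary. A standalone sketch of just that arithmetic, not driver code, with buf_size assumed to be 64 and sn_less() mirroring mac80211's ieee80211_sn_less():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SN_MASK   0xfff           /* 802.11 sequence numbers are 12 bits */
    #define SN_MODULO 0x1000

    /* same half-space trick as mac80211's ieee80211_sn_less() */
    static bool sn_less(uint16_t sn1, uint16_t sn2)
    {
        return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
    }

    int main(void)
    {
        uint16_t head_sn = 0xffe, nssn = 0x002, buf_size = 64;

        /* release every slot from head_sn up to (excluding) nssn */
        for (uint16_t ssn = head_sn; sn_less(ssn, nssn);
             ssn = (ssn + 1) & SN_MASK)
            printf("release slot %u (sn 0x%03x)\n", ssn % buf_size, ssn);
        return 0;
    }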
* The reason is that NSSN advances on the first sub-frame, and may @@ -1004,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); u32 rate_n_flags = phy_data->rate_n_flags; - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; @@ -1059,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, if (he_mu) he_mu->flags2 |= - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) + else if (he_type == RATE_MCS_HE_TYPE_TRIG) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } @@ -1895,21 +1886,6 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb, } } -static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band) -{ - switch (phy_band) { - case PHY_BAND_24: - return NL80211_BAND_2GHZ; - case PHY_BAND_5: - return NL80211_BAND_5GHZ; - case PHY_BAND_6: - return NL80211_BAND_6GHZ; - default: - WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); - return NL80211_BAND_5GHZ; - } -} - struct iwl_rx_sta_csa { bool all_sta_unblocked; struct ieee80211_vif *vif; @@ -1974,6 +1950,16 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm, iwl_mvm_decode_lsig(skb, phy_data); rx_status->device_timestamp = phy_data->gp2_on_air_rise; + + if (mvm->rx_ts_ptp && mvm->monitor_on) { + u64 adj_time = + iwl_mvm_ptp_get_adj_time(mvm, phy_data->gp2_on_air_rise * NSEC_PER_USEC); + + rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC); + rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64; + rx_status->flag &= ~RX_FLAG_MACTIME; + } + rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, @@ -2052,9 +2038,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, u32 len; u32 pkt_len = iwl_rx_packet_payload_len(pkt); struct ieee80211_sta *sta = NULL; - struct ieee80211_link_sta *link_sta = NULL; struct sk_buff *skb; u8 crypt_len = 0; + u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); size_t desc_size; struct iwl_mvm_rx_phy_data phy_data = {}; u32 format; @@ -2173,7 +2159,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (iwl_mvm_is_band_in_rx_supported(mvm)) { u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx); - rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band); + rx_status->band = iwl_mvm_nl80211_band_from_phy(band); } else { rx_status->band = phy_data.channel > 14 ? 
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; @@ -2203,13 +2189,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_lock(); if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { - u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); + if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) { + struct ieee80211_link_sta *link_sta; - if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) { - sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR(sta)) sta = NULL; - link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]); + link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]); + + if (sta && sta->valid_links && link_sta) { + rx_status->link_valid = 1; + rx_status->link_id = link_sta->link_id; + } } } else if (!is_multicast_ether_addr(hdr->addr2)) { /* @@ -2325,6 +2316,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_agg_rx_received(mvm, reorder_data, baid); } + + if (ieee80211_is_data(hdr->frame_control)) { + u8 sub_frame_idx = desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + + /* 0 means not an A-MSDU, and 1 means a new A-MSDU */ + if (!sub_frame_idx || sub_frame_idx == 1) + iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false, + queue); + } } /* management stuff on default queue */ @@ -2353,8 +2354,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) rx_status->flag |= RX_FLAG_AMSDU_MORE; - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, - link_sta); + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); } out: rcu_read_unlock(); @@ -2367,7 +2367,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data; u32 rssi; - u32 info_type; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; struct iwl_mvm_rx_phy_data phy_data; @@ -2380,7 +2379,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, return; rssi = le32_to_cpu(desc->rssi); - info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; phy_data.d0 = desc->phy_info[0]; phy_data.d1 = desc->phy_info[1]; phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; @@ -2432,7 +2430,12 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, /* 0-length PSDU */ rx_status->flag |= RX_FLAG_NO_PSDU; - switch (info_type) { + /* mark as failed PLCP on any errors to skip checks in mac80211 */ + if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) != + RX_NO_DATA_INFO_ERR_NONE) + rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; + + switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) { case RX_NO_DATA_INFO_TYPE_NDP: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; @@ -2457,8 +2460,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, * * We mark it as mac header, for upper layers to know where * all radio tap header ends. 
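The iwl_mvm_count_mpdu() call above (counting once per A-MSDU, via sub_frame_idx 0 or 1) feeds the per-RX-queue counters this patch introduces for EMLSR decisions; each queue gets its own lock+counter pair so the RX paths never contend, and the reader sums across queues. A standalone sketch of that pattern, with hypothetical names and pthread mutexes standing in for the driver's spinlocks:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RX_QUEUES 4

    struct mpdu_counter {              /* one per RX queue, as in the patch */
        pthread_mutex_t lock;
        uint64_t tx, rx;
    };

    static struct mpdu_counter counters[NUM_RX_QUEUES];

    /* each RX/TX path touches only its own queue's lock */
    static void count_mpdu(int queue, unsigned int count, int is_tx)
    {
        pthread_mutex_lock(&counters[queue].lock);
        if (is_tx)
            counters[queue].tx += count;
        else
            counters[queue].rx += count;
        pthread_mutex_unlock(&counters[queue].lock);
    }

    /* the reader (cf. iwl_mvm_update_esr_mode_tpt) sums over all queues */
    int main(void)
    {
        uint64_t total_rx = 0;

        for (int q = 0; q < NUM_RX_QUEUES; q++)
            pthread_mutex_init(&counters[q].lock, NULL);

        count_mpdu(0, 3, 0);
        count_mpdu(2, 5, 0);
        for (int q = 0; q < NUM_RX_QUEUES; q++) {
            pthread_mutex_lock(&counters[q].lock);
            total_rx += counters[q].rx;
            pthread_mutex_unlock(&counters[q].lock);
        }
        printf("total Rx MPDUs: %llu\n", (unsigned long long)total_rx);
        return 0;
    }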
+ * + * Since putting data on the skb does not move the data already there, + * and that is the only way data is added here, data + len is exactly + * where the next header would be placed. */ - skb_reset_mac_header(skb); + skb_set_mac_header(skb, skb->len); /* * Override the nss from the rx_vec since the rate_n_flags has @@ -2506,19 +2512,24 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bar_frame_release *release = (void *)pkt->data; - unsigned int baid = le32_get_bits(release->ba_info, - IWL_BAR_FRAME_RELEASE_BAID_MASK); - unsigned int nssn = le32_get_bits(release->ba_info, - IWL_BAR_FRAME_RELEASE_NSSN_MASK); - unsigned int sta_id = le32_get_bits(release->sta_tid, - IWL_BAR_FRAME_RELEASE_STA_MASK); - unsigned int tid = le32_get_bits(release->sta_tid, - IWL_BAR_FRAME_RELEASE_TID_MASK); struct iwl_mvm_baid_data *baid_data; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + unsigned int baid, nssn, sta_id, tid; - if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release))) + if (IWL_FW_CHECK(mvm, pkt_len < sizeof(*release), + "Unexpected frame release notif size %d (expected %zu)\n", + pkt_len, sizeof(*release))) return; + baid = le32_get_bits(release->ba_info, + IWL_BAR_FRAME_RELEASE_BAID_MASK); + nssn = le32_get_bits(release->ba_info, + IWL_BAR_FRAME_RELEASE_NSSN_MASK); + sta_id = le32_get_bits(release->sta_tid, + IWL_BAR_FRAME_RELEASE_STA_MASK); + tid = le32_get_bits(release->sta_tid, + IWL_BAR_FRAME_RELEASE_TID_MASK); + if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || baid >= ARRAY_SIZE(mvm->baid_map))) return; @@ -2532,7 +2543,7 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, goto out; } - if (WARN(tid != baid_data->tid || sta_id > IWL_MVM_STATION_COUNT_MAX || + if (WARN(tid != baid_data->tid || sta_id > IWL_STATION_COUNT_MAX || !(baid_data->sta_mask & BIT(sta_id)), "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n", baid, baid_data->sta_mask, baid_data->tid, sta_id, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 7b6f1cdca067..60bd9c7e5f03 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -48,6 +48,8 @@ /* Number of iterations on the channel for mei filtered scan */ #define IWL_MEI_SCAN_NUM_ITER 5U +#define WFA_TPC_IE_LEN 9 + struct iwl_mvm_scan_timing_params { u32 suspend_time; u32 max_out_time; @@ -208,7 +210,7 @@ static void iwl_mvm_scan_iterator(void *_data, u8 *mac, curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif); - if (vif->type == NL80211_IFTYPE_AP && vif->p2p && + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO && mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt && mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id) data->is_dcm_with_p2p_go = true; @@ -226,6 +228,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, .global_cnt = 0, }; + /* + * A scanning AP interface probably wants to generate a survey to do + * ACS (automatic channel selection). + * Force a non-fragmented scan in that case.
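Earlier in this hunk, the BAR frame-release handler was reworked so the firmware-supplied payload length is validated with IWL_FW_CHECK() before any field is decoded. That ordering - length check first, field extraction second - is the standard defensive shape for firmware-provided buffers; a minimal standalone illustration, all names hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_release {        /* stand-in for a FW notification layout */
        uint32_t ba_info;
        uint32_t sta_tid;
    };

    static int handle_notif(const uint8_t *data, size_t pkt_len)
    {
        struct demo_release rel;

        /* what the IWL_FW_CHECK() above guards: a short/garbled notif */
        if (pkt_len < sizeof(rel)) {
            fprintf(stderr, "unexpected notif size %zu (expected %zu)\n",
                    pkt_len, sizeof(rel));
            return -1;
        }

        memcpy(&rel, data, sizeof(rel));
        /* only now is it safe to pull baid/nssn/sta/tid out of rel */
        return 0;
    }

    int main(void)
    {
        uint8_t buf[8] = { 0 };

        printf("short: %d\n", handle_notif(buf, 4));  /* rejected */
        printf("ok:    %d\n", handle_notif(buf, 8));  /* accepted */
        return 0;
    }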
+ */ + if (vif && ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP) + return IWL_SCAN_TYPE_WILD; + ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_iterator, @@ -241,13 +251,11 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, return IWL_SCAN_TYPE_FRAGMENTED; /* - * in case of DCM with GO where BSS DTIM interval < 220msec - * set all scan requests as fast-balance scan + * in case of DCM with P2P GO set all scan requests as + * fast-balance scan */ if (vif && vif->type == NL80211_IFTYPE_STATION && - data.is_dcm_with_p2p_go && - ((vif->bss_conf.beacon_int * - vif->bss_conf.dtim_period) < 220)) + data.is_dcm_with_p2p_go) return IWL_SCAN_TYPE_FAST_BALANCE; } @@ -297,8 +305,8 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; - /* we create the 802.11 header and SSID element */ - max_probe_len -= 24 + 2; + /* we create the 802.11 header, SSID element and WFA TPC element */ + max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN; /* DS parameter set element is added on 2.4GHZ band if required */ if (iwl_mvm_rrm_scan_needed(mvm)) @@ -454,7 +462,7 @@ static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) if (!ssid_list[i].len) break; if (ssid_list[i].len == ssid_len && - !memcmp(ssid_list->ssid, ssid, ssid_len)) + !memcmp(ssid_list[i].ssid, ssid, ssid_len)) return i; } return -1; @@ -725,8 +733,6 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, return newpos; } -#define WFA_TPC_IE_LEN 9 - static void iwl_mvm_add_tpc_report_ie(u8 *pos) { pos[0] = WLAN_EID_VENDOR_SPECIFIC; @@ -831,8 +837,8 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, return ((n_ssids <= PROBE_OPTION_MAX) && (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & (ies->common_ie_len + - ies->len[NL80211_BAND_2GHZ] + - ies->len[NL80211_BAND_5GHZ] <= + ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] + + ies->len[NL80211_BAND_6GHZ] <= iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); } @@ -854,11 +860,13 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, * 4. it's not a p2p find operation. * 5. we are not in low latency mode, * or if fragmented ebs is supported by the FW + * 6. the VIF is not an AP interface (scan wants survey results) */ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && vif->type != NL80211_IFTYPE_P2P_DEVICE && - (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm))); + (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)) && + ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP); } static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) @@ -1305,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else @@ -1379,11 +1387,14 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } -static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params) +static u32 iwl_mvm_scan_umac_ooc_priority(int type) { - return iwl_mvm_is_regular_scan(params) ?
- IWL_SCAN_PRIORITY_EXT_6 : - IWL_SCAN_PRIORITY_EXT_2; + if (type == IWL_MVM_SCAN_REGULAR) + return IWL_SCAN_PRIORITY_EXT_6; + if (type == IWL_MVM_SCAN_INT_MLO) + return IWL_SCAN_PRIORITY_EXT_4; + + return IWL_SCAN_PRIORITY_EXT_2; } static void @@ -1407,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) general_params->adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) general_params->adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else @@ -1583,7 +1594,7 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, for (i = 0; i < n_channels; i++) { channel_cfg[i].flags = cpu_to_le32(flags); - channel_cfg[i].v1.channel_num = channels[i]->hw_value; + channel_cfg[i].channel_num = channels[i]->hw_value; if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { enum nl80211_band band = channels[i]->band; @@ -1615,13 +1626,13 @@ iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm, &cp->channel_config[i]; cfg->flags = cpu_to_le32(flags); - cfg->v2.channel_num = channels[i]->hw_value; + cfg->channel_num = channels[i]->hw_value; cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band); cfg->v2.iter_count = 1; cfg->v2.iter_interval = 0; iwl_mvm_scan_ch_add_n_aps_override(vif_type, - cfg->v2.channel_num, + cfg->channel_num, cfg->v2.band, bitmap, bitmap_n_entries); } @@ -1645,9 +1656,20 @@ iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm, u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band); cfg->flags = cpu_to_le32(flags | n_aps_flag); - cfg->v2.channel_num = channels[i]->hw_value; + cfg->channel_num = channels[i]->hw_value; if (cfg80211_channel_is_psc(channels[i])) cfg->flags = 0; + + if (band == NL80211_BAND_6GHZ) { + /* 6 GHz channels should only appear in a scan request + * that has scan_6ghz set. The only exception is MLO + * scan, which has to be passive. 
+ */ + WARN_ON_ONCE(cfg->flags != 0); + cfg->flags = + cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE); + } + cfg->v2.iter_count = 1; cfg->v2.iter_interval = 0; if (version < 17) @@ -1719,7 +1741,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, break; } - if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) { + if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE && + !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid), + "scan: invalid BSSID at index %u, index_b=%u\n", + j, idex_b)) { memcpy(&pp->bssid_array[idex_b++], scan_6ghz_params[j].bssid, ETH_ALEN); } @@ -1749,8 +1774,9 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, &cp->channel_config[ch_cnt]; u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; - u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries; - bool force_passive, found = false, allow_passive = true, + u8 k, n_s_ssids = 0, n_bssids = 0; + u8 max_s_ssids, max_bssids; + bool force_passive = false, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; @@ -1763,7 +1789,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, !params->n_6ghz_params && params->n_ssids) continue; - cfg->v1.channel_num = params->channels[i]->hw_value; + cfg->channel_num = params->channels[i]->hw_value; if (version < 17) cfg->v2.band = PHY_BAND_6; else @@ -1773,20 +1799,15 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, cfg->v5.iter_count = 1; cfg->v5.iter_interval = 0; - /* - * To optimize the scan time, i.e., reduce the scan dwell time - * on each channel, the below logic tries to set 3 direct BSSID - * probe requests for each broadcast probe request with a short - * SSID. - * TODO: improve this logic - */ - n_used_bssid_entries = 3; - for (j = 0; j < params->n_6ghz_params; j++) { + for (u32 j = 0; j < params->n_6ghz_params; j++) { s8 tmp_psd_20; if (!(scan_6ghz_params[j].channel_idx == i)) continue; + unsolicited_probe_on_chan |= + scan_6ghz_params[j].unsolicited_probe; + /* Use the highest PSD value allowed as advertised by * APs for this channel */ @@ -1798,12 +1819,69 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, psd_20 < tmp_psd_20)) psd_20 = tmp_psd_20; - found = false; - unsolicited_probe_on_chan |= - scan_6ghz_params[j].unsolicited_probe; psc_no_listen |= scan_6ghz_params[j].psc_no_listen; + } - for (k = 0; k < pp->short_ssid_num; k++) { + /* + * In the following cases apply passive scan: + * 1. Non fragmented scan: + * - PSC channel with NO_LISTEN_FLAG on should be treated + * like non PSC channel + * - Non PSC channel with more than 3 short SSIDs or more + * than 9 BSSIDs. + * - Non PSC Channel with unsolicited probe response and + * more than 2 short SSIDs or more than 6 BSSIDs. + * - PSC channel with more than 2 short SSIDs or more than + * 6 BSSIDs. + * 2. Fragmented scan: + * - PSC channel with more than 1 SSID or 3 BSSIDs. + * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs. + * - Non PSC channel with unsolicited probe response and + * more than 1 SSID or more than 3 BSSIDs.
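The rule list above collapses to three tiers, and the BSSID budget is always three times the short-SSID budget (9/3, 6/2, 3/1). A compact restatement of the limits assigned by the code that follows (hypothetical helper, same values):

    #include <stdbool.h>

    /* frag: fragmented scan; psc: PSC channel; no_listen: PSC with the
     * NO_LISTEN flag set; unsol: unsolicited probe responses on channel.
     */
    static void probe_budget(bool frag, bool psc, bool no_listen, bool unsol,
                             unsigned int *max_s_ssids,
                             unsigned int *max_bssids)
    {
        if (!frag) {
            bool listening_psc = psc && !no_listen;

            *max_s_ssids = (listening_psc || unsol) ? 2 : 3;
        } else {
            *max_s_ssids = (psc || unsol) ? 1 : 2;
        }

        /* BSSID budget is always 3x the short-SSID budget */
        *max_bssids = 3 * *max_s_ssids;
    }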
+ */ + if (!iwl_mvm_is_scan_fragmented(params->type)) { + if (!cfg80211_channel_is_psc(params->channels[i]) || + psc_no_listen) { + if (unsolicited_probe_on_chan) { + max_s_ssids = 2; + max_bssids = 6; + } else { + max_s_ssids = 3; + max_bssids = 9; + } + } else { + max_s_ssids = 2; + max_bssids = 6; + } + } else if (cfg80211_channel_is_psc(params->channels[i])) { + max_s_ssids = 1; + max_bssids = 3; + } else { + if (unsolicited_probe_on_chan) { + max_s_ssids = 1; + max_bssids = 3; + } else { + max_s_ssids = 2; + max_bssids = 6; + } + } + + /* + * To optimize the scan time, i.e., reduce the scan dwell time + * on each channel, the below logic tries to set 3 direct BSSID + * probe requests for each broadcast probe request with a short + * SSID. + * TODO: improve this logic + */ + for (u32 j = 0; j < params->n_6ghz_params; j++) { + if (!(scan_6ghz_params[j].channel_idx == i)) + continue; + + found = false; + + for (k = 0; + k < pp->short_ssid_num && n_s_ssids < max_s_ssids; + k++) { if (!scan_6ghz_params[j].unsolicited_probe && le32_to_cpu(pp->short_ssid[k]) == scan_6ghz_params[j].short_ssid) { @@ -1814,25 +1892,25 @@ } /* - * Use short SSID only to create a new - * iteration during channel dwell or in - * case that the short SSID has a - * matching SSID, i.e., scan for hidden - * APs. + * Prefer creating BSSID entries unless + * the short SSID probe can be done in + * the same channel dwell iteration. + * + * We also need to create a short SSID + * entry for any hidden AP. */ - if (n_used_bssid_entries >= 3) { - s_ssid_bitmap |= BIT(k); - s_max++; - n_used_bssid_entries -= 3; - found = true; + if (3 * n_s_ssids > n_bssids && + !pp->direct_scan[k].len) break; - } else if (pp->direct_scan[k].len) { - s_ssid_bitmap |= BIT(k); - s_max++; - found = true; + + /* Hidden AP, cannot do passive scan */ + if (pp->direct_scan[k].len) allow_passive = false; - break; - } + + s_ssid_bitmap |= BIT(k); + n_s_ssids++; + found = true; + break; } } @@ -1844,9 +1922,12 @@ scan_6ghz_params[j].bssid, ETH_ALEN)) { if (!(bssid_bitmap & BIT(k))) { - bssid_bitmap |= BIT(k); - b_max++; - n_used_bssid_entries++; + if (n_bssids < max_bssids) { + bssid_bitmap |= BIT(k); + n_bssids++; + } else { + force_passive = true; + } } break; } @@ -1860,39 +1941,6 @@ if (unsolicited_probe_on_chan) flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES; - /* - * In the following cases apply passive scan: - * 1. Non fragmented scan: - * - PSC channel with NO_LISTEN_FLAG on should be treated - * like non PSC channel - * - Non PSC channel with more than 3 short SSIDs or more - * than 9 BSSIDs. - * - Non PSC Channel with unsolicited probe response and - * more than 2 short SSIDs or more than 6 BSSIDs. - * - PSC channel with more than 2 short SSIDs or more than - * 6 BSSIDs. - * 3. Fragmented scan: - * - PSC channel with more than 1 SSID or 3 BSSIDs. - * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs. - * - Non PSC channel with unsolicited probe response and - * more than 1 SSID or more than 3 BSSIDs.
- */ - if (!iwl_mvm_is_scan_fragmented(params->type)) { - if (!cfg80211_channel_is_psc(params->channels[i]) || - flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) { - force_passive = (s_max > 3 || b_max > 9); - force_passive |= (unsolicited_probe_on_chan && - (s_max > 2 || b_max > 6)); - } else { - force_passive = (s_max > 2 || b_max > 6); - } - } else if (cfg80211_channel_is_psc(params->channels[i])) { - force_passive = (s_max > 1 || b_max > 3); - } else { - force_passive = (s_max > 2 || b_max > 6); - force_passive |= (unsolicited_probe_on_chan && - (s_max > 1 || b_max > 3)); - } if ((allow_passive && force_passive) || (!(bssid_bitmap | s_ssid_bitmap) && !cfg80211_channel_is_psc(params->channels[i]))) @@ -2100,7 +2148,8 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, - struct ieee80211_vif *vif, int type) + struct ieee80211_vif *vif, int type, + u16 gen_flags) { u8 flags = 0; @@ -2120,6 +2169,13 @@ static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT)) flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT; + /* Passive and AP interface -> ACS (automatic channel selection) */ + if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE && + ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP && + iwl_fw_lookup_notif_ver(mvm->fw, SCAN_GROUP, CHANNEL_SURVEY_NOTIF, + 0) >= 1) + flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS; + return flags; } @@ -2257,8 +2313,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_scan_umac_dwell(mvm, cmd, params); - mvm->scan_uid_status[uid] = type; - cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif); cmd->general_flags = cpu_to_le16(gen_flags); @@ -2299,10 +2353,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule, &tail_v2->delay); - if (ret) { - mvm->scan_uid_status[uid] = 0; + if (ret) return ret; - } if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { tail_v2->preq = params->preq; @@ -2425,7 +2477,7 @@ iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm, if (!cfg80211_channel_is_psc(channel)) continue; - cfg->v5.channel_num = channel->hw_value; + cfg->channel_num = channel->hw_value; cfg->v5.iter_count = 1; cfg->v5.iter_interval = 0; @@ -2452,9 +2504,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int ret; u16 gen_flags; - mvm->scan_uid_status[uid] = type; - - cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); + cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type)); cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); @@ -2489,15 +2539,14 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, u8 gen_flags2; u32 bitmap_ssid = 0; - mvm->scan_uid_status[uid] = type; - - cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); + cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type)); cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); if (version >= 15) - gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type); + gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type, + gen_flags); else gen_flags2 = 0; @@ -2534,10 +2583,8 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, params->n_channels, pb, cp, vif->type, version); - if 
(!cp->count) { - mvm->scan_uid_status[uid] = 0; + if (!cp->count) return -EINVAL; - } if (!params->n_ssids || (params->n_ssids == 1 && !params->ssids[0].ssid_len)) @@ -2815,7 +2862,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, if (ver_handler->version != scan_ver) continue; - return ver_handler->handler(mvm, vif, params, type, uid); + err = ver_handler->handler(mvm, vif, params, type, uid); + return err ? : uid; } err = iwl_mvm_scan_umac(mvm, vif, params, type, uid); @@ -2841,7 +2889,7 @@ static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac, if (vif == data->current_vif) return; - if (vif->type == NL80211_IFTYPE_AP && vif->p2p) { + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) { u32 link_id; for (link_id = 0; @@ -2916,9 +2964,11 @@ static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm, } } -int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct cfg80211_scan_request *req, - struct ieee80211_scan_ies *ies) +static int _iwl_mvm_single_scan_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies, + int type) { struct iwl_host_cmd hcmd = { .len = { iwl_mvm_scan_size(mvm), }, @@ -2936,7 +2986,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EBUSY; } - ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR); + ret = iwl_mvm_check_running_scans(mvm, type); if (ret) return ret; @@ -2985,8 +3035,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_scan_6ghz_passive_scan(mvm, ¶ms, vif); - uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, - IWL_MVM_SCAN_REGULAR); + uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, type); if (uid < 0) return uid; @@ -3001,23 +3050,35 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ IWL_ERR(mvm, "Scan failed! 
ret %d\n", ret); iwl_mvm_resume_tcm(mvm); - mvm->scan_uid_status[uid] = 0; return ret; } - IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); - mvm->scan_status |= IWL_MVM_SCAN_REGULAR; - mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); + IWL_DEBUG_SCAN(mvm, "Scan request send success: type=%u, uid=%u\n", + type, uid); + + mvm->scan_uid_status[uid] = type; + mvm->scan_status |= type; + + if (type == IWL_MVM_SCAN_REGULAR) { + mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); + schedule_delayed_work(&mvm->scan_timeout_dwork, + msecs_to_jiffies(SCAN_TIMEOUT)); + } if (params.enable_6ghz_passive) mvm->last_6ghz_passive_scan_jiffies = jiffies; - schedule_delayed_work(&mvm->scan_timeout_dwork, - msecs_to_jiffies(SCAN_TIMEOUT)); - return 0; } +int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + return _iwl_mvm_single_scan_start(mvm, vif, req, ies, + IWL_MVM_SCAN_REGULAR); +} + int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, @@ -3118,23 +3179,23 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.n_channels = j; } - if (non_psc_included && - !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { - kfree(params.channels); - return -ENOBUFS; + if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { + ret = -ENOBUFS; + goto out; } uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, type); - - if (non_psc_included) - kfree(params.channels); - if (uid < 0) - return uid; + if (uid < 0) { + ret = uid; + goto out; + } ret = iwl_mvm_send_cmd(mvm, &hcmd); if (!ret) { IWL_DEBUG_SCAN(mvm, - "Sched scan request was sent successfully\n"); + "Sched scan request send success: type=%u, uid=%u\n", + type, uid); + mvm->scan_uid_status[uid] = type; mvm->scan_status |= type; } else { /* If the scan failed, it usually means that the FW was unable @@ -3142,10 +3203,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, * should try to send the command again with different params. */ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret); - mvm->scan_uid_status[uid] = 0; mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } +out: + if (non_psc_included) + kfree(params.channels); return ret; } @@ -3156,9 +3219,25 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); + bool select_links = false; mvm->mei_scan_filter.is_mei_limited_scan = false; + IWL_DEBUG_SCAN(mvm, + "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n", + uid, mvm->scan_uid_status[uid], + notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? + "completed" : "aborted", + iwl_mvm_ebs_status_str(notif->ebs_status)); + + IWL_DEBUG_SCAN(mvm, "Scan completed: scan_status=0x%x\n", + mvm->scan_status); + + IWL_DEBUG_SCAN(mvm, + "Scan completed: line=%u, iter=%u, elapsed time=%u\n", + notif->last_schedule, notif->last_iter, + __le32_to_cpu(notif->time_from_last_iter)); + if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) return; @@ -3172,8 +3251,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_mvm_vif_link_info *link_info = scan_vif->link[mvm->scan_link_id]; - if (!WARN_ON(!link_info)) + /* It is possible that by the time the scan is complete the link + * was already removed and is not valid. 
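A detail worth noting in the success path just above: scan_uid_status[] and scan_status are now written only after the firmware accepted the command, so the error path has nothing to roll back (the old code had to clear the uid slot on failure). That commit-on-success shape, reduced to a standalone sketch with hypothetical names:

    #include <stdio.h>

    struct demo {
        unsigned int scan_uid_status[8];
        unsigned int scan_status;
    };

    /* pretend firmware command; returns 0 on success */
    static int send_scan_cmd(void) { return 0; }

    static int start_scan(struct demo *d, unsigned int uid, unsigned int type)
    {
        int ret = send_scan_cmd();

        if (ret)
            return ret;     /* state untouched: nothing to roll back */

        /* commit bookkeeping only once the FW accepted the command */
        d->scan_uid_status[uid] = type;
        d->scan_status |= type;
        return 0;
    }

    int main(void)
    {
        struct demo d = {};

        printf("start: %d, status 0x%x\n",
               start_scan(&d, 0, 1), d.scan_status);
        return 0;
    }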
+ */ + if (link_info) memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN); + else + IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n"); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_vif = NULL; @@ -3182,25 +3266,28 @@ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; + } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) { + IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n"); + /* + * Other scan types won't necessarily scan for the MLD links' channels. + * Therefore, only select links after a successful internal scan. + */ + select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED; } mvm->scan_status &= ~mvm->scan_uid_status[uid]; - IWL_DEBUG_SCAN(mvm, - "Scan completed, uid %u type %u, status %s, EBS status %s\n", - uid, mvm->scan_uid_status[uid], - notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? - "completed" : "aborted", - iwl_mvm_ebs_status_str(notif->ebs_status)); - IWL_DEBUG_SCAN(mvm, - "Last line %d, Last iteration %d, Time from last iteration %d\n", - notif->last_schedule, notif->last_iter, - __le32_to_cpu(notif->time_from_last_iter)); + + IWL_DEBUG_SCAN(mvm, "Scan completed: after update: scan_status=0x%x\n", + mvm->scan_status); if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; + + if (select_links) + wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk); } void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, @@ -3226,13 +3313,23 @@ mvm->scan_start); } -static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) +static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type, bool *wait) { - struct iwl_umac_scan_abort cmd = {}; + struct iwl_umac_scan_abort abort_cmd = {}; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), + .len = { sizeof(abort_cmd), }, + .data = { &abort_cmd, }, + .flags = CMD_SEND_IN_RFKILL, + }; + int uid, ret; + u32 status = IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND; lockdep_assert_held(&mvm->mutex); + *wait = true; + /* We should always get a valid index here, because we already * checked that this type of scan was running in the generic * code. @@ -3241,16 +3338,28 @@ if (WARN_ON_ONCE(uid < 0)) return uid; - cmd.uid = cpu_to_le32(uid); + abort_cmd.uid = cpu_to_le32(uid); IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); - ret = iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), - 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); + + IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d, status=%u\n", ret, status); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; + /* Handle the case that the FW is no longer familiar with the scan that + * is to be stopped. In such a case, it is expected that the scan + * complete notification was already received but not yet processed. + * In that case, there is no need to wait for a scan complete + * notification and the flow should continue similar to the case that + * the scan was really aborted.
+ */ + if (status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND) { + mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; + *wait = false; + } + return ret; } @@ -3260,6 +3369,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, SCAN_OFFLOAD_COMPLETE, }; int ret; + bool wait = true; lockdep_assert_held(&mvm->mutex); @@ -3271,7 +3381,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - ret = iwl_mvm_umac_scan_abort(mvm, type); + ret = iwl_mvm_umac_scan_abort(mvm, type, &wait); else ret = iwl_mvm_lmac_scan_abort(mvm); @@ -3279,6 +3389,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); return ret; + } else if (!wait) { + IWL_DEBUG_SCAN(mvm, "no need to wait for scan type %d\n", type); + iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); + return 0; } return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, @@ -3363,11 +3477,17 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) * restart_hw, so do not report if FW is about to be * restarted. */ - if (!mvm->fw_restart) + if (!iwlwifi_mod_params.fw_restart) ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; mvm->scan_uid_status[uid] = 0; } + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO); + if (uid >= 0) { + IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n"); + mvm->scan_uid_status[uid] = 0; + } + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_STOPPING_REGULAR); if (uid >= 0) @@ -3378,6 +3498,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) if (uid >= 0) mvm->scan_uid_status[uid] = 0; + uid = iwl_mvm_scan_uid_by_status(mvm, + IWL_MVM_SCAN_STOPPING_INT_MLO); + if (uid >= 0) + mvm->scan_uid_status[uid] = 0; + /* We shouldn't have any UIDs still set. Loop over all the * UIDs to make sure there's nothing left there and warn if * any is found. @@ -3403,7 +3528,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) * restarted. 
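The abort path above now has three outcomes: the command failed (bail out), the command succeeded but returned IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND (the FW already forgot the scan, so don't wait), or a normal abort (wait for the completion notification). Condensed into one sketch - not a drop-in, the UMAC/LMAC split, error details and locking are elided - the stop flow is roughly:

    static int scan_stop_wait_sketch(struct iwl_mvm *mvm, int type)
    {
        static const u16 scan_done_notif[] = {
            SCAN_COMPLETE_UMAC, SCAN_OFFLOAD_COMPLETE,
        };
        struct iwl_notification_wait wait_scan_done;
        bool wait = true;
        int ret;

        /* arm the waiter before sending the abort, so the completion
         * notification cannot race past us
         */
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
                                   scan_done_notif,
                                   ARRAY_SIZE(scan_done_notif), NULL, NULL);

        ret = iwl_mvm_umac_scan_abort(mvm, type, &wait);
        if (ret || !wait) {
            /* failure, or the FW no longer knows the scan: don't wait */
            iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
            return ret;
        }

        return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
                                     1 * HZ);
    }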
*/ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && - !mvm->fw_restart) { + !iwlwifi_mod_params.fw_restart) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } @@ -3414,6 +3539,10 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) { int ret; + IWL_DEBUG_SCAN(mvm, + "Request to stop scan: type=0x%x, status=0x%x\n", + type, mvm->scan_status); + if (!(mvm->scan_status & type)) return 0; @@ -3425,6 +3554,9 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) ret = iwl_mvm_scan_stop_wait(mvm, type); if (!ret) mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; + else + IWL_DEBUG_SCAN(mvm, "Failed to stop scan\n"); + out: /* Clear the scan status so the next scan requests will * succeed and mark the scan as stopping, so that the Rx @@ -3449,3 +3581,277 @@ out: return ret; } + +static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_channel **channels, + size_t n_channels) +{ + struct cfg80211_scan_request *req = NULL; + struct ieee80211_scan_ies ies = {}; + size_t size, i; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n", + n_channels); + + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) || + hweight16(vif->valid_links) == 1) + return -EINVAL; + + size = struct_size(req, channels, n_channels); + req = kzalloc(size, GFP_KERNEL); + if (!req) + return -ENOMEM; + + /* set the requested channels */ + for (i = 0; i < n_channels; i++) + req->channels[i] = channels[i]; + + req->n_channels = n_channels; + + /* set the rates */ + for (i = 0; i < NUM_NL80211_BANDS; i++) + if (mvm->hw->wiphy->bands[i]) + req->rates[i] = + (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1; + + req->wdev = ieee80211_vif_to_wdev(vif); + req->wiphy = mvm->hw->wiphy; + req->scan_start = jiffies; + req->tsf_report_link_id = -1; + + ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies, + IWL_MVM_SCAN_INT_MLO); + kfree(req); + + IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret); + return ret; +} + +int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS]; + unsigned long usable_links = ieee80211_vif_usable_links(vif); + size_t n_channels = 0; + u8 link_id; + + lockdep_assert_held(&mvm->mutex); + + if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) { + IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n"); + return -EBUSY; + } + + rcu_read_lock(); + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(vif->link_conf[link_id]); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + channels[n_channels++] = link_conf->chanreq.oper.chan; + } + + rcu_read_unlock(); + + if (!n_channels) + return -EINVAL; + + return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels); + } + +static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm, + enum nl80211_band band, + u16 phy_chan_num) +{ + struct ieee80211_supported_band *sband = mvm->hw->wiphy->bands[band]; + int chan_idx; + + if (WARN_ON_ONCE(!sband)) + return -EINVAL; + + for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) { + struct ieee80211_channel *channel = &sband->channels[chan_idx]; + + if (channel->hw_value == phy_chan_num) + return chan_idx; + } + + return -EINVAL; +} + +static u32 iwl_mvm_div_by_db(u32 value, u8 db) +{ + /* + * 2^32 * 10**(-i / 10) for i = [1, 10], skipping 0 and simply stopping at
10 dB and looping instead of using a much larger table. + * + * Using 64 bit math is overkill, but means the helper does not require + * a limit on the input range. + */ + static const u32 db_to_val[] = { + 0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89, + 0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a, + }; + + while (value && db > 0) { + u8 change = min_t(u8, db, ARRAY_SIZE(db_to_val)); + + value = (((u64)value) * db_to_val[change - 1]) >> 32; + + db -= change; + } + + return value; +} + +VISIBLE_IF_IWLWIFI_KUNIT s8 +iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif) +{ + s8 average_magnitude; + u32 average_factor; + s8 sum_magnitude = -128; + u32 sum_factor = 0; + int i, count = 0; + + /* + * To properly average the decibel values (signal values given in dBm) + * we need to do the math in linear space. Doing a linear average of + * dB (dBm) values is a bit annoying though due to the large range of + * at least -10 to -110 dBm that will not fit into a 32 bit integer. + * + * A 64 bit integer should be sufficient, but then we still have the + * problem that there are no directly usable utility functions + * available. + * + * So, let's not deal with that and instead do much of the calculation + * with a 16.16 fixed point integer along with a base in dBm. 16.16 bit + * gives us plenty of head-room for adding up a few values and even + * doing some math on it. And the tail should be accurate enough too + * (1/2^16 is somewhere around -48 dB, so effectively zero). + * + * i.e. the real value of sum is: + * sum = sum_factor / 2^16 * 10^(sum_magnitude / 10) mW + * + * However, that does mean we need to be able to bring two values to + * a common base, so we need a helper for that. + * + * Note that this function takes an input with unsigned negative dBm + * values but returns a signed dBm (i.e. a negative value). + */ + + for (i = 0; i < ARRAY_SIZE(notif->noise); i++) { + s8 val_magnitude; + u32 val_factor; + + if (notif->noise[i] == 0xff) + continue; + + val_factor = 0x10000; + val_magnitude = -notif->noise[i]; + + if (val_magnitude <= sum_magnitude) { + u8 div_db = sum_magnitude - val_magnitude; + + val_factor = iwl_mvm_div_by_db(val_factor, div_db); + val_magnitude = sum_magnitude; + } else { + u8 div_db = val_magnitude - sum_magnitude; + + sum_factor = iwl_mvm_div_by_db(sum_factor, div_db); + sum_magnitude = val_magnitude; + } + + sum_factor += val_factor; + count++; + } + + /* No valid noise measurement, return a very high noise level */ + if (count == 0) + return 0; + + average_magnitude = sum_magnitude; + average_factor = sum_factor / count; + + /* + * average_factor will be a number smaller than 1.0 (0x10000) at this + * point. What we need to do now is to adjust average_magnitude so that + * average_factor is between -0.5 dB and 0.5 dB. + * + * Just do -1 dB steps and find the first i for which + * div_by_db(0xe429, i) = 0x10000 * 10^(-(i + 0.5) / 10) + * is no longer larger than average_factor.
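To make the fixed-point mechanics concrete, here is a standalone demo (not driver code) that averages -80 dBm and -90 dBm using the same table and the same final -1 dB loop. The true linear average is about -82.6 dBm; the demo prints -83, inside the 0.5 dB tolerance described above:

    #include <stdint.h>
    #include <stdio.h>

    /* same constants as iwl_mvm_div_by_db(): 2^32 * 10^(-i / 10) */
    static const uint32_t db_to_val[] = {
        0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89,
        0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a,
    };

    static uint32_t div_by_db(uint32_t value, uint8_t db)
    {
        while (value && db > 0) {
            uint8_t step = db < 10 ? db : 10;

            value = (uint32_t)(((uint64_t)value *
                                db_to_val[step - 1]) >> 32);
            db -= step;
        }
        return value;
    }

    int main(void)
    {
        /* base -80 dBm: -80 dBm is factor 0x10000, -90 dBm is 10 dB less */
        uint32_t sum = 0x10000 + div_by_db(0x10000, 10);
        uint32_t avg = sum / 2;       /* 16.16 factor at magnitude -80 dBm */
        int i;

        /* identical to the driver's final adjustment loop */
        for (i = 0; avg < div_by_db(0xe429, i); i++)
            ;

        printf("average ~ %d dBm\n", -80 - i);   /* prints -83 */
        return 0;
    }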
+ */ + for (i = 0; average_factor < iwl_mvm_div_by_db(0xe429, i); i++) { + /* nothing */ + } + + return average_magnitude - i; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_average_dbm_values); + +void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + const struct iwl_umac_scan_channel_survey_notif *notif = + (void *)pkt->data; + struct iwl_mvm_acs_survey_channel *info; + enum nl80211_band band; + int chan_idx; + + lockdep_assert_held(&mvm->mutex); + + if (!mvm->acs_survey) { + size_t n_channels = 0; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!mvm->hw->wiphy->bands[band]) + continue; + + n_channels += mvm->hw->wiphy->bands[band]->n_channels; + } + + mvm->acs_survey = kzalloc(struct_size(mvm->acs_survey, + channels, n_channels), + GFP_KERNEL); + + if (!mvm->acs_survey) + return; + + mvm->acs_survey->n_channels = n_channels; + n_channels = 0; + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!mvm->hw->wiphy->bands[band]) + continue; + + mvm->acs_survey->bands[band] = + &mvm->acs_survey->channels[n_channels]; + n_channels += mvm->hw->wiphy->bands[band]->n_channels; + } + } + + band = iwl_mvm_nl80211_band_from_phy(le32_to_cpu(notif->band)); + chan_idx = iwl_mvm_chanidx_from_phy(mvm, band, + le32_to_cpu(notif->channel)); + if (WARN_ON_ONCE(chan_idx < 0)) + return; + + IWL_DEBUG_SCAN(mvm, "channel survey received for freq %d\n", + mvm->hw->wiphy->bands[band]->channels[chan_idx].center_freq); + + info = &mvm->acs_survey->bands[band][chan_idx]; + + /* Times are all in ms */ + info->time = le32_to_cpu(notif->active_time); + info->time_busy = le32_to_cpu(notif->busy_time); + info->time_rx = le32_to_cpu(notif->rx_time); + info->time_tx = le32_to_cpu(notif->tx_time); + info->noise = iwl_mvm_average_dbm_values(notif); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index 30d4233595e8..16285ae7cae9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation + * Copyright (C) 2013-2014, 2018-2019, 2022-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #include "mvm.h" @@ -232,6 +232,9 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, }; struct ieee80211_sta *sta = NULL; + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD)) + return 0; /* * Ignore the call if we are in HW Restart flow, or if the handled * vif is a p2p device. 
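The per-channel fields filled in by iwl_mvm_rx_channel_survey_notif() above line up one-to-one with cfg80211's struct survey_info, which is how ACS consumers would eventually read them. A sketch of the reporting side through mac80211's get_survey() op - the hook wiring is not part of this hunk and the per-band channel lookup is elided, so treat this as illustrative only:

    static int demo_get_survey(struct ieee80211_hw *hw, int idx,
                               struct survey_info *survey)
    {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        const struct iwl_mvm_acs_survey_channel *info;

        if (!mvm->acs_survey || idx >= mvm->acs_survey->n_channels)
            return -ENOENT;

        /* survey->channel would also need to be derived from idx here */
        info = &mvm->acs_survey->channels[idx];

        survey->time = info->time;
        survey->time_busy = info->time_busy;
        survey->time_rx = info->time_rx;
        survey->time_tx = info->time_tx;
        survey->noise = info->noise;
        survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY |
                         SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_TX |
                         SURVEY_INFO_NOISE_DBM;
        return 0;
    }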
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index c2e0cff740e9..7a4844ec3c10 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2015, 2018-2023 Intel Corporation + * Copyright (C) 2012-2015, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -29,7 +29,7 @@ int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) int sta_id; u32 reserved_ids = 0; - BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32); + BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32); WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); lockdep_assert_held(&mvm->mutex); @@ -47,7 +47,7 @@ int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) lockdep_is_held(&mvm->mutex))) return sta_id; } - return IWL_MVM_INVALID_STA; + return IWL_INVALID_STA; } /* Calculate the ampdu density and max size */ @@ -71,7 +71,7 @@ u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta, mpdu_dens = link_sta->ht_cap.ampdu_density; } - if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) { + if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) { /* overwrite HT values on 6 GHz */ mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); @@ -208,7 +208,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } if (sta->deflink.ht_cap.ht_supported || - mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) + mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ) add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); @@ -857,12 +857,6 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, size = iwl_mvm_get_queue_size(sta); } - /* take the min with bc tbl entries allowed */ - size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16)); - - /* size needs to be power of 2 values for calculating read/write pointers */ - size = rounddown_pow_of_two(size); - if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_link_sta *link_sta; @@ -887,22 +881,13 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, if (!sta_mask) return -EINVAL; - do { - queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask, - tid, size, timeout); - - if (queue < 0) - IWL_DEBUG_TX_QUEUES(mvm, - "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n", - size, sta_mask, tid, queue); - size /= 2; - } while (queue < 0 && size >= 16); - - if (queue < 0) - return queue; + queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask, + tid, size, timeout); - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n", - queue, sta_mask, tid); + if (queue >= 0) + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d for sta mask 0x%x tid %d\n", + queue, sta_mask, tid); return queue; } @@ -915,7 +900,7 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif); int queue = -1; lockdep_assert_held(&mvm->mutex); @@ -1095,7 +1080,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); - wdg_timeout = 
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif); ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); @@ -1231,7 +1216,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * can be unshared and finding one (and only one) that can be * reused. * This function is also invoked as a sort of clean-up task, - * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. + * in which case @alloc_for_sta is IWL_INVALID_STA. * * Returns the queue number, or -ENOSPC. */ @@ -1324,7 +1309,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) rcu_read_unlock(); - if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { + if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); if (ret) @@ -1345,7 +1330,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif); int queue = -1; u16 queue_tmp; unsigned long disable_agg_tids = 0; @@ -1535,9 +1520,14 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, add_stream_wk); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); + + /* will reschedule to run after restart */ + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) || + test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; - iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); + iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA); while (!list_empty(&mvm->add_stream_txqs)) { struct iwl_mvm_txq *mvmtxq; @@ -1579,8 +1569,6 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) iwl_mvm_mac_itxq_xmit(mvm->hw, txq); local_bh_enable(); } - - mutex_unlock(&mvm->mutex); } static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, @@ -1595,7 +1583,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, return 0; /* run the general cleanup/unsharing of queues */ - iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); + iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && @@ -1637,7 +1625,7 @@ void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int wdg = - iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); + iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif); int i; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvm_sta->deflink.sta_id, @@ -1771,7 +1759,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * this function */ if (!mvm->mld_api_is_used) { - if (WARN_ON(sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(sta_id == IWL_INVALID_STA)) return -EINVAL; mvm_sta->deflink.sta_id = sta_id; @@ -1847,6 +1835,18 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); + /* MPDUs are counted only when EMLSR is possible */ + if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + !sta->tdls && ieee80211_vif_is_mld(vif)) { + mvm_sta->mpdu_counters = + kcalloc(mvm->trans->num_rx_queues, + sizeof(*mvm_sta->mpdu_counters), + GFP_KERNEL); + if (mvm_sta->mpdu_counters) + for (int q = 0; q < mvm->trans->num_rx_queues; q++) + spin_lock_init(&mvm_sta->mpdu_counters[q].lock); + } + return 0; } @@ -1868,7 +1868,7 @@ int 
iwl_mvm_add_sta(struct iwl_mvm *mvm, else sta_id = mvm_sta->deflink.sta_id; - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return -ENOSPC; spin_lock_init(&mvm_sta->lock); @@ -1906,10 +1906,10 @@ update_fw: if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { - WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA); + WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA); mvmvif->deflink.ap_sta_id = sta_id; } else { - WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA); + WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA); } } @@ -2048,9 +2048,9 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, * Returns if we're done with removing the station, either * with error or success */ -bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, +void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_link_sta *link_sta, int *ret) + struct ieee80211_link_sta *link_sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif_link_info *mvm_link = @@ -2066,39 +2066,13 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_is_held(&mvm->mutex)); sta_id = mvm_link_sta->sta_id; - /* If there is a TXQ still marked as reserved - free it */ - if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { - u8 reserved_txq = mvm_sta->reserved_queue; - enum iwl_mvm_queue_status *status; - - /* - * If no traffic has gone through the reserved TXQ - it - * is still marked as IWL_MVM_QUEUE_RESERVED, and - * should be manually marked as free again - */ - status = &mvm->queue_info[reserved_txq].status; - if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && - (*status != IWL_MVM_QUEUE_FREE), - "sta_id %d reserved txq %d status %d", - sta_id, reserved_txq, *status)) { - *ret = -EINVAL; - return true; - } - - *status = IWL_MVM_QUEUE_FREE; - } - if (vif->type == NL80211_IFTYPE_STATION && mvm_link->ap_sta_id == sta_id) { - /* if associated - we can't remove the AP STA now */ - if (vif->cfg.assoc) - return true; - /* first remove remaining keys */ - iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0); + iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, + link_sta->link_id); - /* unassoc - go ahead - remove the AP STA now */ - mvm_link->ap_sta_id = IWL_MVM_INVALID_STA; + mvm_link->ap_sta_id = IWL_INVALID_STA; } /* @@ -2106,11 +2080,9 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * before the STA is removed. 
*/ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { - mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } - - return false; } int iwl_mvm_rm_sta(struct iwl_mvm *mvm, @@ -2146,8 +2118,27 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, iwl_mvm_disable_sta_queues(mvm, vif, sta); - if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret)) - return ret; + /* If there is a TXQ still marked as reserved - free it */ + if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { + u8 reserved_txq = mvm_sta->reserved_queue; + enum iwl_mvm_queue_status *status; + + /* + * If no traffic has gone through the reserved TXQ - it + * is still marked as IWL_MVM_QUEUE_RESERVED, and + * should be manually marked as free again + */ + status = &mvm->queue_info[reserved_txq].status; + if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && + (*status != IWL_MVM_QUEUE_FREE), + "sta_id %d reserved txq %d status %d", + mvm_sta->deflink.sta_id, reserved_txq, *status)) + return -EINVAL; + + *status = IWL_MVM_QUEUE_FREE; + } + + iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL); @@ -2173,9 +2164,9 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, u8 type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || - sta->sta_id == IWL_MVM_INVALID_STA) { + sta->sta_id == IWL_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); - if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA)) return -ENOSPC; } @@ -2191,7 +2182,7 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); - sta->sta_id = IWL_MVM_INVALID_STA; + sta->sta_id = IWL_INVALID_STA; } static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, @@ -2309,7 +2300,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id, @@ -2327,7 +2318,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id, @@ -2362,7 +2353,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int queue; int ret; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + iwl_mvm_get_wd_timeout(mvm, vif); struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_VO, .sta_id = mvmvif->deflink.bcast_sta.sta_id, @@ -2392,7 +2383,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; - if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, @@ -2571,7 +2562,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; - unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + 
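/*
 * Editor's note: iwl_mvm_get_wd_timeout() drops its last two bool
 * parameters throughout this series (the old signature took a "tdls"
 * flag and a command-queue flag, judging by the old call sites), so
 * every caller shrinks from (mvm, vif, false, false) or
 * (mvm, vif, sta->tdls, false) to plain (mvm, vif).
 */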
unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif); int ret; lockdep_assert_held(&mvm->mutex); @@ -2647,7 +2638,7 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, u32 status; /* This is a valid situation for GTK removal */ - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & @@ -2746,7 +2737,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, */ WARN_ON(1); - for (j = 0; j < reorder_buf->buf_size; j++) + for (j = 0; j < data->buf_size; j++) __skb_queue_purge(&entries[j].frames); spin_unlock_bh(&reorder_buf->lock); @@ -2755,7 +2746,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, - u16 ssn, u16 buf_size) + u16 ssn) { int i; @@ -2768,12 +2759,10 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; - reorder_buf->buf_size = buf_size; spin_lock_init(&reorder_buf->lock); - reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; - for (j = 0; j < reorder_buf->buf_size; j++) + for (j = 0; j < data->buf_size; j++) __skb_queue_head_init(&entries[j].frames); } } @@ -2836,7 +2825,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), }; - u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD), + .flags = CMD_SEND_IN_RFKILL, + .len[0] = sizeof(cmd), + .data[0] = &cmd, + }; int ret; BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); @@ -2848,7 +2842,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, cmd.alloc.ssn = cpu_to_le16(ssn); cmd.alloc.win_size = cpu_to_le16(buf_size); baid = -EIO; - } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { + } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) { cmd.remove_v1.baid = cpu_to_le32(baid); BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); } else { @@ -2857,8 +2851,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, cmd.remove.tid = cpu_to_le32(tid); } - ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), - &cmd, &baid); + ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid); if (ret) return ret; @@ -2913,7 +2906,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* * The division below will be OK if either the cache line size * can be divided by the entry size (ALIGN will round up) or if - * if the entry size can be divided by the cache line size, in + * the entry size can be divided by the cache line size, in * which case the ALIGN() will do nothing. 
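 *
 * Editor's worked example (assuming a 64-byte cache line): an entry
 * size of 16 gives 64 % 16 == 0, so ALIGN() rounds up to a whole
 * cache line; an entry size of 128 gives 128 % 64 == 0, so ALIGN()
 * is a no-op. A hypothetical 48-byte entry fails both divisions and
 * would trip the BUILD_BUG_ON() that follows at compile time.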
*/ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && @@ -2978,13 +2971,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, baid_data->mvm = mvm; baid_data->tid = tid; baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); + baid_data->buf_size = buf_size; mvm_sta->tid_to_baid[tid] = baid; if (timeout) mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); - iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); + iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn); /* * protect the BA data with RCU to cover a case where our * internal RX sync mechanism will timeout (not that it's @@ -3017,16 +3011,6 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); - - /* - * After we've deleted it, do another queue sync - * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently - * running it won't find a new session in the old - * BAID. It can find the NULL pointer for the BAID, - * but we must not have it find a different session. - */ - iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, - true, NULL, 0); } return 0; @@ -3217,7 +3201,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); + iwl_mvm_get_wd_timeout(mvm, vif); int queue, ret; bool alloc_queue = true; enum iwl_mvm_queue_status queue_status; @@ -3524,7 +3508,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) { u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], @@ -3579,7 +3563,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, new_api ? 
2 : 1); - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return -EINVAL; keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & @@ -3587,6 +3571,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, key_flags = cpu_to_le16(keyidx); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); + if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU) + key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP); + switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); @@ -3735,7 +3722,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, if (remove_key) { /* This is a valid situation for IGTK */ - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return 0; igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); @@ -3802,7 +3789,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) { u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); @@ -3872,7 +3859,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; - u8 sta_id = IWL_MVM_INVALID_STA; + u8 sta_id = IWL_INVALID_STA; int ret; static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; @@ -3973,7 +3960,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; - u8 sta_id = IWL_MVM_INVALID_STA; + u8 sta_id = IWL_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); @@ -4280,7 +4267,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, return; /* Need to block/unblock also multicast station */ - if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->deflink.mcast_sta, disable); @@ -4289,7 +4276,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, * Only unblock the broadcast station (FW blocks it for immediate * quiet, not the driver) */ - if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->deflink.bcast_sta, disable); @@ -4326,16 +4313,19 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, - u8 *key, u32 key_len) + u8 *key, u32 key_len, + struct ieee80211_key_conf *keyconf) { int ret; u16 queue; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_key_conf *keyconf; unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + iwl_mvm_get_wd_timeout(mvm, vif); bool mld = iwl_mvm_has_mld_api(mvm->fw); - u32 type = mld ? 
STATION_TYPE_PEER : IWL_STA_LINK; + u32 type = IWL_STA_LINK; + + if (mld) + type = STATION_TYPE_PEER; ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, NL80211_IFTYPE_UNSPECIFIED, type); @@ -4356,12 +4346,6 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) goto out; - keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL); - if (!keyconf) { - ret = -ENOBUFS; - goto out; - } - keyconf->cipher = cipher; memcpy(keyconf->key, key, key_len); keyconf->keylen = key_len; @@ -4382,10 +4366,9 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 0, NULL, 0, 0, true); } - kfree(keyconf); - return 0; out: - iwl_mvm_dealloc_int_sta(mvm, sta); + if (ret) + iwl_mvm_dealloc_int_sta(mvm, sta); return ret; } @@ -4406,3 +4389,80 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "Failed to cancel the channel switch\n"); } + +static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif, + u8 fw_sta_id) +{ + struct ieee80211_link_sta *link_sta = + rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]); + struct iwl_mvm_vif_link_info *link; + + if (WARN_ON_ONCE(!link_sta)) + return -EINVAL; + + link = mvmvif->link[link_sta->link_id]; + + if (WARN_ON_ONCE(!link)) + return -EINVAL; + + return link->fw_link_id; +} + +#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ) + +void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count, + bool tx, int queue) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif); + struct iwl_mvm *mvm = mvmvif->mvm; + struct iwl_mvm_tpt_counter *queue_counter; + struct iwl_mvm_mpdu_counter *link_counter; + u32 total_mpdus = 0; + int fw_link_id; + + /* Count only for a BSS sta, and only when EMLSR is possible */ + if (!mvm_sta->mpdu_counters) + return; + + /* Map sta id to link id */ + fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id); + if (fw_link_id < 0) + return; + + queue_counter = &mvm_sta->mpdu_counters[queue]; + link_counter = &queue_counter->per_link[fw_link_id]; + + spin_lock_bh(&queue_counter->lock); + + if (tx) + link_counter->tx += count; + else + link_counter->rx += count; + + /* + * When not in EMLSR, the window and the decision to enter EMLSR are + * handled during counting, when in EMLSR - in the statistics flow + */ + if (mvmvif->esr_active) + goto out; + + if (time_is_before_jiffies(queue_counter->window_start + + IWL_MVM_TPT_COUNT_WINDOW)) { + memset(queue_counter->per_link, 0, + sizeof(queue_counter->per_link)); + queue_counter->window_start = jiffies; + + IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n"); + } + + for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++) + total_mpdus += tx ? queue_counter->per_link[i].tx : + queue_counter->per_link[i].rx; + + if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH) + wiphy_work_queue(mvmvif->mvm->hw->wiphy, + &mvmvif->unblock_esr_tpt_wk); + +out: + spin_unlock_bh(&queue_counter->lock); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 3cf8a70274ce..6856f7440ef3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -12,7 +12,7 @@ #include <linux/wait.h> #include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ -#include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */ +#include "fw-api.h" /* IWL_STATION_COUNT_MAX */ #include "rs.h" struct iwl_mvm; @@ -133,7 +133,7 @@ struct iwl_mvm_vif; * and no TID data as this is also not needed. 
* One thing to note, is that these stations have an ID in the fw, but not * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id - * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of + * we fill ERR_PTR(-EINVAL) in this mapping and all other dereferencing of * pointers from this mapping need to check that the value is not error * or NULL. * @@ -347,6 +347,24 @@ struct iwl_mvm_link_sta { u8 avg_energy; }; +struct iwl_mvm_mpdu_counter { + u32 tx; + u32 rx; +}; + +/** + * struct iwl_mvm_tpt_counter - per-queue MPDU counter + * + * @lock: Needed to protect the counters when modified from statistics. + * @per_link: per-link counters. + * @window_start: timestamp of the counting-window start + */ +struct iwl_mvm_tpt_counter { + spinlock_t lock; + struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID]; + unsigned long window_start; +} ____cacheline_aligned_in_smp; + /** * struct iwl_mvm_sta - representation of a station in the driver * @vif: the interface the station belongs to @@ -394,6 +412,7 @@ struct iwl_mvm_link_sta { * @link: per link sta entries. For non-MLO only link[0] holds data. For MLO, * link[0] points to deflink and link[link_id] is allocated when new link * sta is added. + * @mpdu_counters: RX/TX MPDUs counters for each queue. * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -433,6 +452,8 @@ struct iwl_mvm_sta { struct iwl_mvm_link_sta deflink; struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + + struct iwl_mvm_tpt_counter *mpdu_counters; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); @@ -457,7 +478,7 @@ struct iwl_mvm_int_sta { }; /** - * Send the STA info to the FW. + * iwl_mvm_sta_send_to_fw - Send the STA info to the FW. 
* * @mvm: the iwl_mvm* to use * @sta: the STA @@ -486,9 +507,9 @@ void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, struct ieee80211_sta *sta); int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta); -bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, +void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_link_sta *link_sta, int *ret); + struct ieee80211_link_sta *link_sta); int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -514,6 +535,9 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count, + bool tx, int queue); + /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); @@ -575,7 +599,8 @@ int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, - u8 *key, u32 key_len); + u8 *key, u32 key_len, + struct ieee80211_key_conf *key_conf_out); void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 id); @@ -637,6 +662,10 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id); int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id); int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index e7d5f4ebeb25..36379b738de1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2022-2023 Intel Corporation + * Copyright (C) 2018-2020, 2022-2024 Intel Corporation */ #include <linux/etherdevice.h> #include "mvm.h" @@ -24,7 +24,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); - if (!sta || IS_ERR(sta) || !sta->tdls) + if (IS_ERR_OR_NULL(sta) || !sta->tdls) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -47,7 +47,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); - if (!sta || IS_ERR(sta) || !sta->tdls) + if (IS_ERR_OR_NULL(sta) || !sta->tdls) continue; if (vif) { @@ -151,7 +151,7 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; /* Protect the session to hear the TDLS setup response on the channel */ - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 
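/*
 * Editor's note on the guard(mvm)(mvm) conversions in this file: this
 * is scope-based locking from <linux/cleanup.h>; the mvm mutex is
 * taken at the guard statement and released automatically on every
 * exit path, which is why the mutex_unlock() calls and "out:" labels
 * disappear in these hunks. A minimal sketch of the mechanism (the
 * driver's real guard class is defined elsewhere; this only shows the
 * shape of it):
 *
 *   DEFINE_GUARD(mvm, struct iwl_mvm *,
 *                mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))
 *
 *   static void example(struct iwl_mvm *mvm)
 *   {
 *           guard(mvm)(mvm);        // locked from here on
 *           if (some_error)
 *                   return;         // unlocked automatically
 *   }                               // unlocked automatically
 */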
iwl_mvm_schedule_session_protection(mvm, vif, duration, @@ -159,7 +159,6 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, else iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); - mutex_unlock(&mvm->mutex); } static const char * @@ -197,7 +196,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm); if (state == IWL_MVM_TDLS_SW_IDLE) - mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.cur_sta_id = IWL_INVALID_STA; } void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) @@ -251,7 +250,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, /* get the existing peer if it's there */ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && - mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { + mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) { struct ieee80211_sta *sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); @@ -460,21 +459,21 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) int ret; mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* called after an active channel switch has finished or timed-out */ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); /* station might be gone, in that case do nothing */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) - goto out; + if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA) + return; sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ - if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) - goto out; + if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) + return; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; @@ -493,8 +492,6 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) /* retry after a DTIM if we failed sending now */ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); -out: - mutex_unlock(&mvm->mutex); } int @@ -509,18 +506,17 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, unsigned int delay; int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", sta->addr, chandef->chan->center_freq, chandef->width); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) { + if (mvm->tdls_cs.peer.sta_id != IWL_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "Existing peer. Can't start switch with %pM\n", sta->addr); - ret = -EBUSY; - goto out; + return -EBUSY; } ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, @@ -529,17 +525,15 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, oper_class, chandef, 0, 0, 0, tmpl_skb, ch_sw_tm_ie); if (ret) - goto out; + return ret; /* * Mark the peer as "in tdls switch" for this vif. We only allow a * single such peer per vif. 
*/ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL); - if (!mvm->tdls_cs.peer.skb) { - ret = -ENOMEM; - goto out; - } + if (!mvm->tdls_cs.peer.skb) + return -ENOMEM; mvmsta = iwl_mvm_sta_from_mac80211(sta); mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id; @@ -556,10 +550,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, vif->bss_conf.beacon_int); mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); - -out: - mutex_unlock(&mvm->mutex); - return ret; + return 0; } void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, @@ -575,7 +566,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) { + if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); goto out; } @@ -596,7 +587,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) wait_for_phy = true; - mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA; dev_kfree_skb(mvm->tdls_cs.peer.skb); mvm->tdls_cs.peer.skb = NULL; @@ -626,7 +617,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? "REQ" : "RESP"; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); IWL_DEBUG_TDLS(mvm, "Received TDLS ch switch action %s from %pM status %d\n", @@ -639,7 +630,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 && mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && - mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { + mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) { struct ieee80211_sta *cur_sta; /* make sure it's the same peer */ @@ -670,5 +661,4 @@ retry: 1024 / 1000; mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); - mutex_unlock(&mvm->mutex); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile new file mode 100644 index 000000000000..6bd56a28cffd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile @@ -0,0 +1,3 @@ +iwlmvm-tests-y += module.o links.o scan.o + +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c new file mode 100644 index 000000000000..d692f1813d44 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include <net/mac80211.h> +#include "../mvm.h" +#include <kunit/test.h> + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static struct wiphy wiphy = { + .mtx = __MUTEX_INITIALIZER(wiphy.mtx), +}; + +static struct ieee80211_hw hw = { + .wiphy = &wiphy, +}; + +static struct ieee80211_channel chan_5ghz = { + .band = NL80211_BAND_5GHZ, +}; + +static struct ieee80211_channel chan_6ghz = { + .band = NL80211_BAND_6GHZ, +}; + +static struct ieee80211_channel chan_2ghz = { + .band = NL80211_BAND_2GHZ, +}; + +static struct cfg80211_chan_def chandef_a = {}; + +static struct cfg80211_chan_def chandef_b = {}; + +static struct iwl_mvm_phy_ctxt ctx = {}; + +static struct iwl_mvm_vif_link_info 
mvm_link = { + .phy_ctxt = &ctx, + .active = true +}; + +static struct cfg80211_bss bss = {}; + +static struct ieee80211_bss_conf link_conf = {.bss = &bss}; + +static const struct iwl_fw_cmd_version entry = { + .group = LEGACY_GROUP, + .cmd = BT_PROFILE_NOTIFICATION, + .notif_ver = 4 +}; + +static struct iwl_fw fw = { + .ucode_capa = { + .n_cmd_versions = 1, + .cmd_versions = &entry, + }, +}; + +static struct iwl_mvm mvm = { + .hw = &hw, + .fw = &fw, +}; + +static const struct link_grading_case { + const char *desc; + const struct cfg80211_chan_def chandef; + s32 signal; + s16 channel_util; + int chan_load_by_us; + unsigned int grade; +} link_grading_cases[] = { + { + .desc = "UHB, RSSI below range, no factors", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -100, + .grade = 177, + }, + { + .desc = "LB, RSSI in range, no factors", + .chandef = { + .chan = &chan_2ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -84, + .grade = 344, + }, + { + .desc = "HB, RSSI above range, no factors", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -50, + .grade = 3442, + }, + { + .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -66, + .channel_util = 51, + .grade = 1836, + }, + { + .desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor", + .chandef = { + .chan = &chan_2ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -61, + .channel_util = 51, + .chan_load_by_us = 10, + .grade = 2061, + }, + { + .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -66, + .channel_util = 102, + .chan_load_by_us = 50, + .grade = 1552, + }, + { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_80, + .punctured = 0x0000 + }, + .signal = -72, + .grade = 1750, + }, + { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_160, + .punctured = 0x3 + }, + .signal = -72, + .grade = 1312, + }, + { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_320, + .punctured = 0x3 + }, + .signal = -72, + .grade = 1806, + }, + { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_160, + .punctured = 0x3 + }, + .channel_util = 51, + .chan_load_by_us = 10, + .signal = -72, + .grade = 1179, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc) + +static void setup_link_conf(struct kunit *test) +{ + const struct link_grading_case *params = test->param_value; + size_t vif_size = sizeof(struct ieee80211_vif) + + sizeof(struct iwl_mvm_vif); + struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL); + struct ieee80211_bss_load_elem *bss_load; + struct element *element; + size_t ies_size = sizeof(struct cfg80211_bss_ies) + sizeof(*bss_load) + sizeof(element); + struct cfg80211_bss_ies *ies; + struct iwl_mvm_vif *mvmvif; + + KUNIT_ASSERT_NOT_NULL(test, vif); + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (params->chan_load_by_us > 0) { + 
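/*
 * Editor's note on the parameterized-test plumbing in this file:
 * KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc)
 * above generates link_grading_gen_params(), which the
 * KUNIT_CASE_PARAM() entry near the end of the suite uses to run
 * test_link_grading() once per array element, labelling each run with
 * that case's .desc string. The same pattern with hypothetical names:
 *
 *   static const struct my_case {
 *           const char *desc;
 *           int in, out;
 *   } my_cases[] = {
 *           { "doubles one", 1, 2 },
 *           { "doubles two", 2, 4 },
 *   };
 *   KUNIT_ARRAY_PARAM_DESC(my, my_cases, desc)
 *
 *   static void test_my(struct kunit *test)
 *   {
 *           const struct my_case *p = test->param_value;
 *
 *           KUNIT_EXPECT_EQ(test, p->in * 2, p->out);
 *   }
 *   // registered with KUNIT_CASE_PARAM(test_my, my_gen_params)
 */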
ctx.channel_load_by_us = params->chan_load_by_us; + mvmvif->link[0] = &mvm_link; + } + + link_conf.vif = vif; + link_conf.chanreq.oper = params->chandef; + bss.signal = DBM_TO_MBM(params->signal); + + ies = kunit_kzalloc(test, ies_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, ies); + ies->len = sizeof(*bss_load) + sizeof(struct element); + + element = (void *)ies->data; + element->datalen = sizeof(*bss_load); + element->id = 11; + + bss_load = (void *)element->data; + bss_load->channel_util = params->channel_util; + + rcu_assign_pointer(bss.ies, ies); + rcu_assign_pointer(bss.beacon_ies, ies); +} + +static void test_link_grading(struct kunit *test) +{ + const struct link_grading_case *params = test->param_value; + unsigned int ret; + + setup_link_conf(test); + + rcu_read_lock(); + ret = iwl_mvm_get_link_grade(&link_conf); + rcu_read_unlock(); + + KUNIT_EXPECT_EQ(test, ret, params->grade); + + kunit_kfree(test, link_conf.vif); + RCU_INIT_POINTER(bss.ies, NULL); +} + +static struct kunit_case link_grading_test_cases[] = { + KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params), + {} +}; + +static struct kunit_suite link_grading = { + .name = "iwlmvm-link-grading", + .test_cases = link_grading_test_cases, +}; + +kunit_test_suite(link_grading); + +static const struct valid_link_pair_case { + const char *desc; + bool bt; + struct ieee80211_channel *chan_a; + struct ieee80211_channel *chan_b; + enum nl80211_chan_width cw_a; + enum nl80211_chan_width cw_b; + s32 sig_a; + s32 sig_b; + bool csa_a; + bool valid; +} valid_link_pair_cases[] = { + { + .desc = "HB + UHB, valid.", + .chan_a = &chan_6ghz, + .chan_b = &chan_5ghz, + .valid = true, + }, + { + .desc = "LB + HB, no BT.", + .chan_a = &chan_2ghz, + .chan_b = &chan_5ghz, + .valid = true, + }, + { + .desc = "LB + HB, with BT.", + .bt = true, + .chan_a = &chan_2ghz, + .chan_b = &chan_5ghz, + .valid = false, + }, + { + .desc = "Same band", + .chan_a = &chan_2ghz, + .chan_b = &chan_2ghz, + .valid = false, + }, + { + .desc = "RSSI: LB, 20 MHz, low", + .chan_a = &chan_2ghz, + .cw_a = NL80211_CHAN_WIDTH_20, + .sig_a = -68, + .chan_b = &chan_5ghz, + .valid = false, + }, + { + .desc = "RSSI: UHB, 20 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_20, + .sig_a = -66, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_20, + .valid = true, + }, + { + .desc = "RSSI: UHB, 40 MHz, low", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_40, + .sig_a = -65, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_40, + .valid = false, + }, + { + .desc = "RSSI: UHB, 40 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_40, + .sig_a = -63, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_40, + .valid = true, + }, + { + .desc = "RSSI: UHB, 80 MHz, low", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_80, + .sig_a = -62, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_80, + .valid = false, + }, + { + .desc = "RSSI: UHB, 80 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_80, + .sig_a = -60, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_80, + .valid = true, + }, + { + .desc = "RSSI: UHB, 160 MHz, low", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_160, + .sig_a = -59, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_160, + .valid = false, + }, + { + .desc = "RSSI: HB, 160 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_160, + .sig_a = -5, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_160, + .valid = true, + }, + { + .desc = "CSA active", + .chan_a = 
&chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_160, + .sig_a = -5, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_160, + .valid = false, + /* same as previous entry with valid=true except for CSA */ + .csa_a = true, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(valid_link_pair, valid_link_pair_cases, desc) + +static void test_valid_link_pair(struct kunit *test) +{ + const struct valid_link_pair_case *params = test->param_value; + size_t vif_size = sizeof(struct ieee80211_vif) + + sizeof(struct iwl_mvm_vif); + struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL); + struct iwl_trans *trans = kunit_kzalloc(test, sizeof(struct iwl_trans), + GFP_KERNEL); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_link_sel_data link_a = { + .chandef = &chandef_a, + .link_id = 1, + .signal = params->sig_a, + }; + struct iwl_mvm_link_sel_data link_b = { + .chandef = &chandef_b, + .link_id = 5, + .signal = params->sig_b, + }; + struct ieee80211_bss_conf *conf; + bool result; + + KUNIT_ASSERT_NOT_NULL(test, vif); + KUNIT_ASSERT_NOT_NULL(test, trans); + + chandef_a.chan = params->chan_a; + chandef_b.chan = params->chan_b; + + chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20; + chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20; + + mvm.trans = trans; + + mvm.last_bt_notif.wifi_loss_low_rssi = params->bt; + mvmvif->mvm = &mvm; + + conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, conf); + conf->chanreq.oper = chandef_a; + conf->csa_active = params->csa_a; + vif->link_conf[link_a.link_id] = (void __rcu *)conf; + + conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, conf); + conf->chanreq.oper = chandef_b; + vif->link_conf[link_b.link_id] = (void __rcu *)conf; + + wiphy_lock(&wiphy); + result = iwl_mvm_mld_valid_link_pair(vif, &link_a, &link_b); + wiphy_unlock(&wiphy); + + KUNIT_EXPECT_EQ(test, result, params->valid); + + kunit_kfree(test, vif); + kunit_kfree(test, trans); +} + +static struct kunit_case valid_link_pair_test_cases[] = { + KUNIT_CASE_PARAM(test_valid_link_pair, valid_link_pair_gen_params), + {}, +}; + +static struct kunit_suite valid_link_pair = { + .name = "iwlmvm-valid-link-pair", + .test_cases = valid_link_pair_test_cases, +}; + +kunit_test_suite(valid_link_pair); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c new file mode 100644 index 000000000000..f556acbac77f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This is just module boilerplate for the iwlmvm kunit module. 
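 * (Editor's note: each suite registers itself via kunit_test_suite()
 * in its own source file, and the Makefile above links module.o,
 * links.o and scan.o into a single iwlmvm-tests module, so this file
 * only carries the module metadata tags below.)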
+ * + * Copyright (C) 2024 Intel Corporation + */ +#include <linux/module.h> + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("kunit tests for iwlmvm"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c new file mode 100644 index 000000000000..7a3275199ace --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include <net/mac80211.h> +#include "../mvm.h" +#include <kunit/test.h> + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static const struct acs_average_db_case { + const char *desc; + u8 neg_dbm[22]; + s8 result; +} acs_average_db_cases[] = { + { + .desc = "Smallest possible value, all filled", + .neg_dbm = { + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128 + }, + .result = -128, + }, + { + .desc = "Biggest possible value, all filled", + .neg_dbm = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + }, + .result = 0, + }, + { + .desc = "Smallest possible value, partial filled", + .neg_dbm = { + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, + }, + .result = -128, + }, + { + .desc = "Biggest possible value, partial filled", + .neg_dbm = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, + }, + .result = 0, + }, + { + .desc = "Adding -80dBm to -75dBm until it is still rounded to -79dBm", + .neg_dbm = { + 75, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff, 0xff, + 0xff, 0xff, + }, + .result = -79, + }, + { + .desc = "Adding -80dBm to -75dBm until it is just rounded to -80dBm", + .neg_dbm = { + 75, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff, + 0xff, 0xff, + }, + .result = -80, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(acs_average_db, acs_average_db_cases, desc) + +static void test_acs_average_db(struct kunit *test) +{ + const struct acs_average_db_case *params = test->param_value; + struct iwl_umac_scan_channel_survey_notif notif; + int i; + + /* Test the values in the given order */ + for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++) + notif.noise[i] = params->neg_dbm[i]; + KUNIT_ASSERT_EQ(test, + iwl_mvm_average_dbm_values(¬if), + params->result); + + /* Test in reverse order */ + for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++) + notif.noise[ARRAY_SIZE(params->neg_dbm) - i - 1] = + params->neg_dbm[i]; + KUNIT_ASSERT_EQ(test, + iwl_mvm_average_dbm_values(¬if), + params->result); +} + +static struct kunit_case acs_average_db_case[] = { + KUNIT_CASE_PARAM(test_acs_average_db, acs_average_db_gen_params), + {} +}; + +static struct kunit_suite acs_average_db = { + .name = "iwlmvm-acs-average-db", + .test_cases = acs_average_db_case, +}; + +kunit_test_suite(acs_average_db); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 2e653a417d62..ebfa88b38b71 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -45,32 +45,29 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, te_data->link_id = -1; } -void iwl_mvm_roc_done_wk(struct work_struct *wk) +static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) { - struct iwl_mvm 
*mvm = container_of(wk, struct iwl_mvm, roc_done_wk); + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); + struct ieee80211_vif *vif = mvm->p2p_device_vif; + + lockdep_assert_held(&mvm->mutex); /* - * Clear the ROC_RUNNING status bit. + * Clear the ROC_P2P_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular - * in the case that the time event actually completed in the firmware - * (which is handled in iwl_mvm_te_handle_notif). - */ - clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); - - synchronize_net(); - - /* - * Flush the offchannel queue -- this is called when the time + * in the case that the time event actually completed in the firmware. + * + * Also flush the offchannel queue -- this is called when the time * event finishes or is canceled, so that frames queued for it * won't get stuck on the queue and be transmitted in the next * time event. */ - - mutex_lock(&mvm->mutex); - if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status)) { struct iwl_mvm_vif *mvmvif; + synchronize_net(); + /* * NB: access to this pointer would be racy, but the flush bit * can only be set when we had a P2P-Device VIF, and we have a @@ -78,9 +75,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * not really racy. */ - if (!WARN_ON(!mvm->p2p_device_vif)) { - struct ieee80211_vif *vif = mvm->p2p_device_vif; - + if (!WARN_ON(!vif)) { mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id, mvmvif->deflink.bcast_sta.tfd_queue_msk); @@ -106,6 +101,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) } /* + * P2P AUX ROC and HS2.0 ROC do not run simultaneously. * Clear the ROC_AUX_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular @@ -113,26 +109,36 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * (which is handled in iwl_mvm_te_handle_notif). */ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { - /* do the same in case of hot spot 2.0 */ + synchronize_net(); + iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id, mvm->aux_sta.tfd_queue_msk); - if (mvm->mld_api_is_used) { - iwl_mvm_mld_rm_aux_sta(mvm); - goto out_unlock; - } - /* In newer version of this command an aux station is added only * in cases of dedicated tx queue and need to be removed in end - * of use */ - if (iwl_mvm_has_new_station_api(mvm->fw)) + * of use. For the even newer mld api, use the appropriate + * function. + */ + if (mvm->mld_api_is_used) + iwl_mvm_mld_rm_aux_sta(mvm); + else if (iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); } -out_unlock: + if (!IS_ERR_OR_NULL(bss_vif)) + iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC); mutex_unlock(&mvm->mutex); } +void iwl_mvm_roc_done_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); + + mutex_lock(&mvm->mutex); + /* Mutex is released inside */ + iwl_mvm_cleanup_roc(mvm); +} + static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) { /* @@ -163,12 +169,12 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm) * So we just do nothing here and the switch * will be performed on the last TBTT. 
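 *
 * (Editor's note: the new 0 passed to
 * ieee80211_beacon_cntdwn_is_complete() and ieee80211_csa_finish()
 * below is a link ID; this CSA-on-NoA path is not MLO-aware, so it
 * always operates on link 0.)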
*/ - if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) { + if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) { IWL_WARN(mvm, "CSA NOA started too early\n"); goto out_unlock; } - ieee80211_csa_finish(csa_vif); + ieee80211_csa_finish(csa_vif, 0); rcu_read_unlock(); @@ -214,6 +220,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); + + mvmvif->session_prot_connection_loss = true; } iwl_mvm_connection_loss(mvm, vif, errmsg); @@ -294,18 +302,6 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, } } -static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm) -{ - /* - * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the - * roc_done_wk is already scheduled or running, so don't schedule it - * again to avoid a race where the roc_done_wk clears this bit after - * it is set here, affecting the next run of the roc_done_wk. - */ - if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) - iwl_mvm_roc_finished(mvm); -} - /* * Handles a FW notification for an event that is known to the driver. * @@ -357,7 +353,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, switch (te_data->vif->type) { case NL80211_IFTYPE_P2P_DEVICE: ieee80211_remain_on_channel_expired(mvm->hw); - iwl_mvm_p2p_roc_finished(mvm); + iwl_mvm_roc_finished(mvm); break; case NL80211_IFTYPE_STATION: /* @@ -390,7 +386,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { - set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); @@ -400,14 +396,51 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, } } +struct iwl_mvm_rx_roc_iterator_data { + u32 activity; + bool end_activity; + bool found; +}; + +static void iwl_mvm_rx_roc_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_rx_roc_iterator_data *data = _data; + + if (mvmvif->roc_activity == data->activity) { + data->found = true; + if (data->end_activity) + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + } +} + void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_roc_notif *notif = (void *)pkt->data; + u32 activity = le32_to_cpu(notif->activity); + bool started = le32_to_cpu(notif->success) && + le32_to_cpu(notif->started); + struct iwl_mvm_rx_roc_iterator_data data = { + .activity = activity, + .end_activity = !started, + }; - if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) && - le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) { + /* Clear vif roc_activity if done (set to ROC_NUM_ACTIVITIES) */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_rx_roc_iterator, + &data); + /* + * It is possible that the ROC was canceled + * but the notification was already fired. 
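 * In that case the iterator above finds no vif with a matching
 * roc_activity, data.found stays false, and the stale notification
 * is dropped just below.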
+ */ + if (!data.found) + return; + + if (started) { set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); } else { @@ -692,7 +725,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, /* Determine whether mac or link id should be used, and validate the link id */ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 link_id) + s8 link_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ver = iwl_fw_lookup_cmd_ver(mvm->fw, @@ -706,8 +739,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm, "Invalid link ID for session protection: %u\n", link_id)) return -EINVAL; - if (WARN(ieee80211_vif_is_mld(vif) && - !(vif->active_links & BIT(link_id)), + if (WARN(!mvmvif->link[link_id]->active, "Session Protection on an inactive link: %u\n", link_id)) return -EINVAL; @@ -716,10 +748,10 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm, static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 id, u32 link_id) + u32 id, s8 link_id) { int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id); - struct iwl_mvm_session_prot_cmd cmd = { + struct iwl_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(mac_link_id), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), .conf_id = cpu_to_le32(id), @@ -737,6 +769,21 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret); } +static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) +{ + struct iwl_roc_req roc_cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .activity = cpu_to_le32(activity), + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0, + sizeof(roc_cmd), &roc_cmd); + if (ret) + IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret); +} + static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, u32 *uid) @@ -745,7 +792,10 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif = te_data->vif; struct iwl_mvm_vif *mvmvif; enum nl80211_iftype iftype; - unsigned int link_id; + s8 link_id; + bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm); + u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); if (!vif) return false; @@ -770,20 +820,28 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); - /* When session protection is used, the te_data->id field - * is reused to save session protection's configuration. - * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set - * to HOT_SPOT_CMD. - */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) && - id != HOT_SPOT_CMD) { + if ((p2p_aux && iftype == NL80211_IFTYPE_P2P_DEVICE) || + (roc_ver >= 3 && mvmvif->roc_activity == ROC_ACTIVITY_HOTSPOT)) { + if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) { + iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity); + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + iwl_mvm_roc_finished(mvm); + } + return false; + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) && + id != HOT_SPOT_CMD) { + /* When session protection is used, the te_data->id field + * is reused to save session protection's configuration. + * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id + * field is set to HOT_SPOT_CMD. 
+ */ if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { /* Session protection is still ongoing. Cancel it */ iwl_mvm_cancel_session_protection(mvm, vif, id, link_id); if (iftype == NL80211_IFTYPE_P2P_DEVICE) { - iwl_mvm_p2p_roc_finished(mvm); + iwl_mvm_roc_finished(mvm); } } return false; @@ -897,11 +955,10 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data; + struct iwl_session_prot_notif *notif = (void *)pkt->data; unsigned int ver = - iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(MAC_CONF_GROUP, - SESSION_PROTECTION_CMD), 2); + iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + SESSION_PROTECTION_NOTIF, 2); int id = le32_to_cpu(notif->mac_link_id); struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; @@ -929,7 +986,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 && mvmvif->time_event_data.link_id != notif_link_id, - "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n", + "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n", notif_link_id, mvmvif->time_event_data.link_id)) goto out_unlock; @@ -973,13 +1030,15 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; mvmvif->time_event_data.link_id = -1; - iwl_mvm_p2p_roc_finished(mvm); + /* set the bit so the ROC cleanup will actually clean up */ + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); + iwl_mvm_roc_finished(mvm); ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { if (WARN_ON(mvmvif->time_event_data.id != le32_to_cpu(notif->conf_id))) goto out_unlock; - set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } @@ -987,6 +1046,103 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, rcu_read_unlock(); } +#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) +#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) +#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) +#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) +#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) + +void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, + u32 duration_ms, + u32 *duration_tu, + u32 *delay) +{ + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + u32 dtim_interval = 0; + + *delay = AUX_ROC_MIN_DELAY; + *duration_tu = MSEC_TO_TU(duration_ms); + + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + dtim_interval = + max_t(u32, dtim_interval, + link_conf->dtim_period * link_conf->beacon_int); + } + rcu_read_unlock(); + + /* + * If we are associated we want the delay time to be at least one + * dtim interval so that the FW can wait until after the DTIM and + * then start the time event, this will potentially allow us to + * remain off-channel for the max duration. + * Since we want to use almost a whole dtim interval we would also + * like the delay to be for 2-3 dtim intervals, in case there are + * other time events with higher priority. + * dtim_interval should never be 0, it can be 1 if we don't know it + * (we haven't heard any beacon yet). 
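 *
 * Editor's worked example (hypothetical numbers): beacon_int = 100 TU
 * and dtim_period = 2 give dtim_interval = 200 TU. For a 600 ms
 * request (MSEC_TO_TU gives ~585 TU), delay = min(3 * 200,
 * AUX_ROC_MAX_DELAY ~585) = ~585 TU; since dtim_interval (200) is
 * below the ~585 TU duration, the duration is clamped to
 * 200 - AUX_ROC_SAFETY_BUFFER (~19) = 181 TU, still above
 * AUX_ROC_MIN_DURATION (~97 TU), so the AUX_ROC_MIN_SAFETY_BUFFER
 * fallback is not taken.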
+ */ + if (vif->cfg.assoc && !WARN_ON(!dtim_interval)) { + *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); + /* We cannot remain off-channel longer than the DTIM interval */ + if (dtim_interval <= *duration_tu) { + *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER; + if (*duration_tu <= AUX_ROC_MIN_DURATION) + *duration_tu = dtim_interval - + AUX_ROC_MIN_SAFETY_BUFFER; + } + } +} + +int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration, enum iwl_roc_activity activity) +{ + int res; + u32 duration_tu, delay; + struct iwl_roc_req roc_req = { + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .activity = cpu_to_le32(activity), + .sta_id = cpu_to_le32(mvm->aux_sta.sta_id), + }; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(mvmvif->roc_activity != ROC_NUM_ACTIVITIES)) + return -EBUSY; + + /* Set the channel info data */ + iwl_mvm_set_chan_info(mvm, &roc_req.channel_info, + channel->hw_value, + iwl_mvm_phy_band_from_nl80211(channel->band), + IWL_PHY_CHANNEL_MODE20, 0); + + iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu, + &delay); + roc_req.duration = cpu_to_le32(duration_tu); + roc_req.max_delay = cpu_to_le32(delay); + + IWL_DEBUG_TE(mvm, + "\t(requested = %ums, max_delay = %ums)\n", + duration, delay); + IWL_DEBUG_TE(mvm, + "Requesting to remain on channel %u for %utu. activity %u\n", + channel->hw_value, duration_tu, activity); + + /* Set the node address */ + memcpy(roc_req.node_addr, vif->addr, ETH_ALEN); + + res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), + 0, sizeof(roc_req), &roc_req); + if (!res) + mvmvif->roc_activity = activity; + + return res; +} + static int iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -994,7 +1150,7 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, enum ieee80211_roc_type type) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_session_prot_cmd cmd = { + struct iwl_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), @@ -1125,74 +1281,71 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) __iwl_mvm_remove_time_event(mvm, te_data, &uid); } -static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) -{ - int ret; - struct iwl_roc_req roc_cmd = { - .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), - .activity = cpu_to_le32(activity), - }; - - lockdep_assert_held(&mvm->mutex); - ret = iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(MAC_CONF_GROUP, ROC_CMD), - 0, sizeof(roc_cmd), &roc_cmd); - WARN_ON(ret); -} - -static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif) -{ - u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); - u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - - if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) - iwl_mvm_remove_aux_roc_te(mvm, mvmvif, - &mvmvif->hs_time_event_data); - else if (fw_ver == 3) - iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT); - else - IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver); -} - void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data; + bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm); + u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); + int 
iftype = vif->type; - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { - mvmvif = iwl_mvm_vif_from_mac80211(vif); + mutex_lock(&mvm->mutex); + + if (p2p_aux || (roc_ver >= 3 && iftype != NL80211_IFTYPE_P2P_DEVICE)) { + if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) { + iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity); + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + } + goto cleanup_roc; + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { + te_data = &mvmvif->time_event_data; - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + if (iftype == NL80211_IFTYPE_P2P_DEVICE) { + if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) { + IWL_DEBUG_TE(mvm, + "No remain on channel event\n"); + mutex_unlock(&mvm->mutex); + return; + } iwl_mvm_cancel_session_protection(mvm, vif, - mvmvif->time_event_data.id, - mvmvif->time_event_data.link_id); - iwl_mvm_p2p_roc_finished(mvm); + te_data->id, + te_data->link_id); } else { - iwl_mvm_roc_station_remove(mvm, mvmvif); - iwl_mvm_roc_finished(mvm); + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, + &mvmvif->hs_time_event_data); } - - return; + goto cleanup_roc; } te_data = iwl_mvm_get_roc_te(mvm); if (!te_data) { IWL_WARN(mvm, "No remain on channel event\n"); + mutex_unlock(&mvm->mutex); return; } mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { + iftype = te_data->vif->type; + if (iftype == NL80211_IFTYPE_P2P_DEVICE) iwl_mvm_remove_time_event(mvm, mvmvif, te_data); - iwl_mvm_p2p_roc_finished(mvm); - } else { + else iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); - iwl_mvm_roc_finished(mvm); - } + +cleanup_roc: + /* + * In case we get here before the ROC event started + * (so the status bit isn't set), set it here so iwl_mvm_cleanup_roc will + * clean things up properly + */ + if (p2p_aux || iftype != NL80211_IFTYPE_P2P_DEVICE) + set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); + else + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); + + /* Mutex is released inside this function */ + iwl_mvm_cleanup_roc(mvm); } void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm, @@ -1266,7 +1419,7 @@ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait, { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); - struct iwl_mvm_session_prot_notif *resp; + struct iwl_session_prot_notif *resp; int resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF || @@ -1297,8 +1450,8 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) }; struct iwl_notification_wait wait_notif; - int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id); - struct iwl_mvm_session_prot_cmd cmd = { + int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id); + struct iwl_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(mac_link_id), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index dee9c367dcd3..d92470960b38 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2019-2022 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022, 2024
Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -299,7 +299,7 @@ static void check_exit_ctkill(struct work_struct *work) ret = iwl_mvm_get_temp(mvm, &temp); - __iwl_mvm_mac_stop(mvm); + __iwl_mvm_mac_stop(mvm, false); if (ret) goto reschedule; @@ -555,6 +555,22 @@ static int compare_temps(const void *a, const void *b) return ((s16)le16_to_cpu(*(__le16 *)a) - (s16)le16_to_cpu(*(__le16 *)b)); } + +struct iwl_trip_walk_data { + __le16 *thresholds; + int count; +}; + +static int iwl_trip_temp_cb(struct thermal_trip *trip, void *arg) +{ + struct iwl_trip_walk_data *twd = arg; + + if (trip->temperature == THERMAL_TEMP_INVALID) + return 0; + + twd->thresholds[twd->count++] = cpu_to_le16((s16)(trip->temperature / 1000)); + return 0; +} #endif int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) @@ -562,42 +578,25 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) struct temp_report_ths_cmd cmd = {0}; int ret; #ifdef CONFIG_THERMAL - int i, j, idx = 0; + struct iwl_trip_walk_data twd = { .thresholds = cmd.thresholds, .count = 0 }; lockdep_assert_held(&mvm->mutex); if (!mvm->tz_device.tzone) goto send; - /* The driver holds array of temperature trips that are unsorted - * and uncompressed, the FW should get it compressed and sorted + /* + * The thermal core holds an array of temperature trips that are + * unsorted and uncompressed, the FW should get it compressed and + * sorted. */ /* compress trips to cmd array, remove uninitialized values*/ - for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { - if (mvm->tz_device.trips[i].temperature != INT_MIN) { - cmd.thresholds[idx++] = - cpu_to_le16((s16)(mvm->tz_device.trips[i].temperature / 1000)); - } - } - cmd.num_temps = cpu_to_le32(idx); - - if (!idx) - goto send; - - /*sort cmd array*/ - sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL); + for_each_thermal_trip(mvm->tz_device.tzone, iwl_trip_temp_cb, &twd); - /* we should save the indexes of trips because we sort - * and compress the orginal array - */ - for (i = 0; i < idx; i++) { - for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) { - if ((int)(le16_to_cpu(cmd.thresholds[i]) * 1000) == - mvm->tz_device.trips[j].temperature) - mvm->tz_device.fw_trips_index[i] = j; - } - } + cmd.num_temps = cpu_to_le32(twd.count); + if (twd.count) + sort(cmd.thresholds, twd.count, sizeof(s16), compare_temps, NULL); send: #endif @@ -619,48 +618,41 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, int ret; int temp; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { - ret = -ENODATA; - goto out; + /* + * Tell the core that there is no valid temperature value to + * return, but it need not worry about this. 
+ */ + *temperature = THERMAL_TEMP_INVALID; + return 0; } ret = iwl_mvm_get_temp(mvm, &temp); if (ret) - goto out; + return ret; *temperature = temp * 1000; - -out: - mutex_unlock(&mvm->mutex); - return ret; + return 0; } static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, - int trip, int temp) + const struct thermal_trip *trip, int temp) { struct iwl_mvm *mvm = thermal_zone_device_priv(device); - int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (!iwl_mvm_firmware_running(mvm) || - mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { - ret = -EIO; - goto out; - } + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; - if ((temp / 1000) > S16_MAX) { - ret = -EINVAL; - goto out; - } + if ((temp / 1000) > S16_MAX) + return -EINVAL; - ret = iwl_mvm_send_temp_report_ths_cmd(mvm); -out: - mutex_unlock(&mvm->mutex); - return ret; + return iwl_mvm_send_temp_report_ths_cmd(mvm); } static struct thermal_zone_device_ops tzone_ops = { @@ -668,9 +660,6 @@ static struct thermal_zone_device_ops tzone_ops = { .set_trip_temp = iwl_mvm_tzone_set_trip_temp, }; -/* make all trips writable */ -#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1) - static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) { int i, ret; @@ -686,10 +675,18 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF); + /* + * 0 is a valid temperature, + * so initialize the array with THERMAL_TEMP_INVALID, which is an invalid temperature + */ + for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) { + mvm->tz_device.trips[i].temperature = THERMAL_TEMP_INVALID; + mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE; + mvm->tz_device.trips[i].flags = THERMAL_TRIP_FLAG_RW_TEMP; + } mvm->tz_device.tzone = thermal_zone_device_register_with_trips(name, mvm->tz_device.trips, IWL_MAX_DTS_TRIPS, - IWL_WRITABLE_TRIPS_MSK, mvm, &tzone_ops, NULL, 0, 0); if (IS_ERR(mvm->tz_device.tzone)) { @@ -704,15 +701,6 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) if (ret) { IWL_DEBUG_TEMP(mvm, "Failed to enable thermal zone\n"); thermal_zone_device_unregister(mvm->tz_device.tzone); - return; - } - - /* 0 is a valid temperature, - * so initialize the array with S16_MIN which invalid temperature - */ - for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) { - mvm->tz_device.trips[i].temperature = INT_MIN; - mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE; } } @@ -738,27 +726,18 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, unsigned long new_state) { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); - int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (!iwl_mvm_firmware_running(mvm) || - mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { - ret = -EIO; - goto unlock; - } - - if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { - ret = -EINVAL; - goto unlock; - } + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; - ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, - new_state); + if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) + return -EINVAL; -unlock: - mutex_unlock(&mvm->mutex); - return ret; + return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, + new_state); } static const struct thermal_cooling_device_ops tcooling_ops = { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 461f26d9214e..f67afb66ef2b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++
b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -12,7 +12,8 @@ #include <net/ipv6.h> #include "iwl-trans.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" +#include "iwl-utils.h" #include "mvm.h" #include "sta.h" #include "time-sync.h" @@ -520,6 +521,31 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, } } +static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + struct ieee80211_hdr *hdr, + struct ieee80211_tx_info *info) +{ + if (unlikely(!mvmsta)) + return true; + + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) + return true; + + if (likely(ieee80211_is_data(hdr->frame_control) && + mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED)) + return false; + + /* + * Not a data frame, use host rate if on an old device that + * can't possibly be doing MLO (firmware may be selecting a + * bad rate), if we might be doing MLO we need to let FW pick + * (since we don't necessarily know the link), but FW rate + * selection was fixed. + */ + return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ; +} + static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen, const u8 *addr3_override) { @@ -567,12 +593,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, flags |= IWL_TX_FLAGS_ENCRYPT_DIS; /* - * For data and mgmt packets rate info comes from the fw. Only + * For data and mgmt packets rate info comes from the fw (for + * new devices, older FW is somewhat broken for this). Only * set rate/antenna for injected frames with fixed rate, or - * when no sta is given. + * when no sta is given, or with older firmware. */ - if (unlikely(!sta || - info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { + if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, @@ -777,10 +803,30 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); + bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm); - if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info.control.vif->type == NL80211_IFTYPE_AP || - info.control.vif->type == NL80211_IFTYPE_ADHOC) { + if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE && + p2p_aux) || + (info.control.vif->type == NL80211_IFTYPE_STATION && + offchannel)) { + /* + * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets + * that can be used in 2 different types of vifs, P2P + * Device and STATION. + * P2P Device uses the offchannel queue. + * STATION (HS2.0) uses the auxiliary context of the FW, + * and hence needs to be sent on the aux queue. + * If P2P_DEV_OVER_AUX is supported (p2p_aux = true) + * also P2P Device uses the aux queue.
+ */ + sta_id = mvm->aux_sta.sta_id; + queue = mvm->aux_queue; + if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE)) + return -1; + } else if (info.control.vif->type == + NL80211_IFTYPE_P2P_DEVICE || + info.control.vif->type == NL80211_IFTYPE_AP || + info.control.vif->type == NL80211_IFTYPE_ADHOC) { u32 link_id = u32_get_bits(info.control.flags, IEEE80211_TX_CTRL_MLO_LINK); struct iwl_mvm_vif_link_info *link; @@ -806,18 +852,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; - } else if (info.control.vif->type == NL80211_IFTYPE_STATION && - offchannel) { - /* - * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets - * that can be used in 2 different types of vifs, P2P & - * STATION. - * P2P uses the offchannel queue. - * STATION (HS2.0) uses the auxiliary context of the FW, - * and hence needs to be sent on the aux queue. - */ - sta_id = mvm->aux_sta.sta_id; - queue = mvm->aux_queue; } } @@ -881,10 +915,10 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, if (WARN_ON(!link_conf)) band = NL80211_BAND_2GHZ; else - band = link_conf->chandef.chan->band; + band = link_conf->chanreq.oper.chan->band; rcu_read_unlock(); } else { - band = mvmsta->vif->bss_conf.chandef.chan->band; + band = mvmsta->vif->bss_conf.chanreq.oper.chan->band; } lmac = iwl_mvm_get_lmac_id(mvm, band); @@ -905,72 +939,6 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, #ifdef CONFIG_INET -static int -iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, - netdev_features_t netdev_flags, - struct sk_buff_head *mpdus_skb) -{ - struct sk_buff *tmp, *next; - struct ieee80211_hdr *hdr = (void *)skb->data; - char cb[sizeof(skb->cb)]; - u16 i = 0; - unsigned int tcp_payload_len; - unsigned int mss = skb_shinfo(skb)->gso_size; - bool ipv4 = (skb->protocol == htons(ETH_P_IP)); - bool qos = ieee80211_is_data_qos(hdr->frame_control); - u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; - - skb_shinfo(skb)->gso_size = num_subframes * mss; - memcpy(cb, skb->cb, sizeof(cb)); - - next = skb_gso_segment(skb, netdev_flags); - skb_shinfo(skb)->gso_size = mss; - skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; - if (WARN_ON_ONCE(IS_ERR(next))) - return -EINVAL; - else if (next) - consume_skb(skb); - - skb_list_walk_safe(next, tmp, next) { - memcpy(tmp->cb, cb, sizeof(tmp->cb)); - /* - * Compute the length of all the data added for the A-MSDU. - * This will be used to compute the length to write in the TX - * command. We have: SNAP + IP + TCP for n -1 subframes and - * ETH header for n subframes. - */ - tcp_payload_len = skb_tail_pointer(tmp) - - skb_transport_header(tmp) - - tcp_hdrlen(tmp) + tmp->data_len; - - if (ipv4) - ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); - - if (tcp_payload_len > mss) { - skb_shinfo(tmp)->gso_size = mss; - skb_shinfo(tmp)->gso_type = ipv4 ? 
SKB_GSO_TCPV4 : - SKB_GSO_TCPV6; - } else { - if (qos) { - u8 *qc; - - if (ipv4) - ip_send_check(ip_hdr(tmp)); - - qc = ieee80211_get_qos_ctl((void *)tmp->data); - *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - } - skb_shinfo(tmp)->gso_size = 0; - } - - skb_mark_not_on_list(tmp); - __skb_queue_tail(mpdus_skb, tmp); - i++; - } - - return 0; -} - static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, @@ -984,13 +952,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 tid; - snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + - tcp_hdrlen(skb); + snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb); if (!mvmsta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || !mvmsta->amsdu_enabled) - return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); + return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Do not build AMSDU for IPv6 with extension headers. @@ -1000,7 +967,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != IPPROTO_TCP) { netdev_flags &= ~NETIF_F_CSUM_MASK; - return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); + return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } tid = ieee80211_get_tid(hdr); @@ -1014,7 +981,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, if ((info->flags & IEEE80211_TX_CTL_AMPDU && !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || !(mvmsta->amsdu_enabled & BIT(tid))) - return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); + return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Take the min of ieee80211 station and mvm station @@ -1072,8 +1039,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * Trick the segmentation function to make it * create SKBs that can fit into one A-MSDU. 
*/ - return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags, - mpdus_skb); + return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb); } #else /* CONFIG_INET */ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, @@ -1165,6 +1131,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, bool is_ampdu = false; int hdrlen; + if (WARN_ON_ONCE(!sta)) + return -1; + mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); @@ -1172,10 +1141,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; - if (WARN_ON_ONCE(!mvmsta)) - return -1; - - if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA)) return -1; if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) @@ -1305,7 +1271,7 @@ drop: int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta; struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; struct ieee80211_vif *vif; @@ -1314,10 +1280,12 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct sk_buff *orig_skb = skb; const u8 *addr3; - if (WARN_ON_ONCE(!mvmsta)) + if (WARN_ON_ONCE(!sta)) return -1; - if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); @@ -1636,12 +1604,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, * of the batch. This is why the SSN of the SCD is written at the end of the * whole struct at a variable offset. This function knows how to cope with the * variable offset and returns the SSN of the SCD. + * + * For 22000-series and lower, this is just 12 bits. For later, 16 bits. 
*/ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, - struct iwl_mvm_tx_resp *tx_resp) + struct iwl_tx_resp *tx_resp) { - return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + - tx_resp->frame_count) & 0xfff; + u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + + tx_resp->frame_count); + + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return val & 0xFFFF; + return val & 0xFFF; } static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, @@ -1650,10 +1624,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct ieee80211_sta *sta; u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); - /* struct iwl_mvm_tx_resp_v3 is almost the same */ - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); - int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); + /* struct iwl_tx_resp_v3 is almost the same */ + struct iwl_tx_resp *tx_resp = (void *)pkt->data; + int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid); + int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid); struct agg_tx_status *agg_status = iwl_mvm_get_agg_status(mvm, tx_resp); u32 status = le16_to_cpu(agg_status->status); @@ -1834,6 +1808,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", next_reclaimed); + if (tid < IWL_MAX_TID_COUNT) + iwl_mvm_count_mpdu(mvmsta, sta_id, 1, + true, 0); } else { IWL_DEBUG_TX_REPLY(mvm, "NDP - don't update next_reclaimed\n"); @@ -1907,7 +1884,7 @@ static const char *iwl_get_agg_tx_status(u16 status) static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + struct iwl_tx_resp *tx_resp = (void *)pkt->data; struct agg_tx_status *frame_status = iwl_mvm_get_agg_status(mvm, tx_resp); int i; @@ -1941,9 +1918,9 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); - int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); + struct iwl_tx_resp *tx_resp = (void *)pkt->data; + int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid); + int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); @@ -1982,7 +1959,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; + struct iwl_tx_resp *tx_resp = (void *)pkt->data; if (tx_resp->frame_count == 1) iwl_mvm_rx_tx_cmd_single(mvm, pkt); @@ -2146,7 +2123,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) ba_info.flags = IEEE80211_TX_STAT_AMPDU; if (iwl_mvm_has_new_tx_api(mvm)) { - struct iwl_mvm_compressed_ba_notif *ba_res = + struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data; u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info); u16 tfd_cnt; @@ -2174,6 +2151,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) tfd_cnt, pkt_len)) return; + IWL_DEBUG_TX_REPLY(mvm, + "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", + sta_id, le32_to_cpu(ba_res->flags), + le16_to_cpu(ba_res->txed), + le16_to_cpu(ba_res->done)); + 
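To see what the widened mask in iwl_mvm_get_scd_ssn() above changes, here is a minimal stand-alone C sketch of the 12-bit vs 16-bit SSN masking; the enum values and function names here are made up for illustration, and only the two masks and the AX210 cutoff come from the hunk itself:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the device_family enum values;
 * only their relative order matters for the check */
enum family_sk { FAMILY_22000_SK = 8, FAMILY_AX210_SK = 9 };

/* mirrors the masking in iwl_mvm_get_scd_ssn(): AX210 and later
 * report a 16-bit scheduler SSN, earlier devices only 12 bits */
static uint32_t scd_ssn_sk(enum family_sk family, uint32_t raw)
{
	if (family >= FAMILY_AX210_SK)
		return raw & 0xFFFF;
	return raw & 0xFFF;
}

int main(void)
{
	/* 0x1FFE wraps to 0xFFE under the 12-bit mask but is
	 * preserved under the 16-bit one */
	printf("22000: 0x%x, ax210: 0x%x\n",
	       scd_ssn_sk(FAMILY_22000_SK, 0x1FFE),
	       scd_ssn_sk(FAMILY_AX210_SK, 0x1FFE));
	return 0;
}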
rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); @@ -2188,8 +2171,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) /* Free per TID */ for (i = 0; i < tfd_cnt; i++) { - struct iwl_mvm_compressed_ba_tfd *ba_tfd = - &ba_res->tfd[i]; + struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; tid = ba_tfd->tid; if (tid == IWL_MGMT_TID) @@ -2205,16 +2187,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) le32_to_cpu(ba_res->tx_rate), false); } - if (mvmsta) + if (mvmsta) { iwl_mvm_tx_airtime(mvm, mvmsta, le32_to_cpu(ba_res->wireless_time)); - rcu_read_unlock(); - IWL_DEBUG_TX_REPLY(mvm, - "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", - sta_id, le32_to_cpu(ba_res->flags), - le16_to_cpu(ba_res->txed), - le16_to_cpu(ba_res->done)); + iwl_mvm_count_mpdu(mvmsta, sta_id, + le16_to_cpu(ba_res->txed), true, 0); + } + rcu_read_unlock(); return; } @@ -2246,9 +2226,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) rcu_read_unlock(); - iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, - tid_data->rate_n_flags, false); - IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from %pM, sta_id = %d\n", ba_notif->sta_addr, ba_notif->sta_id); @@ -2261,6 +2238,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", ba_notif->reduced_txp); + + iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, + tid_data->rate_n_flags, false); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 91286018a69d..dd890dcd1505 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -249,6 +249,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) * This is the special case in which init is set and we call a callback in * this case to clear the state indicating that station creation is in * progress. + * + * Returns: an error code indicating success or failure */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) { @@ -259,7 +261,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) .data = { lq, }, }; - if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA || + if (WARN_ON(lq->sta_id == IWL_INVALID_STA || iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; @@ -295,6 +297,10 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->type != NL80211_IFTYPE_STATION) return; + /* SMPS is handled by firmware */ + if (iwl_mvm_has_rlc_offload(mvm)) + return; + mvmvif = iwl_mvm_vif_from_mac80211(vif); if (WARN_ON_ONCE(!mvmvif->link[link_id])) @@ -342,6 +348,26 @@ static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait, return true; } +#define PERIODIC_STAT_RATE 5 + +int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable) +{ + u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK; + u32 type = enable ? 
(IWL_STATS_NTFY_TYPE_ID_OPER | + IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0; + struct iwl_system_statistics_cmd system_cmd = { + .cfg_mask = cpu_to_le32(flags), + .config_time_sec = cpu_to_le32(enable ? + PERIODIC_STAT_RATE : 0), + .type_id_mask = cpu_to_le32(type), + }; + + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + 0, sizeof(system_cmd), &system_cmd); +} + static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear, u8 cmd_ver) { @@ -413,6 +439,13 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) IWL_FW_CMD_VER_UNKNOWN); int ret; + /* + * Don't request statistics during restart, they'll not have any useful + * information right after restart, nor is clearing needed + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return 0; + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver); @@ -646,10 +679,8 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bss_iface_iterator, &bss_iter_data); - if (bss_iter_data.error) { - IWL_ERR(mvm, "More than one managed interface active!\n"); + if (bss_iter_data.error) return ERR_PTR(-EINVAL); - } return bss_iter_data.vif; } @@ -714,58 +745,20 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) } unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool tdls, bool cmd_q) + struct ieee80211_vif *vif) { - struct iwl_fw_dbg_trigger_tlv *trigger; - struct iwl_fw_dbg_trigger_txq_timer *txq_timer; - unsigned int default_timeout = cmd_q ? - IWL_DEF_WD_TIMEOUT : + unsigned int default_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { - /* - * We can't know when the station is asleep or awake, so we - * must disable the queue hang detection. - */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && - vif && vif->type == NL80211_IFTYPE_AP) - return IWL_WATCHDOG_DISABLED; - return default_timeout; - } - - trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); - txq_timer = (void *)trigger->data; - - if (tdls) - return le32_to_cpu(txq_timer->tdls); - - if (cmd_q) - return le32_to_cpu(txq_timer->command_queue); - - if (WARN_ON(!vif)) - return default_timeout; - - switch (ieee80211_vif_type_p2p(vif)) { - case NL80211_IFTYPE_ADHOC: - return le32_to_cpu(txq_timer->ibss); - case NL80211_IFTYPE_STATION: - return le32_to_cpu(txq_timer->bss); - case NL80211_IFTYPE_AP: - return le32_to_cpu(txq_timer->softap); - case NL80211_IFTYPE_P2P_CLIENT: - return le32_to_cpu(txq_timer->p2p_client); - case NL80211_IFTYPE_P2P_GO: - return le32_to_cpu(txq_timer->p2p_go); - case NL80211_IFTYPE_P2P_DEVICE: - return le32_to_cpu(txq_timer->p2p_device); - case NL80211_IFTYPE_MONITOR: - return default_timeout; - default: - WARN_ON(1); - return mvm->trans->trans_cfg->base_params->wd_timeout; - } + /* + * We can't know when the station is asleep or awake, so we + * must disable the queue hang detection. 
+ */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && + vif->type == NL80211_IFTYPE_AP) + return IWL_WATCHDOG_DISABLED; + return default_timeout; } void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -863,7 +856,7 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) { - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, @@ -871,8 +864,6 @@ static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); - - mutex_unlock(&mvm->mutex); } static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) @@ -1101,10 +1092,9 @@ void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) spin_unlock(&mvm->tcm.lock); if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) { - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (iwl_mvm_request_statistics(mvm, true)) handle_uapsd = false; - mutex_unlock(&mvm->mutex); } spin_lock(&mvm->tcm.lock); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c index 080a1587caa5..0f7fa6032c66 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c @@ -104,9 +104,9 @@ static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = { }; enum iwl_mvm_vendor_events_idx { - /* 0x0 - 0x3 are deprecated */ - IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4, - NUM_IWL_MVM_VENDOR_EVENT_IDX + /* 0x0 - 0x3 are deprecated */ + IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4, + NUM_IWL_MVM_VENDOR_EVENT_IDX }; static const struct nl80211_vendor_cmd_info diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index fa4a14546860..838c426db7f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,13 +1,34 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ +#include <linux/dmi.h> #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "iwl-prph.h" +static const struct dmi_system_id dmi_force_scu_active_approved_list[] = { + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + }, + }, + /* keep last */ + {} +}; + +static bool iwl_is_force_scu_active_approved(void) +{ + return !!dmi_check_system(dmi_force_scu_active_approved_list); +} + static void iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans, struct iwl_prph_scratch_hwm_cfg *dbg_cfg, @@ -68,7 +89,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans, } break; default: - IWL_ERR(trans, "WRT: Invalid buffer destination\n"); + IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n", + le32_to_cpu(fw_mon_cfg->buf_location)); } out: if (dbg_flags) @@ -84,6 +106,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; struct iwl_prph_info *prph_info; u32 control_flags = 0; + u32 control_flags_ext = 0; int ret; int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); @@ -108,6 +131,12 @@ int iwl_pcie_ctxt_info_gen3_init(struct 
iwl_trans *trans, break; } + if (trans->dsbr_urm_fw_dependent) + control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW; + + if (trans->dsbr_urm_permanent) + control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM; + /* Allocate prph scratch */ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), &trans_pcie->prph_scratch_dma_addr, @@ -119,7 +148,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, prph_sc_ctrl->version.version = 0; prph_sc_ctrl->version.mac_id = - cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + cpu_to_le16((u16)trans->hw_rev); prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); control_flags |= IWL_PRPH_SCRATCH_MTR_MODE; @@ -128,6 +157,14 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, if (trans->trans_cfg->imr_enabled) control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN; + if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + iwl_is_force_scu_active_approved()) { + control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE; + IWL_DEBUG_FW(trans, + "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n", + IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE); + } + /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); @@ -135,6 +172,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg, &control_flags); prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); + prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext); /* initialize the Step equalizer data */ prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step); @@ -187,7 +225,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4); ctxt_info_gen3->mtr_base_addr = - cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); + cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 5f55efe64bf5..344e4d5a1c6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ctxt_info->version.version = 0; ctxt_info->version.mac_id = - cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + cpu_to_le16((u16)trans->hw_rev); /* size is in DWs */ ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); @@ -218,7 +218,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, /* initialize TX command queue */ ctxt_info->hcmd_cfg.cmd_queue_addr = - cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); + cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 2c9b98c8184b..e0b657b2f74b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c 
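The drv.c hunks below replace the catch-all 0xA840/PCI_ANY_ID entry with a list of explicit subsystem IDs. A small stand-alone sketch of the first-match table lookup this relies on (the table entries and helper are illustrative stand-ins, not the driver's real structures, which match on many more fields):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ANY_ID_SK 0xFFFF	/* stand-in for PCI_ANY_ID */

/* illustrative entry; the real iwl_hw_card_ids entries carry a
 * full struct pci_device_id plus a config pointer */
struct id_entry_sk {
	uint16_t device, subdevice;
	const char *cfg;
};

static const struct id_entry_sk table_sk[] = {
	{ 0xA840, 0x0090, "iwl_bz_trans_cfg" },
	{ 0xA840, 0x0094, "iwl_bz_trans_cfg" },
	{ 0x7740, ANY_ID_SK, "iwl_bz_trans_cfg" },
};

/* first match wins: explicit subdevice IDs match exactly, ANY_ID
 * matches everything, so unknown 0xA840 variants fall through */
static const char *match_sk(uint16_t dev, uint16_t subdev)
{
	for (size_t i = 0; i < sizeof(table_sk) / sizeof(table_sk[0]); i++)
		if (table_sk[i].device == dev &&
		    (table_sk[i].subdevice == ANY_ID_SK ||
		     table_sk[i].subdevice == subdev))
			return table_sk[i].cfg;
	return NULL;
}

int main(void)
{
	const char *hit = match_sk(0xA840, 0x0094);
	const char *miss = match_sk(0xA840, 0x1234);

	printf("0xA840/0x0094 -> %s\n", hit ? hit : "not claimed");
	printf("0xA840/0x1234 -> %s\n", miss ? miss : "not claimed");
	return 0;
}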
@@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -33,7 +33,7 @@ extern int _invalid_type; .driver_data = _ASSIGN_CFG(cfg) /* Hardware specific file defines the PCI IDs table for that hardware module */ -static const struct pci_device_id iwl_hw_card_ids[] = { +VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { #if IS_ENABLED(CONFIG_IWLDVM) {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ @@ -492,7 +492,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, - {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, @@ -501,18 +500,55 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, /* Bz devices */ - {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)}, /* Sc devices */ {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)}, + {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)}, + {IWL_PCI_DEVICE(0xD340, 
PCI_ANY_ID, iwl_sc_trans_cfg)}, + {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)}, + +/* Dr devices */ + {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)}, #endif /* CONFIG_IWLMVM */ {0} }; MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ @@ -526,7 +562,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ IWL_CFG_ANY, _cfg, _name) -static const struct iwl_dev_info iwl_dev_info_table[] = { +VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) /* 9000 */ IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name), @@ -942,11 +978,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { iwl_cfg_ma, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma, iwl_ax221_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax231_name), @@ -997,20 +1028,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), -/* Bz */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_bz_name), - -/* Ga (Gl) */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_gl, iwl_bz_name), - /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, @@ -1091,23 +1108,66 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), -/* MsP */ -/* For now we use the same FW as MR, but this will change in the future. 
*/ +/* Bz */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_ms_a0, iwl_ax204_name), + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_ax201_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_ms_a0, iwl_ax204_name), + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma, iwl_ax204_name), + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_fm_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_wh_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_ax201_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_ax211_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_fm_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_wh_name), + +/* Ga (Gl) */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_gl, iwl_gl_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_gl, iwl_mtp_name), /* Sc */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -1115,8 +1175,37 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc, iwl_sc_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2, iwl_sc2_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2f, iwl_sc2f_name), +/* Dr */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_dr, iwl_dr_name), + +/* Br */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_br, iwl_br_name), #endif /* 
CONFIG_IWLMVM */ }; +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table); + +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table); +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size); +#endif /* * Read rf id and cdb info from prph register and store it @@ -1125,6 +1214,7 @@ static void get_crf_id(struct iwl_trans *iwl_trans) { u32 sd_reg_ver_addr; u32 val = 0; + u8 step; if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; @@ -1143,6 +1233,27 @@ static void get_crf_id(struct iwl_trans *iwl_trans) iwl_trans->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); + /* For BZ-W, take B step also when A step is indicated */ + if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) + step = SILICON_B_STEP; + + /* In BZ, the MAC step must be read from the CNVI aux register */ + if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) { + step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id); + + /* For BZ-U, take B step also when A step is indicated */ + if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) == + CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) && + step == SILICON_A_STEP) + step = SILICON_B_STEP; + } + + if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ || + CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) { + iwl_trans->hw_rev_step = step; + iwl_trans->hw_rev |= step; + } + /* Read cdb info (also contains the jacket info if needed in the future */ iwl_trans->hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); @@ -1185,17 +1296,15 @@ static int map_crf_id(struct iwl_trans *iwl_trans) case REG_CRF_ID_TYPE_GF: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12); break; - case REG_CRF_ID_TYPE_MR: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12); - break; case REG_CRF_ID_TYPE_FM: - case REG_CRF_ID_TYPE_FMI: - case REG_CRF_ID_TYPE_FMR: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); break; case REG_CRF_ID_TYPE_WHP: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12); break; + case REG_CRF_ID_TYPE_PE: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12); + break; default: ret = -EIO; IWL_ERR(iwl_trans, @@ -1236,7 +1345,7 @@ out: /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 -static const struct iwl_dev_info * +VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) @@ -1299,6 +1408,48 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, return NULL; } +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info); + +static void iwl_pcie_recheck_me_status(struct work_struct *wk) +{ + struct iwl_trans *trans = container_of(wk, typeof(*trans), + me_recheck_wk.work); + u32 val; + + val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG); + trans->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP); +} + +static void iwl_pcie_check_me_status(struct iwl_trans *trans) +{ + u32 val; + + trans->me_present = -1; + + INIT_DELAYED_WORK(&trans->me_recheck_wk, + iwl_pcie_recheck_me_status); + + /* we don't have a good way of determining this until BZ */ + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + return; + + val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1); + if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) { + trans->me_present = + !!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT); + return; + } + + val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG); + if (val 
& (CSR_HW_IF_CONFIG_REG_ME_OWN | + CSR_HW_IF_CONFIG_REG_IAMT_UP)) { + trans->me_present = 1; + return; + } + + /* recheck again later, ME might still be initializing */ + schedule_delayed_work(&trans->me_recheck_wk, HZ); +} static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1329,6 +1480,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); + iwl_trans_pcie_check_product_reset_status(pdev); + iwl_trans_pcie_check_product_reset_mode(pdev); + /* * Let's try to grab NIC access early here. Sometimes, NICs may * fail to initialize, and if that happens it's better if we see @@ -1382,6 +1536,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (dev_info) { iwl_trans->cfg = dev_info->cfg; iwl_trans->name = dev_info->name; + iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160; } #if IS_ENABLED(CONFIG_IWLMVM) @@ -1447,6 +1602,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!iwl_trans->name) iwl_trans->name = iwl_trans->cfg->name; + IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name); + if (iwl_trans->trans_cfg->mq_rx_supported) { if (WARN_ON(!iwl_trans->cfg->num_rbds)) { ret = -EINVAL; @@ -1472,6 +1629,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, iwl_trans); + iwl_pcie_check_me_status(iwl_trans); + /* try to get ownership so that we'll know if we don't own it */ iwl_pcie_prepare_card_hw(iwl_trans); @@ -1499,6 +1658,8 @@ static void iwl_pci_remove(struct pci_dev *pdev) if (!trans) return; + cancel_delayed_work_sync(&trans->me_recheck_wk); + iwl_drv_stop(trans->drv); iwl_trans_pcie_free(trans); @@ -1516,11 +1677,12 @@ static int iwl_pci_suspend(struct device *device) return 0; } -static int iwl_pci_resume(struct device *device) +static int _iwl_pci_resume(struct device *device, bool restore) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + bool device_was_powered_off = false; /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if @@ -1536,6 +1698,26 @@ static int iwl_pci_resume(struct device *device) if (!trans->op_mode) return 0; + /* + * Scratch value was altered, this means the device was powered off, we + * need to reset it completely. + * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan, + * so assume that any bits there mean that the device is usable. + */ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && + !iwl_read32(trans, CSR_FUNC_SCRATCH)) + device_was_powered_off = true; + + if (restore || device_was_powered_off) { + trans->state = IWL_TRANS_NO_FW; + /* Hope for the best here ... If one of those steps fails we + * won't really know how to recover. 
+ */ + iwl_pcie_prepare_card_hw(trans); + iwl_finish_nic_init(trans); + iwl_op_mode_device_powered_off(trans->op_mode); + } + /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) return 0; @@ -1556,9 +1738,23 @@ static int iwl_pci_resume(struct device *device) return 0; } +static int iwl_pci_restore(struct device *device) +{ + return _iwl_pci_resume(device, true); +} + +static int iwl_pci_resume(struct device *device) +{ + return _iwl_pci_resume(device, false); +} + static const struct dev_pm_ops iwl_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, - iwl_pci_resume) + .suspend = pm_sleep_ptr(iwl_pci_suspend), + .resume = pm_sleep_ptr(iwl_pci_resume), + .freeze = pm_sleep_ptr(iwl_pci_suspend), + .thaw = pm_sleep_ptr(iwl_pci_resume), + .poweroff = pm_sleep_ptr(iwl_pci_suspend), + .restore = pm_sleep_ptr(iwl_pci_restore), }; #define IWL_PM_OPS (&iwl_dev_pm_ops) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 7805a42948af..45460f93d24a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2003-2015, 2018-2023 Intel Corporation + * Copyright (C) 2003-2015, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -22,7 +22,6 @@ #include "iwl-io.h" #include "iwl-op-mode.h" #include "iwl-drv.h" -#include "queue/tx.h" #include "iwl-context-info.h" /* @@ -273,7 +272,7 @@ enum iwl_pcie_fw_reset_state { }; /** - * enum wl_pcie_imr_status - imr dma transfer state + * enum iwl_pcie_imr_status - imr dma transfer state * @IMR_D2S_IDLE: default value of the dma transfer * @IMR_D2S_REQUESTED: dma transfer requested * @IMR_D2S_COMPLETED: dma transfer completed @@ -287,6 +286,58 @@ enum iwl_pcie_imr_status { }; /** + * struct iwl_pcie_txqs - TX queues data + * + * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) + * @page_offs: offset from skb->cb to mac header page pointer + * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer + * @queue_used: bit mask of used queues + * @queue_stopped: bit mask of stopped queues + * @txq: array of TXQ data structures representing the TXQs + * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler + * @queue_alloc_cmd_ver: queue allocation command version + * @bc_pool: bytecount DMA allocations pool + * @bc_tbl_size: bytecount table size + * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO + * (and similar usage) + * @cmd: command queue data + * @cmd.fifo: FIFO number + * @cmd.q_id: queue ID + * @cmd.wdg_timeout: watchdog timeout + * @tfd: TFD data + * @tfd.max_tbs: max number of buffers per TFD + * @tfd.size: TFD size + * @tfd.addr_size: TFD/TB address size + */ +struct iwl_pcie_txqs { + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; + struct dma_pool *bc_pool; + size_t bc_tbl_size; + bool bc_table_dword; + u8 page_offs; + u8 dev_cmd_offs; + struct iwl_tso_hdr_page __percpu *tso_hdr_page; + + struct { + u8 fifo; + u8 q_id; + unsigned int wdg_timeout; + } cmd; + + struct { + u8 max_tbs; + u16 size; + u8 addr_size; + } tfd; + + struct iwl_dma_ptr scd_bc_tbls; + + u8 queue_alloc_cmd_ver; 
+}; + +/** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues @@ -367,6 +418,7 @@ enum iwl_pcie_imr_status { * @is_down: indicates the NIC is down * @isr_stats: interrupt statistics * @napi_dev: (fake) netdev for NAPI registration + * @txqs: transport tx queues data. */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -386,7 +438,7 @@ struct iwl_trans_pcie { dma_addr_t iml_dma_addr; struct iwl_trans *trans; - struct net_device napi_dev; + struct net_device *napi_dev; /* INT ICT Table */ __le32 *ict_tbl; @@ -464,6 +516,8 @@ struct iwl_trans_pcie { enum iwl_pcie_imr_status imr_status; wait_queue_head_t imr_waitq; char rf_name[32]; + + struct iwl_pcie_txqs txqs; }; static inline struct iwl_trans_pcie * @@ -509,6 +563,9 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); __cond_lock(nic_access_nobh, \ likely(__iwl_trans_pcie_grab_nic_access(trans))) +void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev); +void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev); + /***************************************************** * RX ******************************************************/ @@ -538,6 +595,33 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ +/* We need 2 entries for the TX command and header, and another one might + * be needed for potential data in the SKB's head. The remaining ones can + * be used for frags. + */ +#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3) + +struct iwl_tso_hdr_page { + struct page *page; + u8 *pos; +}; + +/* + * Note that we put this struct *last* in the page. By doing that, we ensure + * that no TB referencing this page can trigger the 32-bit boundary hardware + * bug. 
+ */ +struct iwl_tso_page_info { + dma_addr_t dma_addr; + struct page *next; + refcount_t use_count; +}; + +#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info)) +#define IWL_TSO_PAGE_INFO(addr) \ + ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \ + IWL_TSO_PAGE_DATA_SIZE)) + int iwl_pcie_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); @@ -552,10 +636,172 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); -int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); +int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue); + +dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, + unsigned int len); +struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta, + u8 **hdr, unsigned int hdr_room, + unsigned int offset); + +void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta); + +static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr) +{ + dma_addr_t res; + + res = IWL_TSO_PAGE_INFO(addr)->dma_addr; + res += (unsigned long)addr & ~PAGE_MASK; + + return res; +} + +static inline dma_addr_t +iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) +{ + return txq->first_tb_dma + + sizeof(struct iwl_pcie_first_tb_buf) * idx; +} + +static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index) +{ + return index & (q->n_window - 1); +} + +static inline void *iwl_txq_get_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, int idx) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->trans_cfg->gen2) + idx = iwl_txq_get_cmd_index(txq, idx); + + return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx; +} + +/* + * We need this inline in case dma_addr_t is only 32-bits - since the + * hardware is always 64-bit, the issue can still occur in that case, + * so use u64 for 'phys' here to force the addition in 64-bit. 
+ */ +static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len) +{ + return upper_32_bits(phys) != upper_32_bits(phys + len); +} + +int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q); + +static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) { + iwl_op_mode_queue_full(trans->op_mode, txq->id); + IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); + } else { + IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", + txq->id); + } +} + +/** + * iwl_txq_inc_wrap - increment queue index, wrap back to beginning + * @trans: the transport (for configuration data) + * @index: current index + */ +static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) +{ + return ++index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); +} + +/** + * iwl_txq_dec_wrap - decrement queue index, wrap back to end + * @trans: the transport (for configuration data) + * @index: current index + */ +static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) +{ + return --index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); +} + +void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); + +static inline void +iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) { + IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); + iwl_op_mode_queue_not_full(trans->op_mode, txq->id); + } +} + +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd, dma_addr_t addr, + u16 len); + +static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd) +{ + tfd->num_tbs = 0; + + iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, + trans->invalid_tx_cmd.size); +} + +void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd); + +int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, + u32 sta_mask, u8 tid, + int size, unsigned int timeout); + +int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id); + +void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); +void iwl_txq_gen2_tx_free(struct iwl_trans *trans); +int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue); +int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, + int queue_size); + +static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, + void *_tfd, u8 idx) +{ + struct iwl_tfd *tfd; + struct iwl_tfd_tb *tb; + + if (trans->trans_cfg->gen2) { + struct iwl_tfh_tfd *tfh_tfd = _tfd; + struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; + + return le16_to_cpu(tfh_tb->tb_len); + } + + tfd = (struct iwl_tfd *)_tfd; + tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs, bool is_flush); +void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); +void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze); +int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx); +int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm); 
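The page-trailer scheme above deserves a closer look: struct iwl_tso_page_info sits in the last bytes of each TSO page, so IWL_TSO_PAGE_INFO() can locate it from any address inside the page with mask-and-add arithmetic, and iwl_pcie_get_tso_page_phys() can rebuild a bus address as the trailer's DMA base plus the in-page offset, without carrying mapping state around. The sketch below is an illustration only, not part of this patch and not buildable in the kernel tree: the demo_* names are hypothetical stand-ins, the page size is assumed to be 4 KiB, and the DMA address is made up. It mirrors the same arithmetic, plus the iwl_txq_crosses_4g_boundary() comparison that motivates the u64 comment above.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

struct demo_page_info {		/* stand-in for struct iwl_tso_page_info */
	uint64_t dma_addr;
	int use_count;
};

#define DEMO_PAGE_DATA_SIZE	(DEMO_PAGE_SIZE - sizeof(struct demo_page_info))
#define DEMO_PAGE_INFO(addr)						\
	((struct demo_page_info *)(((uintptr_t)(addr) & DEMO_PAGE_MASK) + \
				   DEMO_PAGE_DATA_SIZE))

/* same idea as iwl_pcie_get_tso_page_phys(): DMA base + in-page offset */
static uint64_t demo_get_phys(const void *addr)
{
	return DEMO_PAGE_INFO(addr)->dma_addr +
	       ((uintptr_t)addr & ~DEMO_PAGE_MASK);
}

/* same idea as iwl_txq_crosses_4g_boundary(): compare the upper 32 bits */
static int demo_crosses_4g(uint64_t phys, uint16_t len)
{
	return (phys >> 32) != ((phys + len) >> 32);
}

int main(void)
{
	unsigned char *page = aligned_alloc(DEMO_PAGE_SIZE, DEMO_PAGE_SIZE);

	if (!page)
		return 1;

	/* pretend the page was DMA-mapped just below the 4 GiB mark */
	DEMO_PAGE_INFO(page)->dma_addr = 0xfffff000ULL;
	DEMO_PAGE_INFO(page)->use_count = 1;

	printf("phys of page+0xf80 = %#llx\n",
	       (unsigned long long)demo_get_phys(page + 0xf80));
	printf("a 0x100-byte TB there crosses 4G: %d\n",
	       demo_crosses_4g(demo_get_phys(page + 0xf80), 0x100));

	free(page);
	return 0;
}

Because the trailer occupies the end of the page, usable data stops at IWL_TSO_PAGE_DATA_SIZE, so a TB built from such a page can never end exactly on a page boundary; since every 2^32 boundary is also a page boundary, that is what makes the "we put this struct *last*" placement above a workaround for the 32-bit boundary hardware bug rather than just a storage trick.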
/***************************************************** * Error handling @@ -822,12 +1068,51 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); +void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans); #else static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } #endif void iwl_pcie_rx_allocator_work(struct work_struct *data); +/* common trans ops for all generations transports */ +void iwl_trans_pcie_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg); +int iwl_trans_pcie_start_hw(struct iwl_trans *trans); +void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans); +void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val); +void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val); +u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs); +u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg); +void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val); +int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords); +int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords); +int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership); +struct iwl_trans_dump_data * +iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx); +int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status, + bool test, bool reset); +int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset); +void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable); +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); +void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value); +int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val); +bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); +void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); + +/* transport gen 1 exported functions */ +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr); +int iwl_trans_pcie_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill); +void iwl_trans_pcie_stop_device(struct iwl_trans *trans); + /* common functions that are used by gen2 transport */ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans); void iwl_pcie_apm_config(struct iwl_trans *trans); @@ -849,13 +1134,10 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); -void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); -void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); -void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, - bool test, bool reset); int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, @@ -864,5 +1146,7 @@ void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, u32 
dst_addr, u64 src_addr, u32 byte_cnt); +int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data); #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 9c2461ba13c5..4a442d03d8d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2023 Intel Corporation + * Copyright (C) 2003-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1000,6 +1000,11 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget); +static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev) +{ + return *(struct iwl_trans_pcie **)netdev_priv(dev); +} + static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) { struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); @@ -1007,7 +1012,7 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) struct iwl_trans *trans; int ret; - trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); + trans_pcie = iwl_netdev_to_trans_pcie(napi->dev); trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); @@ -1034,7 +1039,7 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) struct iwl_trans *trans; int ret; - trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); + trans_pcie = iwl_netdev_to_trans_pcie(napi->dev); trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); @@ -1131,7 +1136,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) if (trans_pcie->msix_enabled) poll = iwl_pcie_napi_poll_msix; - netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, + netif_napi_add(trans_pcie->napi_dev, &rxq->napi, poll); napi_enable(&rxq->napi); } @@ -1296,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; bool page_stolen = false; int max_len = trans_pcie->rx_buf_bytes; u32 offset = 0; @@ -1673,6 +1678,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) */ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ @@ -1689,14 +1695,14 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) } for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - if (!trans->txqs.txq[i]) + if (!trans_pcie->txqs.txq[i]) continue; - del_timer(&trans->txqs.txq[i]->stuck_timer); + del_timer(&trans_pcie->txqs.txq[i]->stuck_timer); } /* The STATUS_FW_ERROR bit is set in this function. This must happen * before we wake up the command caller, to ensure a proper cleanup. 
*/ - iwl_trans_fw_error(trans, false); + iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans->wait_command_queue); @@ -2291,7 +2297,9 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) { IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n", inta_hw); - /* TODO: PLDR flow required here for >= Bz */ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_trans_pcie_reset(trans, + IWL_RESET_MODE_PROD_RESET); } /* Error detected by uCode */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index a4a4772330cf..793514a1852a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include "iwl-trans.h" #include "iwl-prph.h" @@ -43,7 +43,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + CSR_HW_IF_CONFIG_REG_HAP_WAKE); iwl_pcie_apm_config(trans); @@ -68,8 +68,8 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE | - CSR_HW_IF_CONFIG_REG_ENABLE_PME); + CSR_HW_IF_CONFIG_REG_WAKE_ME | + CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -123,14 +123,21 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) "timeout waiting for FW reset ACK (inta_hw=0x%x)\n", inta_hw); - if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) - iwl_trans_fw_error(trans, true); + if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) { + struct iwl_fw_error_dump_mode mode = { + .type = IWL_ERR_TYPE_RESET_HS_TIMEOUT, + .context = IWL_ERR_CONTEXT_FROM_OPMODE, + }; + iwl_op_mode_nic_error(trans->op_mode, + IWL_ERR_TYPE_RESET_HS_TIMEOUT); + iwl_op_mode_dump_error(trans->op_mode, &mode); + } } trans_pcie->fw_reset_state = FW_RESET_IDLE; } -void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) +static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -139,9 +146,9 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (trans_pcie->is_down) return; - if (trans->state >= IWL_TRANS_FW_STARTED) - if (trans_pcie->fw_reset_handshake) - iwl_trans_pcie_fw_reset_handshake(trans); + if (trans->state >= IWL_TRANS_FW_STARTED && + trans_pcie->fw_reset_handshake) + iwl_trans_pcie_fw_reset_handshake(trans); trans_pcie->is_down = true; @@ -247,7 +254,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ - if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size)) + if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ @@ -287,9 +294,6 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): pos = scnprintf(buf, buflen, "HRCDB"); 
break; - case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS): - pos = scnprintf(buf, buflen, "MS"); - break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM): pos = scnprintf(buf, buflen, "FM"); break; @@ -339,16 +343,17 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) pos += scnprintf(buf + pos, buflen - pos, "\n"); } -void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_pcie_reset_ict(trans); /* make sure all queues are not stopped/used */ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_stopped, 0, + sizeof(trans_pcie->txqs.queue_stopped)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); /* now that we got alive we can free the fw image & the context info. * paging memory cannot be freed, since FW will still use it @@ -525,6 +530,8 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, keep_ram_busy = !iwl_pcie_set_ltr(trans); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n", + iwl_read32(trans, CSR_FUNC_SCRATCH)); iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_ROM_START); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 63e13577aff8..c917ed4c19bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2007-2015, 2018-2023 Intel Corporation + * Copyright (C) 2007-2015, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -24,6 +24,7 @@ #include "fw/error-dump.h" #include "fw/dbg.h" #include "fw/api/tx.h" +#include "fw/acpi.h" #include "mei/iwl-mei.h" #include "internal.h" #include "iwl-fh.h" @@ -127,8 +128,7 @@ out: kfree(buf); } -static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, - bool retake_ownership) +int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { @@ -312,7 +312,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + CSR_HW_IF_CONFIG_REG_HAP_WAKE); iwl_pcie_apm_config(trans); @@ -440,7 +440,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) * SHRD_HW_RST is applied in S3.
*/ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + CSR_HW_IF_CONFIG_REG_PERSISTENCE); /* * Clear "initialization complete" bit to move adapter from @@ -509,8 +509,8 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE | - CSR_HW_IF_CONFIG_REG_ENABLE_PME); + CSR_HW_IF_CONFIG_REG_WAKE_ME | + CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -582,12 +582,12 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) int ret; iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + CSR_HW_IF_CONFIG_REG_PCI_OWN_SET); /* See if we got it */ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_PCI_OWN_SET, + CSR_HW_IF_CONFIG_REG_PCI_OWN_SET, HW_READY_TIMEOUT); if (ret >= 0) @@ -621,7 +621,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE); + CSR_HW_IF_CONFIG_REG_WAKE_ME); do { ret = iwl_pcie_set_hw_ready(trans); @@ -1336,8 +1336,8 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) } } -static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, bool run_in_rfkill) +int iwl_trans_pcie_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; @@ -1423,7 +1423,7 @@ out: return ret; } -static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) { iwl_pcie_reset_ict(trans); iwl_pcie_tx_start(trans, scd_addr); @@ -1458,7 +1458,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, iwl_trans_pcie_rf_kill(trans, hw_rfkill, false); } -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) +void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; @@ -1484,16 +1484,13 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq) IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", state ? 
"disabled" : "enabled"); - if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { - if (trans->trans_cfg->gen2) - _iwl_trans_pcie_gen2_stop_device(trans); - else - _iwl_trans_pcie_stop_device(trans, from_irq); - } + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) && + !WARN_ON(trans->trans_cfg->gen2)) + _iwl_trans_pcie_stop_device(trans, from_irq); } -void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, - bool test, bool reset) +static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, + bool test, bool reset) { iwl_disable_interrupts(trans); @@ -1508,9 +1505,17 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, iwl_pcie_synchronize_irqs(trans); - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_INIT); + } else { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + } if (reset) { /* @@ -1555,15 +1560,14 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) return 0; } -static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, - bool reset) +int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { int ret; if (!reset) /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + CSR_HW_IF_CONFIG_REG_PERSISTENCE); ret = iwl_pcie_d3_handshake(trans, true); if (ret) @@ -1574,9 +1578,9 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, return 0; } -static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status, - bool test, bool reset) +int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status, + bool test, bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; @@ -1589,8 +1593,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, goto out; } - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); ret = iwl_finish_nic_init(trans); if (ret) @@ -1636,6 +1644,8 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, out: if (*status == IWL_D3_STATUS_ALIVE) ret = iwl_pcie_d3_handshake(trans, false); + else + trans->state = IWL_TRANS_NO_FW; return ret; } @@ -1718,6 +1728,7 @@ enable_msi: static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) { +#if defined(CONFIG_SMP) int iter_rx_q, i, ret, cpu, offset; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1738,6 +1749,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) "Failed to set affinity mask for IRQ %d\n", trans_pcie->msix_entries[i].vector); } +#endif } static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, @@ -1875,7 +1887,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) return 0; } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) +int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = 
IWL_TRANS_GET_PCIE_TRANS(trans); int ret; @@ -1887,7 +1899,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) return ret; } -static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) +void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1907,17 +1919,17 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) iwl_pcie_synchronize_irqs(trans); } -static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) +void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) { writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } -static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) +void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } -static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) +u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } @@ -1930,7 +1942,7 @@ static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) return 0x000FFFFF; } -static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) +u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) { u32 mask = iwl_trans_pcie_prph_msk(trans); @@ -1939,8 +1951,7 @@ static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); } -static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, - u32 val) +void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) { u32 mask = iwl_trans_pcie_prph_msk(trans); @@ -1949,20 +1960,19 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } -static void iwl_trans_pcie_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) +void iwl_trans_pcie_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* free all first - we might be reconfigured for a different size */ iwl_pcie_free_rbs_pool(trans); - trans->txqs.cmd.q_id = trans_cfg->cmd_queue; - trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; - trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; - trans->txqs.page_offs = trans_cfg->cb_data_offs; - trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); - trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; + trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue; + trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo; + trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs; + trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); + trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; @@ -1981,19 +1991,12 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); - trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; + trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; - /* Initialize NAPI here - it should be before 
registering to mac80211 - * in the opmode but after the HW struct is allocated. - * As this function may be called again in some corner cases don't - * do anything if NAPI was already initialized. - */ - if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) - init_dummy_netdev(&trans_pcie->napi_dev); trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; } @@ -2075,6 +2078,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_free_ict(trans); } + free_netdev(trans_pcie->napi_dev); + iwl_pcie_free_invalid_tx_cmd(trans); iwl_pcie_free_fw_monitor(trans); @@ -2085,21 +2090,173 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) trans->dev); mutex_destroy(&trans_pcie->mutex); + + if (trans_pcie->txqs.tso_hdr_page) { + for_each_possible_cpu(i) { + struct iwl_tso_hdr_page *p = + per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i); + + if (p && p->page) + __free_page(p->page); + } + + free_percpu(trans_pcie->txqs.tso_hdr_page); + } + iwl_trans_free(trans); } -static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) +static union acpi_object * +iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value) +{ +#ifdef CONFIG_ACPI + struct iwl_dsm_internal_product_reset_cmd pldr_arg = { + .cmd = cmd, + .value = value, + }; + union acpi_object arg = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = sizeof(pldr_arg), + .buffer.pointer = (void *)&pldr_arg, + }; + static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29, + 0x81, 0x4F, 0x75, 0xE4, + 0xDD, 0x26, 0xB5, 0xFD); + + if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV, + DSM_INTERNAL_FUNC_PRODUCT_RESET)) + return ERR_PTR(-ENODEV); + + return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV, + DSM_INTERNAL_FUNC_PRODUCT_RESET, + &arg, &dsm_guid); +#else + return ERR_PTR(-EOPNOTSUPP); +#endif +} + +void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev) +{ + union acpi_object *res; + + res = iwl_trans_pcie_call_prod_reset_dsm(pdev, + DSM_INTERNAL_PLDR_CMD_GET_MODE, + 0); + if (IS_ERR(res)) + return; + + if (res->type != ACPI_TYPE_INTEGER) + IWL_ERR_DEV(&pdev->dev, + "unexpected return type from product reset DSM\n"); + else + IWL_DEBUG_DEV_POWER(&pdev->dev, + "product reset mode is 0x%llx\n", + res->integer.value); + + ACPI_FREE(res); +} + +static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable, + bool integrated) +{ + union acpi_object *res; + u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0; + + if (!integrated) + mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR | + DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON; + + res = iwl_trans_pcie_call_prod_reset_dsm(pdev, + DSM_INTERNAL_PLDR_CMD_SET_MODE, + mode); + if (IS_ERR(res)) { + if (enable) + IWL_ERR_DEV(&pdev->dev, + "ACPI _DSM not available (%d), cannot do product reset\n", + (int)PTR_ERR(res)); + return; + } + + ACPI_FREE(res); + IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n", + enable ? 
"En" : "Dis"); + iwl_trans_pcie_check_product_reset_mode(pdev); +} + +void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev) { - if (state) - set_bit(STATUS_TPOWER_PMI, &trans->status); + union acpi_object *res; + + res = iwl_trans_pcie_call_prod_reset_dsm(pdev, + DSM_INTERNAL_PLDR_CMD_GET_STATUS, + 0); + if (IS_ERR(res)) + return; + + if (res->type != ACPI_TYPE_INTEGER) + IWL_ERR_DEV(&pdev->dev, + "unexpected return type from product reset DSM\n"); else - clear_bit(STATUS_TPOWER_PMI, &trans->status); + IWL_DEBUG_DEV_POWER(&pdev->dev, + "product reset status is 0x%llx\n", + res->integer.value); + + ACPI_FREE(res); +} + +static void iwl_trans_pcie_call_reset(struct pci_dev *pdev) +{ +#ifdef CONFIG_ACPI + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *p, *ref; + acpi_status status; + int ret = -EINVAL; + + status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev), + "_PRR", NULL, &buffer); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n"); + goto out; + } + p = buffer.pointer; + + if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) { + pci_err(pdev, "Bad _PRR return type\n"); + goto out; + } + + ref = &p->package.elements[0]; + if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) { + pci_err(pdev, "_PRR wasn't a reference\n"); + goto out; + } + + status = acpi_evaluate_object(ref->reference.handle, + "_RST", NULL, NULL); + if (ACPI_FAILURE(status)) { + pci_err(pdev, + "Failed to call _RST on object returned by _PRR (%d)\n", + status); + goto out; + } + ret = 0; +out: + kfree(buffer.pointer); + if (!ret) { + IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n"); + return; + } + IWL_DEBUG_DEV_POWER(&pdev->dev, + "No BIOS support, using pci_reset_function()\n"); +#endif + pci_reset_function(pdev); } struct iwl_trans_pcie_removal { struct pci_dev *pdev; struct work_struct work; - bool rescan; + enum iwl_reset_mode mode; + bool integrated; }; static void iwl_trans_pcie_removal_wk(struct work_struct *wk) @@ -2117,14 +2274,66 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) if (!bus) goto out; - dev_err(&pdev->dev, "Device gone - attempting removal\n"); - kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); + if (removal->mode == IWL_RESET_MODE_PROD_RESET) { + struct pci_dev *bt = NULL; + + if (!removal->integrated) { + /* discrete devices have WiFi/BT at function 0/1 */ + int slot = PCI_SLOT(pdev->devfn); + int func = PCI_FUNC(pdev->devfn); + + if (func == 0) + bt = pci_get_slot(bus, PCI_DEVFN(slot, 1)); + else + pci_info(pdev, "Unexpected function %d\n", + func); + } else { + /* on integrated we have to look up by ID (same bus) */ + static const struct pci_device_id bt_device_ids[] = { +#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) } + BT_DEV(0xA876), /* LNL */ + BT_DEV(0xE476), /* PTL-P */ + BT_DEV(0xE376), /* PTL-H */ + BT_DEV(0xD346), /* NVL-H */ + BT_DEV(0x6E74), /* NVL-S */ + BT_DEV(0x4D76), /* WCL */ + BT_DEV(0xD246), /* RZL-H */ + BT_DEV(0x6C46), /* RZL-M */ + {} + }; + struct pci_dev *tmp = NULL; + + for_each_pci_dev(tmp) { + if (tmp->bus != bus) + continue; + + if (pci_match_id(bt_device_ids, tmp)) { + bt = tmp; + break; + } + } + } + + if (bt) { + pci_info(bt, "Removal by WiFi due to product reset\n"); + pci_stop_and_remove_bus_device(bt); + pci_dev_put(bt); + } + } + + iwl_trans_pcie_set_product_reset(pdev, + removal->mode == + IWL_RESET_MODE_PROD_RESET, + removal->integrated); + if (removal->mode >= IWL_RESET_MODE_FUNC_RESET) + iwl_trans_pcie_call_reset(pdev); + 
pci_stop_and_remove_bus_device(pdev); pci_dev_put(pdev); - if (removal->rescan) { + if (removal->mode >= IWL_RESET_MODE_RESCAN) { if (bus->parent) bus = bus->parent; pci_rescan_bus(bus); @@ -2137,14 +2346,27 @@ out: module_put(THIS_MODULE); } -void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan) +void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode) { struct iwl_trans_pcie_removal *removal; + char _msg = 0, *msg = &_msg; + + if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY)) + return; if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return; - IWL_ERR(trans, "Device gone - scheduling removal!\n"); + if (trans->me_present && mode == IWL_RESET_MODE_PROD_RESET) { + mode = IWL_RESET_MODE_FUNC_RESET; + if (trans->me_present < 0) + msg = " instead of product reset as ME may be present"; + else + msg = " instead of product reset as ME is present"; + } + + IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg); + iwl_pcie_dump_csr(trans); /* @@ -2171,12 +2393,13 @@ void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan) set_bit(STATUS_TRANS_DEAD, &trans->status); removal->pdev = to_pci_dev(trans->dev); - removal->rescan = rescan; + removal->mode = mode; + removal->integrated = trans->trans_cfg->integrated; INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); pci_dev_get(removal->pdev); schedule_work(&removal->work); } -EXPORT_SYMBOL(iwl_trans_pcie_remove); +EXPORT_SYMBOL(iwl_trans_pcie_reset); /* * This version doesn't disable BHs but rather assumes they're @@ -2241,7 +2464,8 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) iwl_trans_pcie_dump_regs(trans); if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) - iwl_trans_pcie_remove(trans, false); + iwl_trans_pcie_reset(trans, + IWL_RESET_MODE_REMOVE_ONLY); else iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); @@ -2259,7 +2483,7 @@ out: return true; } -static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { bool ret; @@ -2273,7 +2497,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) return false; } -static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) +void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2303,8 +2527,8 @@ out: spin_unlock_bh(&trans_pcie->reg_lock); } -static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) +int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) { #define IWL_MAX_HW_ERRS 5 unsigned int num_consec_hw_errors = 0; @@ -2353,8 +2577,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, return 0; } -static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords) +int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords) { int offs, ret = 0; const u32 *vals = buf; @@ -2371,8 +2595,8 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, return ret; } -static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, - u32 *val) +int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val) { return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, ofs, val); @@ -2380,8 +2604,8 @@ static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, #define IWL_FLUSH_WAIT_MS 2000 -static int 
iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, - struct iwl_trans_rxq_dma_data *data) +int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2396,8 +2620,9 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, return 0; } -static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) +int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; unsigned long now = jiffies; bool overflow_tx; @@ -2407,11 +2632,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; - if (!test_bit(txq_idx, trans->txqs.queue_used)) + if (!test_bit(txq_idx, trans_pcie->txqs.queue_used)) return -EINVAL; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); - txq = trans->txqs.txq[txq_idx]; + txq = trans_pcie->txqs.txq[txq_idx]; spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || @@ -2457,8 +2682,9 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) return 0; } -static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) +int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int cnt; int ret = 0; @@ -2467,9 +2693,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) cnt < trans->trans_cfg->base_params->num_of_queues; cnt++) { - if (cnt == trans->txqs.cmd.q_id) + if (cnt == trans_pcie->txqs.cmd.q_id) continue; - if (!test_bit(cnt, trans->txqs.queue_used)) + if (!test_bit(cnt, trans_pcie->txqs.queue_used)) continue; if (!(BIT(cnt) & txq_bm)) continue; @@ -2482,8 +2708,8 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) return ret; } -static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, - u32 mask, u32 value) +void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2642,12 +2868,13 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state = v; struct iwl_trans *trans = priv->trans; - struct iwl_txq *txq = trans->txqs.txq[state->pos]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos]; seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", (unsigned int)state->pos, - !!test_bit(state->pos, trans->txqs.queue_used), - !!test_bit(state->pos, trans->txqs.queue_stopped)); + !!test_bit(state->pos, trans_pcie->txqs.queue_used), + !!test_bit(state->pos, trans_pcie->txqs.queue_stopped)); if (txq) seq_printf(seq, "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", @@ -2657,7 +2884,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) else seq_puts(seq, "(unallocated)"); - if (state->pos == trans->txqs.cmd.q_id) + if (state->pos == trans_pcie->txqs.cmd.q_id) seq_puts(seq, " (HCMD)"); seq_puts(seq, "\n"); @@ -3025,12 +3252,47 @@ static ssize_t iwl_dbgfs_rf_read(struct file *file, strlen(trans_pcie->rf_name)); } +static ssize_t iwl_dbgfs_reset_write(struct file *file, + const char __user *user_buf, 
+ size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + static const char * const modes[] = { + [IWL_RESET_MODE_SW_RESET] = "n/a", + [IWL_RESET_MODE_REPROBE] = "n/a", + [IWL_RESET_MODE_REMOVE_ONLY] = "remove", + [IWL_RESET_MODE_RESCAN] = "rescan", + [IWL_RESET_MODE_FUNC_RESET] = "function", + [IWL_RESET_MODE_PROD_RESET] = "product", + }; + char buf[10] = {}; + int mode; + + if (count > sizeof(buf) - 1) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + mode = sysfs_match_string(modes, buf); + if (mode < 0) + return mode; + + if (mode < IWL_RESET_MODE_REMOVE_ONLY) + return -EINVAL; + + iwl_trans_pcie_reset(trans, mode); + + return count; +} + DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(fh_reg); DEBUGFS_READ_FILE_OPS(rx_queue); DEBUGFS_WRITE_FILE_OPS(csr); DEBUGFS_READ_WRITE_FILE_OPS(rfkill); DEBUGFS_READ_FILE_OPS(rf); +DEBUGFS_WRITE_FILE_OPS(reset); static const struct file_operations iwl_dbgfs_tx_queue_ops = { .owner = THIS_MODULE, @@ -3059,9 +3321,10 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) DEBUGFS_ADD_FILE(rfkill, dir, 0600); DEBUGFS_ADD_FILE(monitor_data, dir, 0400); DEBUGFS_ADD_FILE(rf, dir, 0400); + DEBUGFS_ADD_FILE(reset, dir, 0200); } -static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) +void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct cont_rec *data = &trans_pcie->fw_mon_data; @@ -3074,10 +3337,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 cmdlen = 0; int i; - for (i = 0; i < trans->txqs.tfd.max_tbs; i++) + for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++) cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); return cmdlen; @@ -3338,15 +3602,14 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) return 0; } -static struct iwl_trans_dump_data * -iwl_trans_pcie_dump_data(struct iwl_trans *trans, - u32 dump_mask, +struct iwl_trans_dump_data * +iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs = 0, monitor_len = 0; @@ -3413,7 +3676,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, data = (void *)dump_data->data; if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { - u16 tfd_size = trans->txqs.tfd.size; + u16 tfd_size = trans_pcie->txqs.tfd.size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); txcmd = (void *)data->data; @@ -3489,7 +3752,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, return dump_data; } -static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) +void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) { if (enable) iwl_enable_interrupts(trans); @@ -3497,7 +3760,7 @@ static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) iwl_disable_interrupts(trans); } -static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) { u32 
inta_addr, sw_err_bit; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -3516,95 +3779,15 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); } -#define IWL_TRANS_COMMON_OPS \ - .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ - .write8 = iwl_trans_pcie_write8, \ - .write32 = iwl_trans_pcie_write32, \ - .read32 = iwl_trans_pcie_read32, \ - .read_prph = iwl_trans_pcie_read_prph, \ - .write_prph = iwl_trans_pcie_write_prph, \ - .read_mem = iwl_trans_pcie_read_mem, \ - .write_mem = iwl_trans_pcie_write_mem, \ - .read_config32 = iwl_trans_pcie_read_config32, \ - .configure = iwl_trans_pcie_configure, \ - .set_pmi = iwl_trans_pcie_set_pmi, \ - .sw_reset = iwl_trans_pcie_sw_reset, \ - .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ - .release_nic_access = iwl_trans_pcie_release_nic_access, \ - .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ - .dump_data = iwl_trans_pcie_dump_data, \ - .d3_suspend = iwl_trans_pcie_d3_suspend, \ - .d3_resume = iwl_trans_pcie_d3_resume, \ - .interrupts = iwl_trans_pci_interrupts, \ - .sync_nmi = iwl_trans_pcie_sync_nmi, \ - .imr_dma_data = iwl_trans_pcie_copy_imr \ - -static const struct iwl_trans_ops trans_ops_pcie = { - IWL_TRANS_COMMON_OPS, - .start_hw = iwl_trans_pcie_start_hw, - .fw_alive = iwl_trans_pcie_fw_alive, - .start_fw = iwl_trans_pcie_start_fw, - .stop_device = iwl_trans_pcie_stop_device, - - .send_cmd = iwl_pcie_enqueue_hcmd, - - .tx = iwl_trans_pcie_tx, - .reclaim = iwl_txq_reclaim, - - .txq_disable = iwl_trans_pcie_txq_disable, - .txq_enable = iwl_trans_pcie_txq_enable, - - .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, - - .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, - - .freeze_txq_timer = iwl_trans_txq_freeze_timer, -#ifdef CONFIG_IWLWIFI_DEBUGFS - .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, -#endif -}; - -static const struct iwl_trans_ops trans_ops_pcie_gen2 = { - IWL_TRANS_COMMON_OPS, - .start_hw = iwl_trans_pcie_start_hw, - .fw_alive = iwl_trans_pcie_gen2_fw_alive, - .start_fw = iwl_trans_pcie_gen2_start_fw, - .stop_device = iwl_trans_pcie_gen2_stop_device, - - .send_cmd = iwl_pcie_gen2_enqueue_hcmd, - - .tx = iwl_txq_gen2_tx, - .reclaim = iwl_txq_reclaim, - - .set_q_ptrs = iwl_txq_set_q_ptrs, - - .txq_alloc = iwl_txq_dyn_alloc, - .txq_free = iwl_txq_dyn_free, - .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, - .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, - .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm, - .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, - .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power, - .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, -#ifdef CONFIG_IWLWIFI_DEBUGFS - .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, -#endif -}; - struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg_trans_params *cfg_trans) { - struct iwl_trans_pcie *trans_pcie; + struct iwl_trans_pcie *trans_pcie, **priv; struct iwl_trans *trans; int ret, addr_size; - const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; - void __iomem * const *table; u32 bar0; - if (!cfg_trans->gen2) - ops = &trans_ops_pcie; - /* reassign our BAR 0 if invalid due to possible runtime PM races */ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) { @@ -3617,13 +3800,73 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (ret) return ERR_PTR(ret); - trans = 
iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg_trans); if (!trans) return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + if (trans->trans_cfg->gen2) { + trans_pcie->txqs.tfd.addr_size = 64; + trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; + trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); + } else { + trans_pcie->txqs.tfd.addr_size = 36; + trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; + trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd); + } + trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie); + + /* Set a short watchdog for the command queue */ + trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT; + + trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); + if (!trans_pcie->txqs.tso_hdr_page) { + ret = -ENOMEM; + goto out_free_trans; + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + trans_pcie->txqs.bc_tbl_size = + sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + trans_pcie->txqs.bc_tbl_size = + sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; + else + trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); + /* + * For gen2 devices, we use a single allocation for each byte-count + * table, but they're pretty small (1k) so use a DMA pool that we + * allocate here. + */ + if (trans->trans_cfg->gen2) { + trans_pcie->txqs.bc_pool = + dmam_pool_create("iwlwifi:bc", trans->dev, + trans_pcie->txqs.bc_tbl_size, + 256, 0); + if (!trans_pcie->txqs.bc_pool) { + ret = -ENOMEM; + goto out_free_tso; + } + } + + /* Some things must not change even if the config does */ + WARN_ON(trans_pcie->txqs.tfd.addr_size != + (trans->trans_cfg->gen2 ? 64 : 36)); + + /* Initialize NAPI here - it should be before registering to mac80211 + * in the opmode but after the HW struct is allocated. 
+ */ + trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *)); + if (!trans_pcie->napi_dev) { + ret = -ENOMEM; + goto out_free_tso; + } + /* The private struct in netdev is a pointer to struct iwl_trans_pcie */ + priv = netdev_priv(trans_pcie->napi_dev); + *priv = trans_pcie; + trans_pcie->trans = trans; trans_pcie->opmode_down = true; spin_lock_init(&trans_pcie->irq_lock); @@ -3638,7 +3881,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, WQ_HIGHPRI | WQ_UNBOUND, 0); if (!trans_pcie->rba.alloc_wq) { ret = -ENOMEM; - goto out_free_trans; + goto out_free_ndev; } INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); @@ -3657,7 +3900,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); - addr_size = trans->txqs.tfd.addr_size; + addr_size = trans_pcie->txqs.tfd.addr_size; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); if (ret) { ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); @@ -3668,22 +3911,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } - ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) { - dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); - goto out_no_pci; - } - - table = pcim_iomap_table(pdev); - if (!table) { - dev_err(&pdev->dev, "pcim_iomap_table failed\n"); - ret = -ENOMEM; + dev_err(&pdev->dev, "Requesting all PCI BARs failed.\n"); goto out_no_pci; } - trans_pcie->hw_base = table[0]; + trans_pcie->hw_base = pcim_iomap(pdev, 0, 0); if (!trans_pcie->hw_base) { - dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); + dev_err(&pdev->dev, "Could not ioremap PCI BAR 0.\n"); ret = -ENODEV; goto out_no_pci; } @@ -3758,6 +3994,10 @@ out_free_ict: iwl_pcie_free_ict(trans); out_no_pci: destroy_workqueue(trans_pcie->rba.alloc_wq); +out_free_ndev: + free_netdev(trans_pcie->napi_dev); +out_free_tso: + free_percpu(trans_pcie->txqs.tso_hdr_page); out_free_trans: iwl_trans_free(trans); return ERR_PTR(ret); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index aabbef114bc2..401919f9fe88 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2023 Intel Corporation + * Copyright (C) 2018-2020, 2023-2025 Intel Corporation */ #include <net/tso.h> #include <linux/tcp.h> @@ -11,7 +11,1185 @@ #include "iwl-io.h" #include "internal.h" #include "fw/api/tx.h" -#include "queue/tx.h" +#include "fw/api/commands.h" +#include "fw/api/datapath.h" +#include "iwl-scd.h" + +static struct page *get_workaround_page(struct iwl_trans *trans, + struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tso_page_info *info; + struct page **page_ptr; + struct page *ret; + dma_addr_t phys; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + + ret = alloc_page(GFP_ATOMIC); + if (!ret) + return NULL; + + info = IWL_TSO_PAGE_INFO(page_address(ret)); + + /* Create a DMA mapping for the page */ + phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(trans->dev, phys))) { + __free_page(ret); + return NULL; + } + + /* Store physical address and set use count */ + info->dma_addr = phys; + 
refcount_set(&info->use_count, 1); + + /* set the chaining pointer to the previous page if there */ + info->next = *page_ptr; + *page_ptr = ret; + + return ret; +} + +/* + * Add a TB and if needed apply the FH HW bug workaround; + * meta != NULL indicates that it's a page mapping and we + * need to dma_unmap_page() and set the meta->tbs bit in + * this case. + */ +static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + dma_addr_t phys, void *virt, + u16 len, struct iwl_cmd_meta *meta, + bool unmap) +{ + dma_addr_t oldphys = phys; + struct page *page; + int ret; + + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + + if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { + ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); + + if (ret < 0) + goto unmap; + + if (meta) + meta->tbs |= BIT(ret); + + ret = 0; + goto trace; + } + + /* + * Work around a hardware bug. If (as expressed in the + * condition above) the TB ends on a 32-bit boundary, + * then the next TB may be accessed with the wrong + * address. + * To work around it, copy the data elsewhere and make + * a new mapping for it so the device will not fail. + */ + + if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) { + ret = -ENOBUFS; + goto unmap; + } + + page = get_workaround_page(trans, skb); + if (!page) { + ret = -ENOMEM; + goto unmap; + } + + memcpy(page_address(page), virt, len); + + /* + * This is a bit odd, but performance does not matter here, what + * matters are the expectations of the calling code and TB cleanup + * function. + * + * As such, if unmap is set, then create another mapping for the TB + * entry as it will be unmapped later. On the other hand, if it is not + * set, then the TB entry will not be unmapped and instead we simply + * reference and sync the mapping that get_workaround_page() created. 
+ */ + if (unmap) { + phys = dma_map_single(trans->dev, page_address(page), len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + } else { + phys = iwl_pcie_get_tso_page_phys(page_address(page)); + dma_sync_single_for_device(trans->dev, phys, len, + DMA_TO_DEVICE); + } + + ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); + if (ret < 0) { + /* unmap the new allocation as single */ + oldphys = phys; + meta = NULL; + goto unmap; + } + + IWL_DEBUG_TX(trans, + "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", + len, (unsigned long long)oldphys, + (unsigned long long)phys); + + ret = 0; +unmap: + if (!unmap) + goto trace; + + if (meta) + dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); +trace: + trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); + + return ret; +} + +static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + struct iwl_cmd_meta *out_meta, + int start_len, + u8 hdr_len, + struct iwl_device_tx_cmd *dev_cmd) +{ +#ifdef CONFIG_INET + struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; + unsigned int mss = skb_shinfo(skb)->gso_size; + unsigned int data_offset = 0; + dma_addr_t start_hdr_phys; + u16 length, amsdu_pad; + u8 *start_hdr; + struct sg_table *sgt; + struct tso_t tso; + + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), + &dev_cmd->hdr, start_len, 0); + + ip_hdrlen = skb_network_header_len(skb); + snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); + total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; + amsdu_pad = 0; + + /* total amount of header we may need for this A-MSDU */ + hdr_room = DIV_ROUND_UP(total_len, mss) * + (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); + + /* Our device supports 9 segments at most, it will fit in 1 page */ + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, + snap_ip_tcp_hdrlen + hdr_len); + if (!sgt) + return -ENOMEM; + + start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr); + + /* + * Pull the ieee80211 header to be able to use TSO core, + * we will restore it for the tx_status flow. + */ + skb_pull(skb, hdr_len); + + /* + * Remove the length of all the headers that we don't actually + * have in the MPDU by themselves, but that we duplicate into + * all the different MSDUs inside the A-MSDU. + */ + le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); + + tso_start(skb, &tso); + + while (total_len) { + /* this is the data left for this subframe */ + unsigned int data_left = min_t(unsigned int, mss, total_len); + unsigned int tb_len; + dma_addr_t tb_phys; + u8 *pos_hdr = start_hdr; + + total_len -= data_left; + + memset(pos_hdr, 0, amsdu_pad); + pos_hdr += amsdu_pad; + amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + + data_left)) & 0x3; + ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr)); + pos_hdr += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr)); + pos_hdr += ETH_ALEN; + + length = snap_ip_tcp_hdrlen + data_left; + *((__be16 *)pos_hdr) = cpu_to_be16(length); + pos_hdr += sizeof(length); + + /* + * This will copy the SNAP as well which will be considered + * as MAC header. 
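+ * Each subframe thus carries DA + SA + a length field, followed by
+ * the SNAP/IP/TCP headers that tso_build_hdr() writes below.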
+ */
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
+
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ tb_len = pos_hdr - start_hdr;
+ tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, tb_len);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = pos_hdr;
+
+ /* put the payload */
+ while (data_left) {
+ int ret;
+
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
+ tb_len);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
+ goto out_err;
+
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL, false);
+ if (ret)
+ goto out_err;
+
+ data_left -= tb_len;
+ data_offset += tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len;
+ void *tb1_addr;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary. 
+ */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); + + if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta, + len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd)) + goto out_err; + + /* building the A-MSDU might have changed this data, memcpy it now */ + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + return tfd; + +out_err: + iwl_pcie_free_tso_pages(trans, skb, out_meta); + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + struct iwl_cmd_meta *out_meta) +{ + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t tb_phys; + unsigned int fragsz = skb_frag_size(frag); + int ret; + + if (!fragsz) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + fragsz, DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb_frag_address(frag), + fragsz, out_meta, true); + if (ret) + return ret; + } + + return 0; +} + +static struct +iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_tx_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len, + bool pad) +{ + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); + dma_addr_t tb_phys; + int len, tb1_len, tb2_len; + void *tb1_addr; + struct sk_buff *frag; + + tb_phys = iwl_txq_get_first_tb_dma(txq, idx); + + /* The first TB points to bi-directional DMA data */ + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). + */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; + + if (pad) + tb1_len = ALIGN(len, 4); + else + tb1_len = len; + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. 
+ */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, + IWL_FIRST_TB_SIZE + tb1_len, hdr_len); + + /* set up TFD's third entry to point to remainder of skb's head */ + tb2_len = skb_headlen(skb) - hdr_len; + + if (tb2_len > 0) { + int ret; + + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb->data + hdr_len, tb2_len, + NULL, true); + if (ret) + goto out_err; + } + + if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) + goto out_err; + + skb_walk_frags(skb, frag) { + int ret; + + tb_phys = dma_map_single(trans->dev, frag->data, + skb_headlen(frag), DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + frag->data, + skb_headlen(frag), NULL, + true); + if (ret) + goto out_err; + if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) + goto out_err; + } + + return tfd; + +out_err: + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static +struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_tx_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); + int len, hdr_len; + bool amsdu; + + /* There must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen2, dram_info) > + IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen3, dram_info) > + IWL_FIRST_TB_SIZE); + + memset(tfd, 0, sizeof(*tfd)); + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + len = sizeof(struct iwl_tx_cmd_gen2); + else + len = sizeof(struct iwl_tx_cmd_gen3); + + amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + /* + * Only build A-MSDUs here if doing so by GSO, otherwise it may be + * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been + * built in the higher layers already. + */ + if (amsdu && skb_shinfo(skb)->gso_size) + return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, + out_meta, hdr_len, len); + return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, + hdr_len, len, !amsdu); +} + +int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) +{ + unsigned int max; + unsigned int used; + + /* + * To avoid ambiguity between empty and completely full queues, there + * should always be less than max_tfd_queue_size elements in the queue. + * If q->n_window is smaller than max_tfd_queue_size, there is no need + * to reserve any queue entries for this purpose. + */ + if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) + max = q->n_window; + else + max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; + + /* + * max_tfd_queue_size is a power of 2, so the following is equivalent to + * modulo by max_tfd_queue_size and is well defined. 
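+ * For example, with max_tfd_queue_size 256, read_ptr 250 and
+ * write_ptr 5, the subtraction wraps, but masking with 255 still
+ * yields the correct 11 used entries.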
+ */ + used = (q->write_ptr - q->read_ptr) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + + if (WARN_ON(used > max)) + return 0; + + return max - used; +} + +/* + * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array + */ +static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + u8 filled_tfd_size, num_fetch_chunks; + u16 len = byte_cnt; + __le16 bc_ent; + + if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) + return; + + filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + + num_tbs * sizeof(struct iwl_tfh_tb); + /* + * filled_tfd_size contains the number of filled bytes in the TFD. + * Dividing it by 64 will give the number of chunks to fetch + * to SRAM- 0 for one chunk, 1 for 2 and so on. + * If, for example, TFD contains only 3 TBs then 32 bytes + * of the TFD are used, and only one chunk of 64 bytes should + * be fetched + */ + num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; + + /* Starting from AX210, the HW expects bytes */ + WARN_ON(trans_pcie->txqs.bc_table_dword); + WARN_ON(len > 0x3FFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); + scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; + } else { + struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; + + /* Before AX210, the HW expects DW */ + WARN_ON(!trans_pcie->txqs.bc_table_dword); + len = DIV_ROUND_UP(len, 4); + WARN_ON(len > 0xFFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); + scd_bc_tbl->tfd_offset[idx] = bc_ent; + } +} + +static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd) +{ + return le16_to_cpu(tfd->num_tbs) & 0x1f; +} + +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, + dma_addr_t addr, u16 len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int idx = iwl_txq_gen2_get_num_tbs(tfd); + struct iwl_tfh_tb *tb; + + /* Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... + */ + WARN(iwl_txq_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) + return -EINVAL; + tb = &tfd->tbs[idx]; + + /* Each TFD can point to a maximum max_tbs Tx buffers */ + if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans_pcie->txqs.tfd.max_tbs); + return -EINVAL; + } + + put_unaligned_le64(addr, &tb->addr); + tb->tb_len = cpu_to_le16(len); + + tfd->num_tbs = cpu_to_le16(idx + 1); + + return idx; +} + +void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i, num_tbs; + + /* Sanity check on number of chunks */ + num_tbs = iwl_txq_gen2_get_num_tbs(tfd); + + if (num_tbs > trans_pcie->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); + return; + } + + /* TB1 is mapped directly, the rest is the TSO page and SG list. 
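
+ * In that case only TB1 is unmapped below; the TSO page and SG list
+ * mappings are released by iwl_pcie_free_tso_pages().
+ 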
*/ + if (meta->sg_offset) + num_tbs = 2; + + /* first TB is never freed - it's the bidirectional DMA data */ + for (i = 1; i < num_tbs; i++) { + if (meta->tbs & BIT(i)) + dma_unmap_page(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + } + + iwl_txq_set_tfd_invalid_gen2(trans, tfd); +} + +static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +{ + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb; + + lockdep_assert_held(&txq->lock); + + if (!txq->entries) + return; + + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_txq_get_tfd(trans, txq, idx)); + + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } +} + +/* + * iwl_txq_inc_wr_ptr - Send new write index to hardware + */ +static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); + + /* + * if not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). + */ + iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); +} + +int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_cmd_meta *out_meta; + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; + u16 cmd_len; + int idx; + void *tfd; + + if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", txq_id)) + return -EINVAL; + + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used), + "TX on unused queue %d\n", txq_id)) + return -EINVAL; + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) && + __skb_linearize(skb)) + return -ENOMEM; + + spin_lock(&txq->lock); + + if (iwl_txq_space(trans, txq) < txq->high_mark) { + iwl_txq_stop(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_txq_space(trans, txq) < 3)) { + struct iwl_device_tx_cmd **dev_cmd_ptr; + + dev_cmd_ptr = (void *)((u8 *)skb->cb + + trans_pcie->txqs.dev_cmd_offs); + + *dev_cmd_ptr = dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + spin_unlock(&txq->lock); + return 0; + } + } + + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + + /* Set up driver data for this TFD */ + txq->entries[idx].skb = skb; + txq->entries[idx].cmd = dev_cmd; + + dev_cmd->hdr.sequence = + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(idx))); + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->entries[idx].meta; + memset(out_meta, 0, sizeof(*out_meta)); + + tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); + if (!tfd) { + spin_unlock(&txq->lock); + return -1; + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen3->len); + } else { + struct iwl_tx_cmd_gen2 
*tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (!WARN_ON_ONCE(!skb))
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_trans_pcie_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_txq_gen2_free - Deallocate DMA queue.
+ * @txq_id: ID of the transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers and finally the "txq" structure itself. 
+ */ +static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq; + int i; + + if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", txq_id)) + return; + + txq = trans_pcie->txqs.txq[txq_id]; + + if (WARN_ON(!txq)) + return; + + iwl_txq_gen2_unmap(trans, txq_id); + + /* De-alloc array of command/tx buffers */ + if (txq_id == trans_pcie->txqs.cmd.q_id) + for (i = 0; i < txq->n_window; i++) { + kfree_sensitive(txq->entries[i].cmd); + kfree_sensitive(txq->entries[i].free_buf); + } + del_timer_sync(&txq->stuck_timer); + + iwl_txq_gen2_free_memory(trans, txq); + + trans_pcie->txqs.txq[txq_id] = NULL; + + clear_bit(txq_id, trans_pcie->txqs.queue_used); +} + +static struct iwl_txq * +iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t bc_tbl_size, bc_tbl_entries; + struct iwl_txq *txq; + int ret; + + WARN_ON(!trans_pcie->txqs.bc_tbl_size); + + bc_tbl_size = trans_pcie->txqs.bc_tbl_size; + bc_tbl_entries = bc_tbl_size / sizeof(u16); + + if (WARN_ON(size > bc_tbl_entries)) + return ERR_PTR(-EINVAL); + + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) + return ERR_PTR(-ENOMEM); + + txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL, + &txq->bc_tbl.dma); + if (!txq->bc_tbl.addr) { + IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); + kfree(txq); + return ERR_PTR(-ENOMEM); + } + + ret = iwl_pcie_txq_alloc(trans, txq, size, false); + if (ret) { + IWL_ERR(trans, "Tx queue alloc failed\n"); + goto error; + } + ret = iwl_txq_init(trans, txq, size, false); + if (ret) { + IWL_ERR(trans, "Tx queue init failed\n"); + goto error; + } + + txq->wd_timeout = msecs_to_jiffies(timeout); + + return txq; + +error: + iwl_txq_gen2_free_memory(trans, txq); + return ERR_PTR(ret); +} + +static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_host_cmd *hcmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tx_queue_cfg_rsp *rsp; + int ret, qid; + u32 wr_ptr; + + if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != + sizeof(*rsp))) { + ret = -EINVAL; + goto error_free_resp; + } + + rsp = (void *)hcmd->resp_pkt->data; + qid = le16_to_cpu(rsp->queue_number); + wr_ptr = le16_to_cpu(rsp->write_pointer); + + if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) { + WARN_ONCE(1, "queue index %d unsupported", qid); + ret = -EIO; + goto error_free_resp; + } + + if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) { + WARN_ONCE(1, "queue %d already used", qid); + ret = -EIO; + goto error_free_resp; + } + + if (WARN_ONCE(trans_pcie->txqs.txq[qid], + "queue %d already allocated\n", qid)) { + ret = -EIO; + goto error_free_resp; + } + + txq->id = qid; + trans_pcie->txqs.txq[qid] = txq; + wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + + /* Place first TFD at index corresponding to start sequence number */ + txq->read_ptr = wr_ptr; + txq->write_ptr = wr_ptr; + + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); + + iwl_free_resp(hcmd); + return qid; + +error_free_resp: + iwl_free_resp(hcmd); + iwl_txq_gen2_free_memory(trans, txq); + return ret; +} + +int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, + u8 tid, int size, unsigned int timeout) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq; + union { + 
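
+ /* queue_alloc_cmd_ver 0 fills and sends ->old, version 3 uses ->new */
+ 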
struct iwl_tx_queue_cfg_cmd old; + struct iwl_scd_queue_cfg_cmd new; + } cmd; + struct iwl_host_cmd hcmd = { + .flags = CMD_WANT_SKB, + }; + int ret; + + /* take the min with bytecount table entries allowed */ + size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16)); + /* but must be power of 2 values for calculating read/write pointers */ + size = rounddown_pow_of_two(size); + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && + trans->hw_rev_step == SILICON_A_STEP) { + size = 4096; + txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); + } else { + do { + txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); + if (!IS_ERR(txq)) + break; + + IWL_DEBUG_TX_QUEUES(trans, + "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n", + size, sta_mask, tid, + PTR_ERR(txq)); + size /= 2; + } while (size >= 16); + } + + if (IS_ERR(txq)) + return PTR_ERR(txq); + + if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) { + memset(&cmd.old, 0, sizeof(cmd.old)); + cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); + cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); + cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); + cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); + cmd.old.tid = tid; + + if (hweight32(sta_mask) != 1) { + ret = -EINVAL; + goto error; + } + cmd.old.sta_id = ffs(sta_mask) - 1; + + hcmd.id = SCD_QUEUE_CFG; + hcmd.len[0] = sizeof(cmd.old); + hcmd.data[0] = &cmd.old; + } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) { + memset(&cmd.new, 0, sizeof(cmd.new)); + cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); + cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); + cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); + cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); + cmd.new.u.add.flags = cpu_to_le32(flags); + cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); + cmd.new.u.add.tid = tid; + + hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); + hcmd.len[0] = sizeof(cmd.new); + hcmd.data[0] = &cmd.new; + } else { + ret = -EOPNOTSUPP; + goto error; + } + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + goto error; + + return iwl_pcie_txq_alloc_response(trans, txq, &hcmd); + +error: + iwl_txq_gen2_free_memory(trans, txq); + return ret; +} + +void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (WARN(queue >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", queue)) + return; + + /* + * Upon HW Rfkill - we stop the device, and then stop the queues + * in the op_mode. Just for the sake of the simplicity of the op_mode, + * allow the op_mode to call txq_disable after it already called + * stop_device. 
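+ * In that flow the queue_used bit is already clear, so only warn
+ * if the device still claims to be enabled.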
+ */ + if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) { + WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), + "queue %d not used", queue); + return; + } + + iwl_txq_gen2_free(trans, queue); + + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); +} + +void iwl_txq_gen2_tx_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); + + /* Free all TX queues */ + for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) { + if (!trans_pcie->txqs.txq[i]) + continue; + + iwl_txq_gen2_free(trans, i); + } +} + +int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *queue; + int ret; + + /* alloc and init the tx queue */ + if (!trans_pcie->txqs.txq[txq_id]) { + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) { + IWL_ERR(trans, "Not enough memory for tx queue\n"); + return -ENOMEM; + } + trans_pcie->txqs.txq[txq_id] = queue; + ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); + if (ret) { + IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); + goto error; + } + } else { + queue = trans_pcie->txqs.txq[txq_id]; + } + + ret = iwl_txq_init(trans, queue, queue_size, + (txq_id == trans_pcie->txqs.cmd.q_id)); + if (ret) { + IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); + goto error; + } + trans_pcie->txqs.txq[txq_id]->id = txq_id; + set_bit(txq_id, trans_pcie->txqs.queue_used); + + return 0; + +error: + iwl_txq_gen2_tx_free(trans); + return ret; +} /*************** HOST COMMAND QUEUE FUNCTIONS *****/ @@ -28,7 +1206,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; void *dup_buf = NULL; @@ -122,7 +1300,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, spin_unlock_irqrestore(&txq->lock, flags); IWL_ERR(trans, "No space in command queue\n"); - iwl_op_mode_cmd_queue_full(trans->op_mode); + iwl_op_mode_nic_error(trans->op_mode, + IWL_ERR_TYPE_CMD_QUEUE_FULL); + iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL); idx = -ENOSPC; goto free_dup_buf; } @@ -130,7 +1310,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; - /* re-initialize to NULL */ + /* re-initialize, this also marks the SG list as unused */ memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; @@ -143,7 +1323,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -191,7 +1371,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); + cmd_size, txq->write_ptr, idx, 
trans_pcie->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 6c2b37e56c78..7c1dd5cc084a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,16 +1,22 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/etherdevice.h> #include <linux/ieee80211.h> +#include <linux/dmapool.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/tcp.h> #include <net/ip6_checksum.h> #include <net/tso.h> +#include "fw/api/commands.h" +#include "fw/api/datapath.h" +#include "fw/api/debug.h" +#include "iwl-fh.h" #include "iwl-debug.h" #include "iwl-csr.h" #include "iwl-prph.h" @@ -72,6 +78,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 reg = 0; int txq_id = txq->id; @@ -84,7 +91,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, * 3. there is a chance that the NIC is asleep */ if (!trans->trans_cfg->base_params->shadow_reg_enable && - txq_id != trans->txqs.cmd.q_id && + txq_id != trans_pcie->txqs.cmd.q_id && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* * wake up nic if it's powered down ... @@ -115,12 +122,13 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = trans->txqs.txq[i]; + struct iwl_txq *txq = trans_pcie->txqs.txq[i]; - if (!test_bit(i, trans->txqs.queue_used)) + if (!test_bit(i, trans_pcie->txqs.queue_used)) continue; spin_lock_bh(&txq->lock); @@ -132,23 +140,43 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) } } +static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd, + u8 idx, dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + hi_n_len |= iwl_get_dma_hi_addr(addr); + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); void *tfd; u32 num_tbs; - tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr; + tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr; if (reset) - memset(tfd, 0, trans->txqs.tfd.size); + memset(tfd, 0, trans_pcie->txqs.tfd.size); - num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); + num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd); /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (num_tbs >= trans->txqs.tfd.max_tbs) { + if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans->txqs.tfd.max_tbs); + 
trans_pcie->txqs.tfd.max_tbs);
 return -EINVAL;
 }
@@ -156,7 +184,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 "Unaligned address = %llx\n", (unsigned long long)addr))
 return -EINVAL;
- iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len);
+ iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
 return num_tbs;
 }
@@ -181,36 +209,206 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
 spin_unlock(&trans_pcie->reg_lock);
 }
+static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
+ struct page *page)
+{
+ struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
+
+ /* Decrease internal use count and unmap/free page if needed */
+ if (refcount_dec_and_test(&info->use_count)) {
+ dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ __free_page(page);
+ }
+}
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *next;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct iwl_tso_page_info *info;
+ struct page *tmp = next;
+
+ info = IWL_TSO_PAGE_INFO(page_address(next));
+ next = info->next;
+
+ /* Unmap the scatter gather list that is on the last page */
+ if (!next && cmd_meta->sg_offset) {
+ struct sg_table *sgt;
+
+ sgt = (void *)((u8 *)page_address(tmp) +
+ cmd_meta->sg_offset);
+
+ dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
+ }
+
+ iwl_pcie_free_and_unmap_tso_page(trans, tmp);
+ }
+}
+
+static inline dma_addr_t
+iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr;
+ dma_addr_t hi_len;
+
+ addr = get_unaligned_le32(&tb->lo);
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+}
+
+static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
+ struct iwl_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
+static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue a fatal error, this is quite a serious situation */
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. 
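
+ * As in the gen2 variant, only TB1 is unmapped below when an SG
+ * list is present; iwl_pcie_free_tso_pages() releases the rest.
+ 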
*/ + if (meta->sg_offset) + num_tbs = 2; + + /* first TB is never freed - it's the bidirectional DMA data */ + + for (i = 1; i < num_tbs; i++) { + if (meta->tbs & BIT(i)) + dma_unmap_page(trans->dev, + iwl_txq_gen1_tfd_tb_get_addr(tfd, i), + iwl_txq_gen1_tfd_tb_get_len(trans, + tfd, i), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + iwl_txq_gen1_tfd_tb_get_addr(tfd, i), + iwl_txq_gen1_tfd_tb_get_len(trans, + tfd, i), + DMA_TO_DEVICE); + } + + meta->tbs = 0; + + iwl_txq_set_tfd_invalid_gen1(trans, tfd); +} + +/** + * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @trans: transport private data + * @txq: tx queue + * @read_ptr: the TXQ read_ptr to free + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, + int read_ptr) +{ + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int idx = iwl_txq_get_cmd_index(txq, read_ptr); + struct sk_buff *skb; + + lockdep_assert_held(&txq->reclaim_lock); + + if (!txq->entries) + return; + + /* We have only q->n_window txq->entries, but we use + * TFD_QUEUE_SIZE_MAX tfds + */ + if (trans->trans_cfg->gen2) + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_txq_get_tfd(trans, txq, read_ptr)); + else + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, + txq, read_ptr); + + /* free SKB */ + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } +} + /* * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's */ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; if (!txq) { IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); return; } - spin_lock_bh(&txq->lock); + spin_lock_bh(&txq->reclaim_lock); + spin_lock(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); - if (txq_id != trans->txqs.cmd.q_id) { + if (txq_id != trans_pcie->txqs.cmd.q_id) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; + struct iwl_cmd_meta *cmd_meta = + &txq->entries[txq->read_ptr].meta; if (WARN_ON_ONCE(!skb)) continue; - iwl_txq_free_tso_page(trans, skb); + iwl_pcie_free_tso_pages(trans, skb, cmd_meta); } - iwl_txq_free_tfd(trans, txq); + iwl_txq_free_tfd(trans, txq, txq->read_ptr); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr && - txq_id == trans->txqs.cmd.q_id) + txq_id == trans_pcie->txqs.cmd.q_id) iwl_pcie_clear_cmd_in_flight(trans); } @@ -220,10 +418,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_op_mode_free_skb(trans->op_mode, skb); } - spin_unlock_bh(&txq->lock); + spin_unlock(&txq->lock); + spin_unlock_bh(&txq->reclaim_lock); /* just in case - this queue may have been stopped */ - iwl_wake_queue(trans, txq); + iwl_trans_pcie_wake_queue(trans, txq); } /* @@ -236,7 +435,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) */ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { - struct 
iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; struct device *dev = trans->dev; int i; @@ -246,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) iwl_pcie_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ - if (txq_id == trans->txqs.cmd.q_id) + if (txq_id == trans_pcie->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { kfree_sensitive(txq->entries[i].cmd); kfree_sensitive(txq->entries[i].free_buf); @@ -255,7 +455,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, - trans->txqs.tfd.size * + trans_pcie->txqs.tfd.size * trans->trans_cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; @@ -285,9 +485,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); /* make sure all queue are not stopped/used */ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_stopped, 0, + sizeof(trans_pcie->txqs.queue_stopped)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); @@ -301,7 +502,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, - trans->txqs.scd_bc_tbls.dma >> 10); + trans_pcie->txqs.scd_bc_tbls.dma >> 10); /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. @@ -309,9 +510,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) if (trans->trans_cfg->base_params->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, - trans->txqs.cmd.fifo, - trans->txqs.cmd.wdg_timeout); + iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id, + trans_pcie->txqs.cmd.fifo, + trans_pcie->txqs.cmd.wdg_timeout); /* Activate all Tx DMA/FIFO channels */ iwl_scd_activate_fifos(trans); @@ -347,7 +548,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; if (trans->trans_cfg->gen2) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), @@ -422,9 +623,10 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. 
*/ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_stopped, 0, + sizeof(trans_pcie->txqs.queue_stopped)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); /* This can happen: start_hw, stop_device */ if (!trans_pcie->txq_memory) @@ -448,7 +650,8 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); /* Tx queues */ if (trans_pcie->txq_memory) { @@ -456,7 +659,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); - trans->txqs.txq[txq_id] = NULL; + trans_pcie->txqs.txq[txq_id] = NULL; } } @@ -465,7 +668,135 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); - iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); + iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls); +} + +void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) +{ + u32 txq_id = txq->id; + u32 status; + bool active; + u8 fifo; + + if (trans->trans_cfg->gen2) { + IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, + txq->read_ptr, txq->write_ptr); + /* TODO: access new SCD registers and dump them */ + return; + } + + status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); + fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); + + IWL_ERR(trans, + "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", + txq_id, active ? "" : "in", fifo, + jiffies_to_msecs(txq->wd_timeout), + txq->read_ptr, txq->write_ptr, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); +} + +static void iwl_txq_stuck_timer(struct timer_list *t) +{ + struct iwl_txq *txq = from_timer(txq, t, stuck_timer); + struct iwl_trans *trans = txq->trans; + + spin_lock(&txq->lock); + /* check if triggered erroneously */ + if (txq->read_ptr == txq->write_ptr) { + spin_unlock(&txq->lock); + return; + } + spin_unlock(&txq->lock); + + iwl_txq_log_scd_error(trans, txq); + + iwl_force_nmi(trans); +} + +int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t num_entries = trans->trans_cfg->gen2 ? 
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; + size_t tfd_sz; + size_t tb0_buf_sz; + int i; + + if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) + return -EINVAL; + + if (WARN_ON(txq->entries || txq->tfds)) + return -EINVAL; + + tfd_sz = trans_pcie->txqs.tfd.size * num_entries; + + timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); + txq->trans = trans; + + txq->n_window = slots_num; + + txq->entries = kcalloc(slots_num, + sizeof(struct iwl_pcie_txq_entry), + GFP_KERNEL); + + if (!txq->entries) + goto error; + + if (cmd_queue) + for (i = 0; i < slots_num; i++) { + txq->entries[i].cmd = + kmalloc(sizeof(struct iwl_device_cmd), + GFP_KERNEL); + if (!txq->entries[i].cmd) + goto error; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device + */ + txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, + &txq->dma_addr, GFP_KERNEL); + if (!txq->tfds) + goto error; + + BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); + + tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; + + txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, + &txq->first_tb_dma, + GFP_KERNEL); + if (!txq->first_tb_bufs) + goto err_free_tfds; + + for (i = 0; i < num_entries; i++) { + void *tfd = iwl_txq_get_tfd(trans, txq, i); + + if (trans->trans_cfg->gen2) + iwl_txq_set_tfd_invalid_gen2(trans, tfd); + else + iwl_txq_set_tfd_invalid_gen1(trans, tfd); + } + + return 0; +err_free_tfds: + dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); + txq->tfds = NULL; +error: + if (txq->entries && cmd_queue) + for (i = 0; i < slots_num; i++) + kfree(txq->entries[i].cmd); + kfree(txq->entries); + txq->entries = NULL; + + return -ENOMEM; } /* @@ -491,7 +822,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) goto error; } - ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, + ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls, bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); @@ -517,7 +848,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); + bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, @@ -525,14 +856,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_ba_txq_size); - trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; - ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, - cmd_queue); + trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; + ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id], + slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } - trans->txqs.txq[txq_id]->id = txq_id; + trans_pcie->txqs.txq[txq_id]->id = txq_id; } return 0; @@ -543,6 +874,69 @@ error: return ret; } +/* + * iwl_queue_init - Initialize queue's high/low-water and read/write indexes + */ +static int iwl_queue_init(struct iwl_txq *q, int slots_num) +{ + q->n_window = slots_num; + + /* slots_num must be power-of-two size, otherwise + * iwl_txq_get_cmd_index is broken. 
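+ * (iwl_txq_get_cmd_index() reduces the index with an n_window - 1
+ * mask, which is only a true modulo for power-of-two sizes)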
+ */ + if (WARN_ON(!is_power_of_2(slots_num))) + return -EINVAL; + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = 0; + q->read_ptr = 0; + + return 0; +} + +int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) +{ + u32 tfd_queue_max_size = + trans->trans_cfg->base_params->max_tfd_queue_size; + int ret; + + txq->need_update = false; + + /* max_tfd_queue_size must be power-of-two size, otherwise + * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. + */ + if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), + "Max tfd queue size must be a power of two, but is %d", + tfd_queue_max_size)) + return -EINVAL; + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + ret = iwl_queue_init(txq, slots_num); + if (ret) + return ret; + + spin_lock_init(&txq->lock); + spin_lock_init(&txq->reclaim_lock); + + if (cmd_queue) { + static struct lock_class_key iwl_txq_cmd_queue_lock_class; + + lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); + } + + __skb_queue_head_init(&txq->overflow_q); + + return 0; +} + int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -571,7 +965,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); + bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, @@ -579,7 +973,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_ba_txq_size); - ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, + ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); @@ -593,7 +987,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) * Circular buffer (TFD queue in DRAM) physical base address */ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), - trans->txqs.txq[txq_id]->dma_addr >> 8); + trans_pcie->txqs.txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); @@ -641,6 +1035,42 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, return 0; } +static void iwl_txq_progress(struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + if (!txq->wd_timeout) + return; + + /* + * station is asleep and we send data - that must + * be uAPSD or PS-Poll. Don't rearm the timer. + */ + if (txq->frozen) + return; + + /* + * if empty delete timer, otherwise move timer forward + * since we're making progress on this queue + */ + if (txq->read_ptr == txq->write_ptr) + del_timer(&txq->stuck_timer); + else + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); +} + +static inline bool iwl_txq_used(const struct iwl_txq *q, int i, + int read_ptr, int write_ptr) +{ + int index = iwl_txq_get_cmd_index(q, i); + int r = iwl_txq_get_cmd_index(q, read_ptr); + int w = iwl_txq_get_cmd_index(q, write_ptr); + + return w >= r ? 
+ (index >= r && index < w) : + !(index < r && index >= w); +} + /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * @@ -650,7 +1080,8 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, */ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; int nfreed = 0; u16 r; @@ -660,8 +1091,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) r = iwl_txq_get_cmd_index(txq, txq->read_ptr); if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || - (!iwl_txq_used(txq, idx))) { - WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), + (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) { + WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, trans->trans_cfg->base_params->max_tfd_queue_size, @@ -720,11 +1151,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; int fifo = -1; bool scd_bug = false; - if (test_and_set_bit(txq_id, trans->txqs.queue_used)) + if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); txq->wd_timeout = msecs_to_jiffies(wdg_timeout); @@ -733,7 +1164,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, fifo = cfg->fifo; /* Disable the scheduler prior configuring the cmd queue */ - if (txq_id == trans->txqs.cmd.q_id && + if (txq_id == trans_pcie->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, 0); @@ -741,7 +1172,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, iwl_scd_txq_set_inactive(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD */ - if (txq_id != trans->txqs.cmd.q_id) + if (txq_id != trans_pcie->txqs.cmd.q_id) iwl_scd_txq_set_chain(trans, txq_id); if (cfg->aggregate) { @@ -811,7 +1242,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, SCD_QUEUE_STTS_REG_MSK); /* enable the scheduler for this queue (only) */ - if (txq_id == trans->txqs.cmd.q_id && + if (txq_id == trans_pcie->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, BIT(txq_id)); @@ -830,7 +1261,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; txq->ampdu = !shared_mode; } @@ -843,8 +1275,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; - trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; - trans->txqs.txq[txq_id]->frozen = false; + trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0; + trans_pcie->txqs.txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues @@ -852,7 +1284,7 @@ void iwl_trans_pcie_txq_disable(struct 
iwl_trans *trans, int txq_id, * allow the op_mode to call txq_disable after it already called * stop_device. */ - if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { + if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", txq_id); return; @@ -866,7 +1298,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, } iwl_pcie_txq_unmap(trans, txq_id); - trans->txqs.txq[txq_id]->ampdu = false; + trans_pcie->txqs.txq[txq_id]->ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -875,12 +1307,13 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = trans->txqs.txq[i]; + struct iwl_txq *txq = trans_pcie->txqs.txq[i]; - if (i == trans->txqs.cmd.q_id) + if (i == trans_pcie->txqs.cmd.q_id) continue; /* we skip the command queue (obviously) so it's OK to nest */ @@ -912,7 +1345,8 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; void *dup_buf = NULL; @@ -1015,7 +1449,9 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, spin_unlock_irqrestore(&txq->lock, flags); IWL_ERR(trans, "No space in command queue\n"); - iwl_op_mode_cmd_queue_full(trans->op_mode); + iwl_op_mode_nic_error(trans->op_mode, + IWL_ERR_TYPE_CMD_QUEUE_FULL); + iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL); idx = -ENOSPC; goto free_dup_buf; } @@ -1024,7 +1460,8 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; - memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + /* re-initialize, this also marks the SG list as unused */ + memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; @@ -1038,7 +1475,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1046,7 +1483,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } else { out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); out_cmd->hdr.group_id = 0; @@ -1097,7 +1534,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); + cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); @@ -1196,14 +1633,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_device_cmd 
*cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; /* If a Tx command is being handled and it isn't in the actual * command queue then there a command routing bug has been introduced * in the queue management code. */ - if (WARN(txq_id != trans->txqs.cmd.q_id, + if (WARN(txq_id != trans_pcie->txqs.cmd.q_id, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, + txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr, txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; @@ -1306,19 +1743,184 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET +static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans, + size_t len, struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page); + struct iwl_tso_page_info *info; + struct page **page_ptr; + dma_addr_t phys; + void *ret; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + + if (WARN_ON(*page_ptr)) + return NULL; + + if (!p->page) + goto alloc; + + /* + * Check if there's enough room on this page + * + * Note that we put a page chaining pointer *last* in the + * page - we need it somewhere, and if it's there then we + * avoid DMA mapping the last bits of the page which may + * trigger the 32-bit boundary hardware bug. + * + * (see also get_workaround_page() in tx-gen2.c) + */ + if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) { + info = IWL_TSO_PAGE_INFO(page_address(p->page)); + goto out; + } + + /* We don't have enough room on this page, get a new one. */ + iwl_pcie_free_and_unmap_tso_page(trans, p->page); + +alloc: + p->page = alloc_page(GFP_ATOMIC); + if (!p->page) + return NULL; + p->pos = page_address(p->page); + + info = IWL_TSO_PAGE_INFO(page_address(p->page)); + + /* set the chaining pointer to NULL */ + info->next = NULL; + + /* Create a DMA mapping for the page */ + phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(trans->dev, phys))) { + __free_page(p->page); + p->page = NULL; + + return NULL; + } + + /* Store physical address and set use count */ + info->dma_addr = phys; + refcount_set(&info->use_count, 1); +out: + *page_ptr = p->page; + /* Return an internal reference for the caller */ + refcount_inc(&info->use_count); + ret = p->pos; + p->pos += len; + + return ret; +} + +/** + * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list + * @sgt: scatter gather table + * @offset: Offset into the mapped memory (i.e. SKB payload data) + * @len: Length of the area + * + * Find the DMA address that corresponds to the SKB payload data at the + * position given by @offset. + * + * Returns: Address for TB entry + */ +dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, + unsigned int len) +{ + struct scatterlist *sg; + unsigned int sg_offset = 0; + int i; + + /* + * Search the mapped DMA areas in the SG for the area that contains the + * data at offset with the given length. 
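/*
 * A self-contained model of the walk implemented just below, using a
 * plain array instead of struct scatterlist (the model_dma_seg type and
 * names are hypothetical, for illustration only): each mapped segment
 * covers the payload range [seg_off, seg_off + len), and the requested
 * span must lie wholly inside one segment for a translation to exist.
 */
struct model_dma_seg { unsigned long long dma; unsigned int len; };

static unsigned long long
model_find_tb_phys(const struct model_dma_seg *seg, int n_seg,
		   unsigned int offset, unsigned int len)
{
	unsigned int seg_off = 0;
	int i;

	for (i = 0; i < n_seg; i++) {
		/* span must fit entirely within this segment */
		if (offset >= seg_off && offset + len <= seg_off + seg[i].len)
			return seg[i].dma + offset - seg_off;
		seg_off += seg[i].len;
	}
	return ~0ULL; /* stands in for DMA_MAPPING_ERROR */
}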
+ */ + for_each_sgtable_dma_sg(sgt, sg, i) { + if (offset >= sg_offset && + offset + len <= sg_offset + sg_dma_len(sg)) + return sg_dma_address(sg) + offset - sg_offset; + + sg_offset += sg_dma_len(sg); + } + + WARN_ON_ONCE(1); + + return DMA_MAPPING_ERROR; +} + +/** + * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending + * @trans: transport private data + * @skb: the SKB to map + * @cmd_meta: command meta to store the scatter list information for unmapping + * @hdr: output argument for TSO headers + * @hdr_room: requested length for TSO headers + * @offset: offset into the data from which mapping should start + * + * Allocate space for a scatter gather list and TSO headers and map the SKB + * using the scatter gather list. The SKB is unmapped again when the page is + * free'ed again at the end of the operation. + * + * Returns: newly allocated and mapped scatter gather table with list + */ +struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta, + u8 **hdr, unsigned int hdr_room, + unsigned int offset) +{ + struct sg_table *sgt; + unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1; + int orig_nents; + + if (WARN_ON_ONCE(skb_has_frag_list(skb))) + return NULL; + + *hdr = iwl_pcie_get_page_hdr(trans, + hdr_room + __alignof__(struct sg_table) + + sizeof(struct sg_table) + + n_segments * sizeof(struct scatterlist), + skb); + if (!*hdr) + return NULL; + + sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table)); + sgt->sgl = (void *)(sgt + 1); + + sg_init_table(sgt->sgl, n_segments); + + /* Only map the data, not the header (it is copied to the TSO page) */ + orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset); + if (WARN_ON_ONCE(orig_nents <= 0)) + return NULL; + + sgt->orig_nents = orig_nents; + + /* And map the entire SKB */ + if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0) + return NULL; + + /* Store non-zero (i.e. 
valid) offset for unmapping */ + cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK; + + return sgt; +} + static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, struct iwl_device_tx_cmd *dev_cmd, u16 tb1_len) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; + unsigned int data_offset = 0; u16 length, iv_len, amsdu_pad; - u8 *start_hdr; - struct iwl_tso_hdr_page *hdr_page; + dma_addr_t start_hdr_phys; + u8 *start_hdr, *pos_hdr; + struct sg_table *sgt; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -1328,10 +1930,10 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, trace_iwlwifi_dev_tx(trans->dev, skb, iwl_txq_get_tfd(trans, txq, txq->write_ptr), - trans->txqs.tfd.size, + trans_pcie->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); - ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); + ip_hdrlen = skb_network_header_len(skb); snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; amsdu_pad = 0; @@ -1341,13 +1943,15 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room, skb); - if (!hdr_page) + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, + snap_ip_tcp_hdrlen + hdr_len + iv_len); + if (!sgt) return -ENOMEM; - start_hdr = hdr_page->pos; - memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); - hdr_page->pos += iv_len; + start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr); + pos_hdr = start_hdr; + memcpy(pos_hdr, skb->data + hdr_len, iv_len); + pos_hdr += iv_len; /* * Pull the ieee80211 header + IV to be able to use TSO core, @@ -1370,45 +1974,43 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, min_t(unsigned int, mss, total_len); unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; - u8 *subf_hdrs_start = hdr_page->pos; + u8 *subf_hdrs_start = pos_hdr; total_len -= data_left; - memset(hdr_page->pos, 0, amsdu_pad); - hdr_page->pos += amsdu_pad; + memset(pos_hdr, 0, amsdu_pad); + pos_hdr += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; - ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); - hdr_page->pos += ETH_ALEN; - ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); - hdr_page->pos += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr)); + pos_hdr += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr)); + pos_hdr += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; - *((__be16 *)hdr_page->pos) = cpu_to_be16(length); - hdr_page->pos += sizeof(length); + *((__be16 *)pos_hdr) = cpu_to_be16(length); + pos_hdr += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. 
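/*
 * Worked example of the subframe padding arithmetic used a few lines
 * above (standalone sketch, not driver code): every A-MSDU subframe
 * must start on a 4-byte boundary, so the pad inserted before the next
 * subframe is (4 - current_subframe_len) & 0x3.
 */
static unsigned int model_amsdu_pad(unsigned int subframe_len)
{
	return (4 - subframe_len) & 0x3;
}
/* e.g. a 61-byte subframe needs 3 pad bytes; a 60-byte one needs none */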
*/ - tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); + tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len); - hdr_page->pos += snap_ip_tcp_hdrlen; + pos_hdr += snap_ip_tcp_hdrlen; + + hdr_tb_len = pos_hdr - start_hdr; + hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr); - hdr_tb_len = hdr_page->pos - start_hdr; - hdr_tb_phys = dma_map_single(trans->dev, start_hdr, - hdr_tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) - return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, hdr_tb_phys, hdr_tb_len); /* add this subframe's headers' length to the tx_cmd */ - le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); + le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ - start_hdr = hdr_page->pos; + start_hdr = pos_hdr; /* put the payload */ while (data_left) { @@ -1416,9 +2018,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, data_left); dma_addr_t tb_phys; - tb_phys = dma_map_single(trans->dev, tso.data, - size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size); + /* Not a real mapping error, use direct comparison */ + if (unlikely(tb_phys == DMA_MAPPING_ERROR)) return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, tb_phys, @@ -1427,10 +2029,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, tb_phys, size); data_left -= size; + data_offset += size; tso_build_data(skb, &tso, size); } } + dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, + DMA_TO_DEVICE); + /* re -add the WiFi header and IV */ skb_push(skb, hdr_len + iv_len); @@ -1450,9 +2056,61 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, } #endif /* CONFIG_INET */ +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +/* + * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwlagn_scd_bc_tbl *scd_bc_tbl; + int write_ptr = txq->write_ptr; + int txq_id = txq->id; + u8 sec_ctl = 0; + u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + u8 sta_id = tx_cmd->sta_id; + + scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; + + sec_ctl = tx_cmd->sec_ctl; + + switch (sec_ctl & TX_CMD_SEC_MSK) { + case TX_CMD_SEC_CCM: + len += IEEE80211_CCMP_MIC_LEN; + break; + case TX_CMD_SEC_TKIP: + len += IEEE80211_TKIP_ICV_LEN; + break; + case TX_CMD_SEC_WEP: + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; + break; + } + if (trans_pcie->txqs.bc_table_dword) + len = DIV_ROUND_UP(len, 4); + + if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + + bc_ent = cpu_to_le16(len | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = + bc_ent; +} + int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct 
ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; @@ -1467,14 +2125,14 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, u16 wifi_seq; bool amsdu; - txq = trans->txqs.txq[txq_id]; + txq = trans_pcie->txqs.txq[txq_id]; - if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) && __skb_linearize(skb)) return -ENOMEM; @@ -1495,7 +2153,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans->txqs.dev_cmd_offs); + trans_pcie->txqs.dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); @@ -1533,7 +2191,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[txq->write_ptr].meta; - out_meta->flags = 0; + memset(out_meta, 0, sizeof(*out_meta)); /* * The second TB (tb1) points to the remainder of the TX command @@ -1578,7 +2236,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, trace_iwlwifi_dev_tx(trans->dev, skb, iwl_txq_get_tfd(trans, txq, txq->write_ptr), - trans->txqs.tfd.size, + trans_pcie->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -1613,8 +2271,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), - iwl_txq_gen1_tfd_get_num_tbs(trans, - tfd)); + iwl_txq_gen1_tfd_get_num_tbs(tfd)); wait_write_ptr = ieee80211_has_morefrags(fc); @@ -1649,3 +2306,383 @@ out_err: spin_unlock(&txq->lock); return -1; } + +static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, + int read_ptr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; + int txq_id = txq->id; + u8 sta_id = 0; + __le16 bc_ent; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + + WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); + + if (txq_id != trans_pcie->txqs.cmd.q_id) + sta_id = tx_cmd->sta_id; + + bc_ent = cpu_to_le16(1 | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; + + if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = + bc_ent; +} + +/* Frees buffers until index _not_ inclusive */ +void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs, bool is_flush) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; + int tfd_num, read_ptr, last_to_free; + int txq_read_ptr, txq_write_ptr; + + /* This function is not meant to release cmd queue*/ + if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id)) + return; + + if (WARN_ON(!txq)) + return; + + tfd_num = iwl_txq_get_cmd_index(txq, ssn); + + spin_lock_bh(&txq->reclaim_lock); + + spin_lock(&txq->lock); + txq_read_ptr = txq->read_ptr; + txq_write_ptr = txq->write_ptr; + 
spin_unlock(&txq->lock); + + /* There is nothing to do if we are flushing an empty queue */ + if (is_flush && txq_write_ptr == txq_read_ptr) + goto out; + + read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr); + + if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) { + IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", + txq_id, ssn); + goto out; + } + + if (read_ptr == tfd_num) + goto out; + + IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n", + txq_id, read_ptr, txq_read_ptr, tfd_num, ssn); + + /* Since we free until index _not_ inclusive, the one before index is + * the last we will free. This one must be used. + */ + last_to_free = iwl_txq_dec_wrap(trans, tfd_num); + + if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) { + IWL_ERR(trans, + "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, last_to_free, + trans->trans_cfg->base_params->max_tfd_queue_size, + txq_write_ptr, txq_read_ptr); + + iwl_op_mode_time_point(trans->op_mode, + IWL_FW_INI_TIME_POINT_FAKE_TX, + NULL); + goto out; + } + + if (WARN_ON(!skb_queue_empty(skbs))) + goto out; + + for (; + read_ptr != tfd_num; + txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr), + read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) { + struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta; + struct sk_buff *skb = txq->entries[read_ptr].skb; + + if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n", + read_ptr, txq_read_ptr, txq_id)) + continue; + + iwl_pcie_free_tso_pages(trans, skb, cmd_meta); + + __skb_queue_tail(skbs, skb); + + txq->entries[read_ptr].skb = NULL; + + if (!trans->trans_cfg->gen2) + iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq, + txq_read_ptr); + + iwl_txq_free_tfd(trans, txq, txq_read_ptr); + } + + spin_lock(&txq->lock); + txq->read_ptr = txq_read_ptr; + + iwl_txq_progress(txq); + + if (iwl_txq_space(trans, txq) > txq->low_mark && + test_bit(txq_id, trans_pcie->txqs.queue_stopped)) { + struct sk_buff_head overflow_skbs; + struct sk_buff *skb; + + __skb_queue_head_init(&overflow_skbs); + skb_queue_splice_init(&txq->overflow_q, + is_flush ? skbs : &overflow_skbs); + + /* + * We are going to transmit from the overflow queue. + * Remember this state so that wait_for_txq_empty will know we + * are adding more packets to the TFD queue. It cannot rely on + * the state of &txq->overflow_q, as we just emptied it, but + * haven't TXed the content yet. + */ + txq->overflow_tx = true; + + /* + * This is tricky: we are in the reclaim path and are holding + * reclaim_lock, so no one will try to access the txq data + * from that path. TX is stopped too, so no new frames can race + * with us. Bottom line, we can unlock and re-lock later. + */ + spin_unlock(&txq->lock); + + while ((skb = __skb_dequeue(&overflow_skbs))) { + struct iwl_device_tx_cmd *dev_cmd_ptr; + + dev_cmd_ptr = *(void **)((u8 *)skb->cb + + trans_pcie->txqs.dev_cmd_offs); + + /* + * Note that we can very well be overflowing again. + * In that case, iwl_txq_space will be small again + * and we won't wake mac80211's queue.
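/*
 * The wake check above and the stop check in the TX path form a
 * hysteresis band (low_mark = n_window / 4, high_mark = n_window / 8,
 * per iwl_queue_init()): TX stops once free space drops below
 * high_mark and is re-woken only after reclaim frees more than
 * low_mark slots, so the queue doesn't flap between the two states.
 * A minimal standalone model, with a window of 256 assumed purely for
 * illustration:
 */
static int model_stopped; /* 0 = TX running, 1 = TX stopped */

static void model_space_change(unsigned int free_slots)
{
	if (!model_stopped && free_slots < 256 / 8)
		model_stopped = 1;	/* nearly full: stop the queue */
	else if (model_stopped && free_slots > 256 / 4)
		model_stopped = 0;	/* drained enough: wake + replay overflow_q */
}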
+ */ + iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); + } + + if (iwl_txq_space(trans, txq) > txq->low_mark) + iwl_trans_pcie_wake_queue(trans, txq); + + spin_lock(&txq->lock); + txq->overflow_tx = false; + } + + spin_unlock(&txq->lock); +out: + spin_unlock_bh(&txq->reclaim_lock); +} + +/* Set wr_ptr of specific device and txq */ +void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; + + spin_lock_bh(&txq->lock); + + txq->write_ptr = ptr; + txq->read_ptr = txq->write_ptr; + + spin_unlock_bh(&txq->lock); +} + +void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int queue; + + for_each_set_bit(queue, &txqs, BITS_PER_LONG) { + struct iwl_txq *txq = trans_pcie->txqs.txq[queue]; + unsigned long now; + + spin_lock_bh(&txq->lock); + + now = jiffies; + + if (txq->frozen == freeze) + goto next_queue; + + IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", + freeze ? "Freezing" : "Waking", queue); + + txq->frozen = freeze; + + if (txq->read_ptr == txq->write_ptr) + goto next_queue; + + if (freeze) { + if (unlikely(time_after(now, + txq->stuck_timer.expires))) { + /* + * The timer should have fired, maybe it is + * spinning right now on the lock. + */ + goto next_queue; + } + /* remember how long until the timer fires */ + txq->frozen_expiry_remainder = + txq->stuck_timer.expires - now; + del_timer(&txq->stuck_timer); + goto next_queue; + } + + /* + * Wake a non-empty queue -> arm timer with the + * remainder before it froze + */ + mod_timer(&txq->stuck_timer, + now + txq->frozen_expiry_remainder); + +next_queue: + spin_unlock_bh(&txq->lock); + } +} + +#define HOST_COMPLETE_TIMEOUT (2 * HZ) + +static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + int cmd_idx; + int ret; + + IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); + + if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + "Command %s: a command is already active!\n", cmd_str)) + return -EIO; + + IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); + + if (trans->trans_cfg->gen2) + cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); + else + cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); + + if (cmd_idx < 0) { + ret = cmd_idx; + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", + cmd_str, ret); + return ret; + } + + ret = wait_event_timeout(trans->wait_command_queue, + !test_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + IWL_ERR(trans, "Error sending %s: time out after %dms.\n", + cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", + txq->read_ptr, txq->write_ptr); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + cmd_str); + ret = -ETIMEDOUT; + + iwl_trans_sync_nmi(trans); + goto cancel; + } + + if (test_bit(STATUS_FW_ERROR, &trans->status)) { + if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, + &trans->status)) { + IWL_ERR(trans, "FW error in SYNC 
CMD %s\n", cmd_str); + dump_stack(); + } + ret = -EIO; + goto cancel; + } + + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); + ret = -ERFKILL; + goto cancel; + } + + if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { + IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); + ret = -EIO; + goto cancel; + } + + return 0; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). + */ + txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; + } + + if (cmd->resp_pkt) { + iwl_free_resp(cmd); + cmd->resp_pkt = NULL; + } + + return ret; +} + +int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", + cmd->id); + return -ERFKILL; + } + + if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && + !(cmd->flags & CMD_SEND_IN_D3))) { + IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); + return -EHOSTDOWN; + } + + if (cmd->flags & CMD_ASYNC) { + int ret; + + /* An asynchronous command can not expect an SKB to be set. */ + if (WARN_ON(cmd->flags & CMD_WANT_SKB)) + return -EINVAL; + + if (trans->trans_cfg->gen2) + ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); + else + ret = iwl_pcie_enqueue_hcmd(trans, cmd); + + if (ret < 0) { + IWL_ERR(trans, + "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_get_cmd_string(trans, cmd->id), ret); + return ret; + } + return 0; + } + + return iwl_trans_pcie_send_hcmd_sync(trans, cmd); +} +IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c deleted file mode 100644 index ca74b1b63cac..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ /dev/null @@ -1,1898 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -/* - * Copyright (C) 2020-2023 Intel Corporation - */ -#include <net/tso.h> -#include <linux/tcp.h> - -#include "iwl-debug.h" -#include "iwl-io.h" -#include "fw/api/commands.h" -#include "fw/api/tx.h" -#include "fw/api/datapath.h" -#include "fw/api/debug.h" -#include "queue/tx.h" -#include "iwl-fh.h" -#include "iwl-scd.h" -#include <linux/dmapool.h> - -/* - * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array - */ -static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) -{ - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - u8 filled_tfd_size, num_fetch_chunks; - u16 len = byte_cnt; - __le16 bc_ent; - - if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) - return; - - filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + - num_tbs * sizeof(struct iwl_tfh_tb); - /* - * filled_tfd_size contains the number of filled bytes in the TFD. - * Dividing it by 64 will give the number of chunks to fetch - * to SRAM- 0 for one chunk, 1 for 2 and so on. 
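/*
 * (A condensed, standalone model of that fetch-chunk arithmetic -
 * illustrative only: DIV_ROUND_UP(filled_tfd_size, 64) - 1 encodes
 * "number of 64-byte chunks, minus one".)
 */
static unsigned int model_num_fetch_chunks(unsigned int filled_tfd_size)
{
	return (filled_tfd_size + 63) / 64 - 1;
}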
- * If, for example, TFD contains only 3 TBs then 32 bytes - * of the TFD are used, and only one chunk of 64 bytes should - * be fetched - */ - num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; - - /* Starting from AX210, the HW expects bytes */ - WARN_ON(trans->txqs.bc_table_dword); - WARN_ON(len > 0x3FFF); - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); - scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; - } else { - struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; - - /* Before AX210, the HW expects DW */ - WARN_ON(!trans->txqs.bc_table_dword); - len = DIV_ROUND_UP(len, 4); - WARN_ON(len > 0xFFF); - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[idx] = bc_ent; - } -} - -/* - * iwl_txq_inc_wr_ptr - Send new write index to hardware - */ -void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) -{ - lockdep_assert_held(&txq->lock); - - IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); - - /* - * if not in power-save mode, uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). - */ - iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); -} - -static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd) -{ - return le16_to_cpu(tfd->num_tbs) & 0x1f; -} - -int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, - dma_addr_t addr, u16 len) -{ - int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); - struct iwl_tfh_tb *tb; - - /* Only WARN here so we know about the issue, but we mess up our - * unmap path because not every place currently checks for errors - * returned from this function - it can only return an error if - * there's no more space, and so when we know there is enough we - * don't always check ... 
- */ - WARN(iwl_txq_crosses_4g_boundary(addr, len), - "possible DMA problem with iova:0x%llx, len:%d\n", - (unsigned long long)addr, len); - - if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) - return -EINVAL; - tb = &tfd->tbs[idx]; - - /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans->txqs.tfd.max_tbs); - return -EINVAL; - } - - put_unaligned_le64(addr, &tb->addr); - tb->tb_len = cpu_to_le16(len); - - tfd->num_tbs = cpu_to_le16(idx + 1); - - return idx; -} - -static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd) -{ - tfd->num_tbs = 0; - - iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, - trans->invalid_tx_cmd.size); -} - -void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, - struct iwl_tfh_tfd *tfd) -{ - int i, num_tbs; - - /* Sanity check on number of chunks */ - num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); - - if (num_tbs > trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - return; - } - - /* first TB is never freed - it's the bidirectional DMA data */ - for (i = 1; i < num_tbs; i++) { - if (meta->tbs & BIT(i)) - dma_unmap_page(trans->dev, - le64_to_cpu(tfd->tbs[i].addr), - le16_to_cpu(tfd->tbs[i].tb_len), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - le64_to_cpu(tfd->tbs[i].addr), - le16_to_cpu(tfd->tbs[i].tb_len), - DMA_TO_DEVICE); - } - - iwl_txq_set_tfd_invalid_gen2(trans, tfd); -} - -void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) -{ - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and - * idx is bounded by n_window - */ - int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); - struct sk_buff *skb; - - lockdep_assert_held(&txq->lock); - - if (!txq->entries) - return; - - iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_txq_get_tfd(trans, txq, idx)); - - skb = txq->entries[idx].skb; - - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } -} - -static struct page *get_workaround_page(struct iwl_trans *trans, - struct sk_buff *skb) -{ - struct page **page_ptr; - struct page *ret; - - page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); - - ret = alloc_page(GFP_ATOMIC); - if (!ret) - return NULL; - - /* set the chaining pointer to the previous page if there */ - *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; - *page_ptr = ret; - - return ret; -} - -/* - * Add a TB and if needed apply the FH HW bug workaround; - * meta != NULL indicates that it's a page mapping and we - * need to dma_unmap_page() and set the meta->tbs bit in - * this case. - */ -static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, - dma_addr_t phys, void *virt, - u16 len, struct iwl_cmd_meta *meta) -{ - dma_addr_t oldphys = phys; - struct page *page; - int ret; - - if (unlikely(dma_mapping_error(trans->dev, phys))) - return -ENOMEM; - - if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { - ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); - - if (ret < 0) - goto unmap; - - if (meta) - meta->tbs |= BIT(ret); - - ret = 0; - goto trace; - } - - /* - * Work around a hardware bug. 
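/*
 * Sketch of the boundary condition this workaround guards against
 * (assumed semantics of iwl_txq_crosses_4g_boundary): the buffer
 * [addr, addr + len) must not end on or cross a 2^32 boundary, i.e.
 * both ends must share the same upper 32 address bits.
 */
static int model_crosses_4g_boundary(unsigned long long addr,
				     unsigned short len)
{
	return (addr >> 32) != ((addr + len) >> 32);
}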
If (as expressed in the - * condition above) the TB ends on a 32-bit boundary, - * then the next TB may be accessed with the wrong - * address. - * To work around it, copy the data elsewhere and make - * a new mapping for it so the device will not fail. - */ - - if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { - ret = -ENOBUFS; - goto unmap; - } - - page = get_workaround_page(trans, skb); - if (!page) { - ret = -ENOMEM; - goto unmap; - } - - memcpy(page_address(page), virt, len); - - phys = dma_map_single(trans->dev, page_address(page), len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, phys))) - return -ENOMEM; - ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); - if (ret < 0) { - /* unmap the new allocation as single */ - oldphys = phys; - meta = NULL; - goto unmap; - } - IWL_WARN(trans, - "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", - len, (unsigned long long)oldphys, (unsigned long long)phys); - - ret = 0; -unmap: - if (meta) - dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); -trace: - trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); - - return ret; -} - -#ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, - struct sk_buff *skb) -{ - struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); - struct page **page_ptr; - - page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); - - if (WARN_ON(*page_ptr)) - return NULL; - - if (!p->page) - goto alloc; - - /* - * Check if there's enough room on this page - * - * Note that we put a page chaining pointer *last* in the - * page - we need it somewhere, and if it's there then we - * avoid DMA mapping the last bits of the page which may - * trigger the 32-bit boundary hardware bug. - * - * (see also get_workaround_page() in tx-gen2.c) - */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - - sizeof(void *)) - goto out; - - /* We don't have enough room on this page, get a new one. 
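/*
 * Condensed model of the room check above (illustrative only): the
 * trailing sizeof(void *) bytes of every header page are reserved for
 * the chaining pointer, so a write of len bytes at pos must end before
 * that reserved slot.
 */
static int model_fits_on_page(const char *page_start, const char *pos,
			      unsigned int len, unsigned int page_size)
{
	return pos + len < page_start + page_size - sizeof(void *);
}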
*/ - __free_page(p->page); - -alloc: - p->page = alloc_page(GFP_ATOMIC); - if (!p->page) - return NULL; - p->pos = page_address(p->page); - /* set the chaining pointer to NULL */ - *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; -out: - *page_ptr = p->page; - get_page(p->page); - return p; -} -#endif - -static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, int start_len, - u8 hdr_len, - struct iwl_device_tx_cmd *dev_cmd) -{ -#ifdef CONFIG_INET - struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; - struct ieee80211_hdr *hdr = (void *)skb->data; - unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; - unsigned int mss = skb_shinfo(skb)->gso_size; - u16 length, amsdu_pad; - u8 *start_hdr; - struct iwl_tso_hdr_page *hdr_page; - struct tso_t tso; - - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), - &dev_cmd->hdr, start_len, 0); - - ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); - snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); - total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; - amsdu_pad = 0; - - /* total amount of header we may need for this A-MSDU */ - hdr_room = DIV_ROUND_UP(total_len, mss) * - (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); - - /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room, skb); - if (!hdr_page) - return -ENOMEM; - - start_hdr = hdr_page->pos; - - /* - * Pull the ieee80211 header to be able to use TSO core, - * we will restore it for the tx_status flow. - */ - skb_pull(skb, hdr_len); - - /* - * Remove the length of all the headers that we don't actually - * have in the MPDU by themselves, but that we duplicate into - * all the different MSDUs inside the A-MSDU. - */ - le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); - - tso_start(skb, &tso); - - while (total_len) { - /* this is the data left for this subframe */ - unsigned int data_left = min_t(unsigned int, mss, total_len); - unsigned int tb_len; - dma_addr_t tb_phys; - u8 *subf_hdrs_start = hdr_page->pos; - - total_len -= data_left; - - memset(hdr_page->pos, 0, amsdu_pad); - hdr_page->pos += amsdu_pad; - amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + - data_left)) & 0x3; - ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); - hdr_page->pos += ETH_ALEN; - ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); - hdr_page->pos += ETH_ALEN; - - length = snap_ip_tcp_hdrlen + data_left; - *((__be16 *)hdr_page->pos) = cpu_to_be16(length); - hdr_page->pos += sizeof(length); - - /* - * This will copy the SNAP as well which will be considered - * as MAC header. - */ - tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); - - hdr_page->pos += snap_ip_tcp_hdrlen; - - tb_len = hdr_page->pos - start_hdr; - tb_phys = dma_map_single(trans->dev, start_hdr, - tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa, this is from the TSO page and - * we leave some space at the end of it so can't hit - * the buggy scenario. 
- */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, - tb_phys, tb_len); - /* add this subframe's headers' length to the tx_cmd */ - le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); - - /* prepare the start_hdr for the next subframe */ - start_hdr = hdr_page->pos; - - /* put the payload */ - while (data_left) { - int ret; - - tb_len = min_t(unsigned int, tso.size, data_left); - tb_phys = dma_map_single(trans->dev, tso.data, - tb_len, DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, - tb_phys, tso.data, - tb_len, NULL); - if (ret) - goto out_err; - - data_left -= tb_len; - tso_build_data(skb, &tso, tb_len); - } - } - - /* re -add the WiFi header */ - skb_push(skb, hdr_len); - - return 0; - -out_err: -#endif - return -EINVAL; -} - -static struct -iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta, - int hdr_len, - int tx_cmd_len) -{ - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); - dma_addr_t tb_phys; - int len; - void *tb1_addr; - - tb_phys = iwl_txq_get_first_tb_dma(txq, idx); - - /* - * No need for _with_wa, the first TB allocation is aligned up - * to a 64-byte boundary and thus can't be at the end or cross - * a page boundary (much less a 2^32 boundary). - */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - - /* - * The second TB (tb1) points to the remainder of the TX command - * and the 802.11 header - dword aligned size - * (This calculation modifies the TX command, so do it before the - * setup of the first TB) - */ - len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - - IWL_FIRST_TB_SIZE; - - /* do not align A-MSDU to dword as the subframe header aligns it */ - - /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; - tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa(), we ensure (via alignment) that the data - * here can never cross or end at a page boundary. 
- */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); - - if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) - goto out_err; - - /* building the A-MSDU might have changed this data, memcpy it now */ - memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); - return tfd; - -out_err: - iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); - return NULL; -} - -static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, - struct iwl_cmd_meta *out_meta) -{ - int i; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - dma_addr_t tb_phys; - unsigned int fragsz = skb_frag_size(frag); - int ret; - - if (!fragsz) - continue; - - tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - fragsz, DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - skb_frag_address(frag), - fragsz, out_meta); - if (ret) - return ret; - } - - return 0; -} - -static struct -iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta, - int hdr_len, - int tx_cmd_len, - bool pad) -{ - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); - dma_addr_t tb_phys; - int len, tb1_len, tb2_len; - void *tb1_addr; - struct sk_buff *frag; - - tb_phys = iwl_txq_get_first_tb_dma(txq, idx); - - /* The first TB points to bi-directional DMA data */ - memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); - - /* - * No need for _with_wa, the first TB allocation is aligned up - * to a 64-byte boundary and thus can't be at the end or cross - * a page boundary (much less a 2^32 boundary). - */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - - /* - * The second TB (tb1) points to the remainder of the TX command - * and the 802.11 header - dword aligned size - * (This calculation modifies the TX command, so do it before the - * setup of the first TB) - */ - len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - - IWL_FIRST_TB_SIZE; - - if (pad) - tb1_len = ALIGN(len, 4); - else - tb1_len = len; - - /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; - tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa(), we ensure (via alignment) that the data - * here can never cross or end at a page boundary. 
- */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, - IWL_FIRST_TB_SIZE + tb1_len, hdr_len); - - /* set up TFD's third entry to point to remainder of skb's head */ - tb2_len = skb_headlen(skb) - hdr_len; - - if (tb2_len > 0) { - int ret; - - tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, - tb2_len, DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - skb->data + hdr_len, tb2_len, - NULL); - if (ret) - goto out_err; - } - - if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) - goto out_err; - - skb_walk_frags(skb, frag) { - int ret; - - tb_phys = dma_map_single(trans->dev, frag->data, - skb_headlen(frag), DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - frag->data, - skb_headlen(frag), NULL); - if (ret) - goto out_err; - if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) - goto out_err; - } - - return tfd; - -out_err: - iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); - return NULL; -} - -static -struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); - int len, hdr_len; - bool amsdu; - - /* There must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); - BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + - offsetofend(struct iwl_tx_cmd_gen2, dram_info) > - IWL_FIRST_TB_SIZE); - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); - BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + - offsetofend(struct iwl_tx_cmd_gen3, dram_info) > - IWL_FIRST_TB_SIZE); - - memset(tfd, 0, sizeof(*tfd)); - - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) - len = sizeof(struct iwl_tx_cmd_gen2); - else - len = sizeof(struct iwl_tx_cmd_gen3); - - amsdu = ieee80211_is_data_qos(hdr->frame_control) && - (*ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_A_MSDU_PRESENT); - - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - /* - * Only build A-MSDUs here if doing so by GSO, otherwise it may be - * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been - * built in the higher layers already. - */ - if (amsdu && skb_shinfo(skb)->gso_size) - return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, - out_meta, hdr_len, len); - return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, - hdr_len, len, !amsdu); -} - -int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) -{ - unsigned int max; - unsigned int used; - - /* - * To avoid ambiguity between empty and completely full queues, there - * should always be less than max_tfd_queue_size elements in the queue. - * If q->n_window is smaller than max_tfd_queue_size, there is no need - * to reserve any queue entries for this purpose. - */ - if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) - max = q->n_window; - else - max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; - - /* - * max_tfd_queue_size is a power of 2, so the following is equivalent to - * modulo by max_tfd_queue_size and is well defined. 
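/*
 * Standalone check of that masked-difference identity (queue size 256
 * assumed for illustration): the used count stays correct even after
 * write_ptr wraps past the end of the ring.
 */
static int model_used_entries(unsigned int write, unsigned int read)
{
	return (write - read) & (256 - 1);
}
/* e.g. model_used_entries(4, 250) == 10: 6 to the ring end + 4 from 0 */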
- */ - used = (q->write_ptr - q->read_ptr) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); - - if (WARN_ON(used > max)) - return 0; - - return max - used; -} - -int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int txq_id) -{ - struct iwl_cmd_meta *out_meta; - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - u16 cmd_len; - int idx; - void *tfd; - - if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", txq_id)) - return -EINVAL; - - if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), - "TX on unused queue %d\n", txq_id)) - return -EINVAL; - - if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && - __skb_linearize(skb)) - return -ENOMEM; - - spin_lock(&txq->lock); - - if (iwl_txq_space(trans, txq) < txq->high_mark) { - iwl_txq_stop(trans, txq); - - /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_txq_space(trans, txq) < 3)) { - struct iwl_device_tx_cmd **dev_cmd_ptr; - - dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans->txqs.dev_cmd_offs); - - *dev_cmd_ptr = dev_cmd; - __skb_queue_tail(&txq->overflow_q, skb); - spin_unlock(&txq->lock); - return 0; - } - } - - idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - - /* Set up driver data for this TFD */ - txq->entries[idx].skb = skb; - txq->entries[idx].cmd = dev_cmd; - - dev_cmd->hdr.sequence = - cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(idx))); - - /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_meta = &txq->entries[idx].meta; - out_meta->flags = 0; - - tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); - if (!tfd) { - spin_unlock(&txq->lock); - return -1; - } - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen3->len); - } else { - struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen2->len); - } - - /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, - iwl_txq_gen2_get_num_tbs(trans, tfd)); - - /* start timer if queue currently empty */ - if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - - /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); - iwl_txq_inc_wr_ptr(trans, txq); - /* - * At this point the frame is "transmitted" successfully - * and we will get a TX status notification eventually. 
- */ - spin_unlock(&txq->lock); - return 0; -} - -/*************** HOST COMMAND QUEUE FUNCTIONS *****/ - -/* - * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's - */ -void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - - spin_lock_bh(&txq->lock); - while (txq->write_ptr != txq->read_ptr) { - IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", - txq_id, txq->read_ptr); - - if (txq_id != trans->txqs.cmd.q_id) { - int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); - struct sk_buff *skb = txq->entries[idx].skb; - - if (!WARN_ON_ONCE(!skb)) - iwl_txq_free_tso_page(trans, skb); - } - iwl_txq_gen2_free_tfd(trans, txq); - txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); - } - - while (!skb_queue_empty(&txq->overflow_q)) { - struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); - - iwl_op_mode_free_skb(trans->op_mode, skb); - } - - spin_unlock_bh(&txq->lock); - - /* just in case - this queue may have been stopped */ - iwl_wake_queue(trans, txq); -} - -static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct device *dev = trans->dev; - - /* De-alloc circular buffer of TFDs */ - if (txq->tfds) { - dma_free_coherent(dev, - trans->txqs.tfd.size * txq->n_window, - txq->tfds, txq->dma_addr); - dma_free_coherent(dev, - sizeof(*txq->first_tb_bufs) * txq->n_window, - txq->first_tb_bufs, txq->first_tb_dma); - } - - kfree(txq->entries); - if (txq->bc_tbl.addr) - dma_pool_free(trans->txqs.bc_pool, - txq->bc_tbl.addr, txq->bc_tbl.dma); - kfree(txq); -} - -/* - * iwl_pcie_txq_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. - */ -static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) -{ - struct iwl_txq *txq; - int i; - - if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", txq_id)) - return; - - txq = trans->txqs.txq[txq_id]; - - if (WARN_ON(!txq)) - return; - - iwl_txq_gen2_unmap(trans, txq_id); - - /* De-alloc array of command/tx buffers */ - if (txq_id == trans->txqs.cmd.q_id) - for (i = 0; i < txq->n_window; i++) { - kfree_sensitive(txq->entries[i].cmd); - kfree_sensitive(txq->entries[i].free_buf); - } - del_timer_sync(&txq->stuck_timer); - - iwl_txq_gen2_free_memory(trans, txq); - - trans->txqs.txq[txq_id] = NULL; - - clear_bit(txq_id, trans->txqs.queue_used); -} - -/* - * iwl_queue_init - Initialize queue's high/low-water and read/write indexes - */ -static int iwl_queue_init(struct iwl_txq *q, int slots_num) -{ - q->n_window = slots_num; - - /* slots_num must be power-of-two size, otherwise - * iwl_txq_get_cmd_index is broken. */ - if (WARN_ON(!is_power_of_2(slots_num))) - return -EINVAL; - - q->low_mark = q->n_window / 4; - if (q->low_mark < 4) - q->low_mark = 4; - - q->high_mark = q->n_window / 8; - if (q->high_mark < 2) - q->high_mark = 2; - - q->write_ptr = 0; - q->read_ptr = 0; - - return 0; -} - -int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue) -{ - int ret; - u32 tfd_queue_max_size = - trans->trans_cfg->base_params->max_tfd_queue_size; - - txq->need_update = false; - - /* max_tfd_queue_size must be power-of-two size, otherwise - * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. 
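/*
 * Why the power-of-two requirement: the wrap helpers then reduce to a
 * simple mask (a sketch of the assumed semantics of iwl_txq_inc_wrap
 * and iwl_txq_dec_wrap):
 */
static inline int model_inc_wrap(int index, int queue_size)
{
	return (index + 1) & (queue_size - 1);
}

static inline int model_dec_wrap(int index, int queue_size)
{
	return (index - 1) & (queue_size - 1);
}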
*/ - if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), - "Max tfd queue size must be a power of two, but is %d", - tfd_queue_max_size)) - return -EINVAL; - - /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(txq, slots_num); - if (ret) - return ret; - - spin_lock_init(&txq->lock); - - if (cmd_queue) { - static struct lock_class_key iwl_txq_cmd_queue_lock_class; - - lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); - } - - __skb_queue_head_init(&txq->overflow_q); - - return 0; -} - -void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) -{ - struct page **page_ptr; - struct page *next; - - page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); - next = *page_ptr; - *page_ptr = NULL; - - while (next) { - struct page *tmp = next; - - next = *(void **)((u8 *)page_address(next) + PAGE_SIZE - - sizeof(void *)); - __free_page(tmp); - } -} - -void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) -{ - u32 txq_id = txq->id; - u32 status; - bool active; - u8 fifo; - - if (trans->trans_cfg->gen2) { - IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, - txq->read_ptr, txq->write_ptr); - /* TODO: access new SCD registers and dump them */ - return; - } - - status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); - fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - - IWL_ERR(trans, - "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", - txq_id, active ? "" : "in", fifo, - jiffies_to_msecs(txq->wd_timeout), - txq->read_ptr, txq->write_ptr, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), - iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); -} - -static void iwl_txq_stuck_timer(struct timer_list *t) -{ - struct iwl_txq *txq = from_timer(txq, t, stuck_timer); - struct iwl_trans *trans = txq->trans; - - spin_lock(&txq->lock); - /* check if triggered erroneously */ - if (txq->read_ptr == txq->write_ptr) { - spin_unlock(&txq->lock); - return; - } - spin_unlock(&txq->lock); - - iwl_txq_log_scd_error(trans, txq); - - iwl_force_nmi(trans); -} - -static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, - struct iwl_tfd *tfd) -{ - tfd->num_tbs = 0; - - iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma, - trans->invalid_tx_cmd.size); -} - -int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue) -{ - size_t num_entries = trans->trans_cfg->gen2 ? 
- slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; - size_t tfd_sz; - size_t tb0_buf_sz; - int i; - - if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) - return -EINVAL; - - if (WARN_ON(txq->entries || txq->tfds)) - return -EINVAL; - - tfd_sz = trans->txqs.tfd.size * num_entries; - - timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); - txq->trans = trans; - - txq->n_window = slots_num; - - txq->entries = kcalloc(slots_num, - sizeof(struct iwl_pcie_txq_entry), - GFP_KERNEL); - - if (!txq->entries) - goto error; - - if (cmd_queue) - for (i = 0; i < slots_num; i++) { - txq->entries[i].cmd = - kmalloc(sizeof(struct iwl_device_cmd), - GFP_KERNEL); - if (!txq->entries[i].cmd) - goto error; - } - - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, - &txq->dma_addr, GFP_KERNEL); - if (!txq->tfds) - goto error; - - BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); - - tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; - - txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, - &txq->first_tb_dma, - GFP_KERNEL); - if (!txq->first_tb_bufs) - goto err_free_tfds; - - for (i = 0; i < num_entries; i++) { - void *tfd = iwl_txq_get_tfd(trans, txq, i); - - if (trans->trans_cfg->gen2) - iwl_txq_set_tfd_invalid_gen2(trans, tfd); - else - iwl_txq_set_tfd_invalid_gen1(trans, tfd); - } - - return 0; -err_free_tfds: - dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); - txq->tfds = NULL; -error: - if (txq->entries && cmd_queue) - for (i = 0; i < slots_num; i++) - kfree(txq->entries[i].cmd); - kfree(txq->entries); - txq->entries = NULL; - - return -ENOMEM; -} - -static struct iwl_txq * -iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout) -{ - size_t bc_tbl_size, bc_tbl_entries; - struct iwl_txq *txq; - int ret; - - WARN_ON(!trans->txqs.bc_tbl_size); - - bc_tbl_size = trans->txqs.bc_tbl_size; - bc_tbl_entries = bc_tbl_size / sizeof(u16); - - if (WARN_ON(size > bc_tbl_entries)) - return ERR_PTR(-EINVAL); - - txq = kzalloc(sizeof(*txq), GFP_KERNEL); - if (!txq) - return ERR_PTR(-ENOMEM); - - txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, - &txq->bc_tbl.dma); - if (!txq->bc_tbl.addr) { - IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); - kfree(txq); - return ERR_PTR(-ENOMEM); - } - - ret = iwl_txq_alloc(trans, txq, size, false); - if (ret) { - IWL_ERR(trans, "Tx queue alloc failed\n"); - goto error; - } - ret = iwl_txq_init(trans, txq, size, false); - if (ret) { - IWL_ERR(trans, "Tx queue init failed\n"); - goto error; - } - - txq->wd_timeout = msecs_to_jiffies(timeout); - - return txq; - -error: - iwl_txq_gen2_free_memory(trans, txq); - return ERR_PTR(ret); -} - -static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_host_cmd *hcmd) -{ - struct iwl_tx_queue_cfg_rsp *rsp; - int ret, qid; - u32 wr_ptr; - - if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != - sizeof(*rsp))) { - ret = -EINVAL; - goto error_free_resp; - } - - rsp = (void *)hcmd->resp_pkt->data; - qid = le16_to_cpu(rsp->queue_number); - wr_ptr = le16_to_cpu(rsp->write_pointer); - - if (qid >= ARRAY_SIZE(trans->txqs.txq)) { - WARN_ONCE(1, "queue index %d unsupported", qid); - ret = -EIO; - goto error_free_resp; - } - - if (test_and_set_bit(qid, trans->txqs.queue_used)) { - WARN_ONCE(1, "queue %d already used", qid); - ret = -EIO; - goto error_free_resp; - } - - if 
(WARN_ONCE(trans->txqs.txq[qid], - "queue %d already allocated\n", qid)) { - ret = -EIO; - goto error_free_resp; - } - - txq->id = qid; - trans->txqs.txq[qid] = txq; - wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); - - /* Place first TFD at index corresponding to start sequence number */ - txq->read_ptr = wr_ptr; - txq->write_ptr = wr_ptr; - - IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); - - iwl_free_resp(hcmd); - return qid; - -error_free_resp: - iwl_free_resp(hcmd); - iwl_txq_gen2_free_memory(trans, txq); - return ret; -} - -int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, - u8 tid, int size, unsigned int timeout) -{ - struct iwl_txq *txq; - union { - struct iwl_tx_queue_cfg_cmd old; - struct iwl_scd_queue_cfg_cmd new; - } cmd; - struct iwl_host_cmd hcmd = { - .flags = CMD_WANT_SKB, - }; - int ret; - - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && - trans->hw_rev_step == SILICON_A_STEP) - size = 4096; - - txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); - if (IS_ERR(txq)) - return PTR_ERR(txq); - - if (trans->txqs.queue_alloc_cmd_ver == 0) { - memset(&cmd.old, 0, sizeof(cmd.old)); - cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); - cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); - cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); - cmd.old.tid = tid; - - if (hweight32(sta_mask) != 1) { - ret = -EINVAL; - goto error; - } - cmd.old.sta_id = ffs(sta_mask) - 1; - - hcmd.id = SCD_QUEUE_CFG; - hcmd.len[0] = sizeof(cmd.old); - hcmd.data[0] = &cmd.old; - } else if (trans->txqs.queue_alloc_cmd_ver == 3) { - memset(&cmd.new, 0, sizeof(cmd.new)); - cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); - cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); - cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); - cmd.new.u.add.flags = cpu_to_le32(flags); - cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); - cmd.new.u.add.tid = tid; - - hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); - hcmd.len[0] = sizeof(cmd.new); - hcmd.data[0] = &cmd.new; - } else { - ret = -EOPNOTSUPP; - goto error; - } - - ret = iwl_trans_send_cmd(trans, &hcmd); - if (ret) - goto error; - - return iwl_txq_alloc_response(trans, txq, &hcmd); - -error: - iwl_txq_gen2_free_memory(trans, txq); - return ret; -} - -void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) -{ - if (WARN(queue >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", queue)) - return; - - /* - * Upon HW Rfkill - we stop the device, and then stop the queues - * in the op_mode. Just for the sake of the simplicity of the op_mode, - * allow the op_mode to call txq_disable after it already called - * stop_device. 
- */ - if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { - WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), - "queue %d not used", queue); - return; - } - - iwl_txq_gen2_free(trans, queue); - - IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); -} - -void iwl_txq_gen2_tx_free(struct iwl_trans *trans) -{ - int i; - - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); - - /* Free all TX queues */ - for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { - if (!trans->txqs.txq[i]) - continue; - - iwl_txq_gen2_free(trans, i); - } -} - -int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) -{ - struct iwl_txq *queue; - int ret; - - /* alloc and init the tx queue */ - if (!trans->txqs.txq[txq_id]) { - queue = kzalloc(sizeof(*queue), GFP_KERNEL); - if (!queue) { - IWL_ERR(trans, "Not enough memory for tx queue\n"); - return -ENOMEM; - } - trans->txqs.txq[txq_id] = queue; - ret = iwl_txq_alloc(trans, queue, queue_size, true); - if (ret) { - IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); - goto error; - } - } else { - queue = trans->txqs.txq[txq_id]; - } - - ret = iwl_txq_init(trans, queue, queue_size, - (txq_id == trans->txqs.cmd.q_id)); - if (ret) { - IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); - goto error; - } - trans->txqs.txq[txq_id]->id = txq_id; - set_bit(txq_id, trans->txqs.queue_used); - - return 0; - -error: - iwl_txq_gen2_tx_free(trans); - return ret; -} - -static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, - struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - dma_addr_t addr; - dma_addr_t hi_len; - - addr = get_unaligned_le32(&tb->lo); - - if (sizeof(dma_addr_t) <= sizeof(u32)) - return addr; - - hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; - - /* - * shift by 16 twice to avoid warnings on 32-bit - * (where this code never runs anyway due to the - * if statement above) - */ - return addr | ((hi_len << 16) << 16); -} - -void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_txq *txq, int index) -{ - int i, num_tbs; - struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); - - /* Sanity check on number of chunks */ - num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); - - if (num_tbs > trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite a serious situation */ - return; - } - - /* first TB is never freed - it's the bidirectional DMA data */ - - for (i = 1; i < num_tbs; i++) { - if (meta->tbs & BIT(i)) - dma_unmap_page(trans->dev, - iwl_txq_gen1_tfd_tb_get_addr(trans, - tfd, i), - iwl_txq_gen1_tfd_tb_get_len(trans, - tfd, i), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - iwl_txq_gen1_tfd_tb_get_addr(trans, - tfd, i), - iwl_txq_gen1_tfd_tb_get_len(trans, - tfd, i), - DMA_TO_DEVICE); - } - - meta->tbs = 0; - - iwl_txq_set_tfd_invalid_gen1(trans, tfd); -} - -#define IWL_TX_CRC_SIZE 4 -#define IWL_TX_DELIMITER_SIZE 4 - -/* - * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl; - int write_ptr = txq->write_ptr; - int txq_id = txq->id; - u8 sec_ctl = 0; - u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; - __le16 bc_ent; - struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - u8 sta_id =
tx_cmd->sta_id; - - scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; - - sec_ctl = tx_cmd->sec_ctl; - - switch (sec_ctl & TX_CMD_SEC_MSK) { - case TX_CMD_SEC_CCM: - len += IEEE80211_CCMP_MIC_LEN; - break; - case TX_CMD_SEC_TKIP: - len += IEEE80211_TKIP_ICV_LEN; - break; - case TX_CMD_SEC_WEP: - len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; - break; - } - if (trans->txqs.bc_table_dword) - len = DIV_ROUND_UP(len, 4); - - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) - return; - - bc_ent = cpu_to_le16(len | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = - bc_ent; -} - -void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; - int txq_id = txq->id; - int read_ptr = txq->read_ptr; - u8 sta_id = 0; - __le16 bc_ent; - struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - - WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - - if (txq_id != trans->txqs.cmd.q_id) - sta_id = tx_cmd->sta_id; - - bc_ent = cpu_to_le16(1 | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; - - if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = - bc_ent; -} - -/* - * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] - * @trans - transport private data - * @txq - tx queue - * - * Does NOT advance any TFD circular buffer read/write indexes - * Does NOT free the TFD itself (which is within circular buffer) - */ -void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) -{ - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and - * idx is bounded by n_window - */ - int rd_ptr = txq->read_ptr; - int idx = iwl_txq_get_cmd_index(txq, rd_ptr); - struct sk_buff *skb; - - lockdep_assert_held(&txq->lock); - - if (!txq->entries) - return; - - /* We have only q->n_window txq->entries, but we use - * TFD_QUEUE_SIZE_MAX tfds - */ - if (trans->trans_cfg->gen2) - iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_txq_get_tfd(trans, txq, rd_ptr)); - else - iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, - txq, rd_ptr); - - /* free SKB */ - skb = txq->entries[idx].skb; - - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } -} - -void iwl_txq_progress(struct iwl_txq *txq) -{ - lockdep_assert_held(&txq->lock); - - if (!txq->wd_timeout) - return; - - /* - * station is asleep and we send data - that must - * be uAPSD or PS-Poll. Don't rearm the timer.
- */ - if (txq->frozen) - return; - - /* - * if empty delete timer, otherwise move timer forward - * since we're making progress on this queue - */ - if (txq->read_ptr == txq->write_ptr) - del_timer(&txq->stuck_timer); - else - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); -} - -/* Frees buffers until index _not_ inclusive */ -void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs, bool is_flush) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - int tfd_num, read_ptr, last_to_free; - - /* This function is not meant to release the cmd queue */ - if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) - return; - - if (WARN_ON(!txq)) - return; - - tfd_num = iwl_txq_get_cmd_index(txq, ssn); - read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); - - spin_lock_bh(&txq->lock); - - if (!test_bit(txq_id, trans->txqs.queue_used)) { - IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", - txq_id, ssn); - goto out; - } - - if (read_ptr == tfd_num) - goto out; - - IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", - txq_id, txq->read_ptr, tfd_num, ssn); - - /* Since we free until index _not_ inclusive, the one before index is - * the last we will free. This one must be used */ - last_to_free = iwl_txq_dec_wrap(trans, tfd_num); - - if (!iwl_txq_used(txq, last_to_free)) { - IWL_ERR(trans, - "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, last_to_free, - trans->trans_cfg->base_params->max_tfd_queue_size, - txq->write_ptr, txq->read_ptr); - - iwl_op_mode_time_point(trans->op_mode, - IWL_FW_INI_TIME_POINT_FAKE_TX, - NULL); - goto out; - } - - if (WARN_ON(!skb_queue_empty(skbs))) - goto out; - - for (; - read_ptr != tfd_num; - txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), - read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { - struct sk_buff *skb = txq->entries[read_ptr].skb; - - if (WARN_ON_ONCE(!skb)) - continue; - - iwl_txq_free_tso_page(trans, skb); - - __skb_queue_tail(skbs, skb); - - txq->entries[read_ptr].skb = NULL; - - if (!trans->trans_cfg->gen2) - iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); - - iwl_txq_free_tfd(trans, txq); - } - - iwl_txq_progress(txq); - - if (iwl_txq_space(trans, txq) > txq->low_mark && - test_bit(txq_id, trans->txqs.queue_stopped)) { - struct sk_buff_head overflow_skbs; - struct sk_buff *skb; - - __skb_queue_head_init(&overflow_skbs); - skb_queue_splice_init(&txq->overflow_q, - is_flush ? skbs : &overflow_skbs); - - /* - * We are going to transmit from the overflow queue. - * Remember this state so that wait_for_txq_empty will know we - * are adding more packets to the TFD queue. It cannot rely on - * the state of &txq->overflow_q, as we just emptied it, but - * haven't TXed the content yet. - */ - txq->overflow_tx = true; - - /* - * This is tricky: we are in the reclaim path, which is not - * re-entrant, so no one will try to access the txq data - * from that path. We stopped tx, so the tx path can't race - * with us either. Bottom line: we can unlock and re-lock - * later. - */ - spin_unlock_bh(&txq->lock); - - while ((skb = __skb_dequeue(&overflow_skbs))) { - struct iwl_device_tx_cmd *dev_cmd_ptr; - - dev_cmd_ptr = *(void **)((u8 *)skb->cb + - trans->txqs.dev_cmd_offs); - - /* - * Note that we can very well be overflowing again. - * In that case, iwl_txq_space will be small again - * and we won't wake mac80211's queue.
- */ - iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); - } - - if (iwl_txq_space(trans, txq) > txq->low_mark) - iwl_wake_queue(trans, txq); - - spin_lock_bh(&txq->lock); - txq->overflow_tx = false; - } - -out: - spin_unlock_bh(&txq->lock); -} - -/* Set wr_ptr of specific device and txq */ -void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - - spin_lock_bh(&txq->lock); - - txq->write_ptr = ptr; - txq->read_ptr = txq->write_ptr; - - spin_unlock_bh(&txq->lock); -} - -void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, - bool freeze) -{ - int queue; - - for_each_set_bit(queue, &txqs, BITS_PER_LONG) { - struct iwl_txq *txq = trans->txqs.txq[queue]; - unsigned long now; - - spin_lock_bh(&txq->lock); - - now = jiffies; - - if (txq->frozen == freeze) - goto next_queue; - - IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", - freeze ? "Freezing" : "Waking", queue); - - txq->frozen = freeze; - - if (txq->read_ptr == txq->write_ptr) - goto next_queue; - - if (freeze) { - if (unlikely(time_after(now, - txq->stuck_timer.expires))) { - /* - * The timer should have fired, maybe it is - * spinning right now on the lock. - */ - goto next_queue; - } - /* remember how long until the timer fires */ - txq->frozen_expiry_remainder = - txq->stuck_timer.expires - now; - del_timer(&txq->stuck_timer); - goto next_queue; - } - - /* - * Wake a non-empty queue -> arm timer with the - * remainder before it froze - */ - mod_timer(&txq->stuck_timer, - now + txq->frozen_expiry_remainder); - -next_queue: - spin_unlock_bh(&txq->lock); - } -} - -#define HOST_COMPLETE_TIMEOUT (2 * HZ) - -static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; - int cmd_idx; - int ret; - - IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); - - if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, - &trans->status), - "Command %s: a command is already active!\n", cmd_str)) - return -EIO; - - IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); - - cmd_idx = trans->ops->send_cmd(trans, cmd); - if (cmd_idx < 0) { - ret = cmd_idx; - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", - cmd_str, ret); - return ret; - } - - ret = wait_event_timeout(trans->wait_command_queue, - !test_bit(STATUS_SYNC_HCMD_ACTIVE, - &trans->status), - HOST_COMPLETE_TIMEOUT); - if (!ret) { - IWL_ERR(trans, "Error sending %s: time out after %dms.\n", - cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - - IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", - txq->read_ptr, txq->write_ptr); - - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", - cmd_str); - ret = -ETIMEDOUT; - - iwl_trans_sync_nmi(trans); - goto cancel; - } - - if (test_bit(STATUS_FW_ERROR, &trans->status)) { - if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, - &trans->status)) { - IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); - dump_stack(); - } - ret = -EIO; - goto cancel; - } - - if (!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { - IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... 
no rsp\n"); - ret = -ERFKILL; - goto cancel; - } - - if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { - IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); - ret = -EIO; - goto cancel; - } - - return 0; - -cancel: - if (cmd->flags & CMD_WANT_SKB) { - /* - * Cancel the CMD_WANT_SKB flag for the cmd in the - * TX cmd queue. Otherwise in case the cmd comes - * in later, it will possibly set an invalid - * address (cmd->meta.source). - */ - txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; - } - - if (cmd->resp_pkt) { - iwl_free_resp(cmd); - cmd->resp_pkt = NULL; - } - - return ret; -} - -int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - /* Make sure the NIC is still alive in the bus */ - if (test_bit(STATUS_TRANS_DEAD, &trans->status)) - return -ENODEV; - - if (!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { - IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", - cmd->id); - return -ERFKILL; - } - - if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && - !(cmd->flags & CMD_SEND_IN_D3))) { - IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); - return -EHOSTDOWN; - } - - if (cmd->flags & CMD_ASYNC) { - int ret; - - /* An asynchronous command can not expect an SKB to be set. */ - if (WARN_ON(cmd->flags & CMD_WANT_SKB)) - return -EINVAL; - - ret = trans->ops->send_cmd(trans, cmd); - if (ret < 0) { - IWL_ERR(trans, - "Error sending %s: enqueue_hcmd failed: %d\n", - iwl_get_cmd_string(trans, cmd->id), ret); - return ret; - } - return 0; - } - - return iwl_trans_txq_send_hcmd_sync(trans, cmd); -} - diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h deleted file mode 100644 index 124b29aac4a1..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ /dev/null @@ -1,191 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* - * Copyright (C) 2020-2023 Intel Corporation - */ -#ifndef __iwl_trans_queue_tx_h__ -#define __iwl_trans_queue_tx_h__ -#include "iwl-fh.h" -#include "fw/api/tx.h" - -struct iwl_tso_hdr_page { - struct page *page; - u8 *pos; -}; - -static inline dma_addr_t -iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) -{ - return txq->first_tb_dma + - sizeof(struct iwl_pcie_first_tb_buf) * idx; -} - -static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index) -{ - return index & (q->n_window - 1); -} - -void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id); - -static inline void iwl_wake_queue(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) { - IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); - iwl_op_mode_queue_not_full(trans->op_mode, txq->id); - } -} - -static inline void *iwl_txq_get_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, int idx) -{ - if (trans->trans_cfg->gen2) - idx = iwl_txq_get_cmd_index(txq, idx); - - return (u8 *)txq->tfds + trans->txqs.tfd.size * idx; -} - -int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue); -/* - * We need this inline in case dma_addr_t is only 32-bits - since the - * hardware is always 64-bit, the issue can still occur in that case, - * so use u64 for 'phys' here to force the addition in 64-bit. 
- */ -static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len) -{ - return upper_32_bits(phys) != upper_32_bits(phys + len); -} - -int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q); - -static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) -{ - if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) { - iwl_op_mode_queue_full(trans->op_mode, txq->id); - IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); - } else { - IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", - txq->id); - } -} - -/** - * iwl_txq_inc_wrap - increment queue index, wrap back to beginning - * @trans: the transport (for configuration data) - * @index: current index - */ -static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) -{ - return ++index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); -} - -/** - * iwl_txq_dec_wrap - decrement queue index, wrap back to end - * @trans: the transport (for configuration data) - * @index: current index - */ -static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) -{ - return --index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); -} - -static inline bool iwl_txq_used(const struct iwl_txq *q, int i) -{ - int index = iwl_txq_get_cmd_index(q, i); - int r = iwl_txq_get_cmd_index(q, q->read_ptr); - int w = iwl_txq_get_cmd_index(q, q->write_ptr); - - return w >= r ? - (index >= r && index < w) : - !(index < r && index >= w); -} - -void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb); - -void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); - -int iwl_txq_gen2_set_tb(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd, dma_addr_t addr, - u16 len); - -void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_tfh_tfd *tfd); - -int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, - u32 sta_mask, u8 tid, - int size, unsigned int timeout); - -int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int txq_id); - -void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); -void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); -void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); -void iwl_txq_gen2_tx_free(struct iwl_trans *trans); -int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue); -int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size); -#ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, - struct sk_buff *skb); -#endif -static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans, - struct iwl_tfd *tfd) -{ - return tfd->num_tbs & 0x1f; -} - -static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, - void *_tfd, u8 idx) -{ - struct iwl_tfd *tfd; - struct iwl_tfd_tb *tb; - - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfh_tfd = _tfd; - struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; - - return le16_to_cpu(tfh_tb->tb_len); - } - - tfd = (struct iwl_tfd *)_tfd; - tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; -} - -static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans, - struct iwl_tfd *tfd, - u8 idx, dma_addr_t addr, u16 len) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - hi_n_len |= iwl_get_dma_hi_addr(addr); - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd->num_tbs = idx + 1; -} - 
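The ring helpers being removed here (iwl_txq_inc_wrap(), iwl_txq_dec_wrap(), iwl_txq_used()) all rely on max_tfd_queue_size being a power of two, which iwl_txq_init() enforces with its WARN_ONCE further up. A minimal standalone sketch of the same mask arithmetic - QUEUE_SIZE is a hypothetical stand-in for the driver's configured queue size, not a driver constant:

#include <stdio.h>

#define QUEUE_SIZE 256	/* hypothetical; must be a power of two */

/* Same wrap arithmetic as iwl_txq_inc_wrap()/iwl_txq_dec_wrap() */
static int inc_wrap(int index)
{
	return (index + 1) & (QUEUE_SIZE - 1);
}

static int dec_wrap(int index)
{
	return (index - 1) & (QUEUE_SIZE - 1);
}

/* Same containment test as iwl_txq_used(): is i inside [read, write)? */
static int ring_used(int read, int write, int i)
{
	return write >= read ? (i >= read && i < write)
			     : !(i < read && i >= write);
}

int main(void)
{
	printf("%d\n", inc_wrap(QUEUE_SIZE - 1));	/* 0: wraps to start */
	printf("%d\n", dec_wrap(0));			/* 255: wraps to end */
	printf("%d\n", ring_used(250, 5, 2));		/* 1: window wraps past 0 */
	return 0;
}

Masking with QUEUE_SIZE - 1 only behaves like a modulo for power-of-two sizes, which is why the init path rejects any other size up front.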
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_txq *txq, int index); -void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq); -void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs); -void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs, bool is_flush); -void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); -void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, - bool freeze); -void iwl_txq_progress(struct iwl_txq *txq); -void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); -int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); -#endif /* __iwl_trans_queue_tx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile new file mode 100644 index 000000000000..84491488f589 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +iwlwifi-tests-y += module.o devinfo.o + +ccflags-y += -I$(src)/../ + +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c new file mode 100644 index 000000000000..d0bda23c628a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for the iwlwifi device info table + * + * Copyright (C) 2023-2024 Intel Corporation + */ +#include <kunit/test.h> +#include <linux/pci.h> +#include "iwl-drv.h" +#include "iwl-config.h" + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di) +{ + printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n", + pfx, di->device, di->subdevice, di->mac_type, di->mac_step, + di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160, + di->cores); +} + +static void devinfo_table_order(struct kunit *test) +{ + int idx; + + for (idx = 0; idx < iwl_dev_info_table_size; idx++) { + const struct iwl_dev_info *di = &iwl_dev_info_table[idx]; + const struct iwl_dev_info *ret; + + ret = iwl_pci_find_dev_info(di->device, di->subdevice, + di->mac_type, di->mac_step, + di->rf_type, di->cdb, + di->jacket, di->rf_id, + di->no_160, di->cores, di->rf_step); + if (ret != di) { + iwl_pci_print_dev_info("searched: ", di); + iwl_pci_print_dev_info("found: ", ret); + KUNIT_FAIL(test, + "unusable entry at index %d (found index %d instead)\n", + idx, (int)(ret - iwl_dev_info_table)); + } + } +} + +static void devinfo_pci_ids(struct kunit *test) +{ + struct pci_dev *dev; + + dev = kunit_kmalloc(test, sizeof(*dev), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, dev); + + for (int i = 0; iwl_hw_card_ids[i].vendor; i++) { + const struct pci_device_id *s, *t; + + s = &iwl_hw_card_ids[i]; + dev->vendor = s->vendor; + dev->device = s->device; + dev->subsystem_vendor = s->subvendor; + dev->subsystem_device = s->subdevice; + dev->class = s->class; + + t = pci_match_id(iwl_hw_card_ids, dev); + KUNIT_EXPECT_PTR_EQ(test, t, s); + } +} + +static struct kunit_case devinfo_test_cases[] = { + KUNIT_CASE(devinfo_table_order), + KUNIT_CASE(devinfo_pci_ids), + {} +}; + +static struct kunit_suite iwlwifi_devinfo = { + .name 
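/*
 * Editor's note on the helper above: iwl_pcie_gen1_tfd_set_tb() packs a
 * 36-bit DMA address as 32 low bits in tb->lo plus 4 high bits in the top
 * nibble-adjacent field of tb->hi_n_len, with the 12-bit length shifted
 * into the upper bits; iwl_txq_gen1_tfd_tb_get_len() above reverses that
 * with the >> 4 shift.
 */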
= "iwlwifi-devinfo", + .test_cases = devinfo_test_cases, +}; + +kunit_test_suite(iwlwifi_devinfo); diff --git a/drivers/net/wireless/intel/iwlwifi/tests/module.c b/drivers/net/wireless/intel/iwlwifi/tests/module.c new file mode 100644 index 000000000000..0c54f818e5a7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/tests/module.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Module boilerplate for the iwlwifi kunit module. + * + * Copyright (C) 2023 Intel Corporation + */ +#include <linux/module.h> + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("kunit tests for iwlwifi"); |