Diffstat (limited to 'drivers/net/wireless/ath/ath11k/wmi.c')
-rw-r--r-- | drivers/net/wireless/ath/ath11k/wmi.c | 3144
1 file changed, 2554 insertions(+), 590 deletions(-)
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 6b68ccf65e39..110035dae8a6 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include <linux/skbuff.h> #include <linux/ctype.h> @@ -18,6 +19,8 @@ #include "mac.h" #include "hw.h" #include "peer.h" +#include "testmode.h" +#include "p2p.h" struct wmi_tlv_policy { size_t min_len; @@ -81,6 +84,12 @@ struct wmi_tlv_fw_stats_parse { bool chain_rssi_done; }; +struct wmi_tlv_mgmt_rx_parse { + const struct wmi_mgmt_rx_hdr *fixed; + const u8 *frame_buf; + bool frame_buf_done; +}; + static const struct wmi_tlv_policy wmi_tlv_policies[] = { [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 }, @@ -104,6 +113,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { = { .min_len = sizeof(struct wmi_vdev_stopped_event) }, [WMI_TAG_REG_CHAN_LIST_CC_EVENT] = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) }, + [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] + = { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) }, [WMI_TAG_MGMT_RX_HDR] = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) }, [WMI_TAG_MGMT_TX_COMPL_EVENT] @@ -128,8 +139,6 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, [WMI_TAG_STATS_EVENT] = { .min_len = sizeof(struct wmi_stats_event) }, - [WMI_TAG_RFKILL_EVENT] = { - .min_len = sizeof(struct wmi_rfkill_state_change_ev) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, [WMI_TAG_HOST_SWFDA_EVENT] = { @@ -144,6 +153,12 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_11d_new_cc_ev) }, [WMI_TAG_PER_CHAIN_RSSI_STATS] = { .min_len = sizeof(struct wmi_per_chain_rssi_stats) }, + [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = { + .min_len = sizeof(struct wmi_twt_add_dialog_event) }, + [WMI_TAG_P2P_NOA_INFO] = { + .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) }, + [WMI_TAG_P2P_NOA_EVENT] = { + .min_len = sizeof(struct wmi_p2p_noa_event) }, }; #define PRIMAP(_hw_mode_) \ @@ -228,9 +243,8 @@ static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb, (void *)tb); } -static const void ** -ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr, - size_t len, gfp_t gfp) +const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, + struct sk_buff *skb, gfp_t gfp) { const void **tb; int ret; @@ -239,7 +253,7 @@ ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr, if (!tb) return ERR_PTR(-ENOMEM); - ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len); + ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len); if (ret) { kfree(tb); return ERR_PTR(ret); @@ -283,18 +297,18 @@ err_pull: int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id) { - struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab; + struct ath11k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; - struct ath11k_base *ab = wmi_sc->ab; + struct ath11k_base *ab = wmi_ab->ab; might_sleep(); if (ab->hw_params.credit_flow) { - wait_event_timeout(wmi_sc->tx_credits_wq, ({ + wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, - &wmi_sc->ab->dev_flags)) + &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; 
(ret != -EAGAIN); @@ -304,7 +318,7 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, - &wmi_sc->ab->dev_flags)) + &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -ENOBUFS); @@ -312,10 +326,10 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, } if (ret == -EAGAIN) - ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + ath11k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); if (ret == -ENOBUFS) - ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n", + ath11k_warn(wmi_ab->ab, "ce desc not available for wmi command %d\n", cmd_id); return ret; @@ -388,6 +402,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id; ab->target_pdev_count++; + if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) && + !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP)) + return -EINVAL; + /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from * band to band for a single radio, need to see how this should be * handled. @@ -395,7 +413,9 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g; pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g; - } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { + } + + if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g; pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g; pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g; @@ -405,13 +425,11 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio); pdev_cap->nss_ratio_info = WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio); - } else { - return -EINVAL; } /* tx/rx chainmask reported from fw depends on the actual hw chains used, * For example, for 4x4 capable macphys, first 4 chains can be used for first - * mac and the remaing 4 chains can be used for the second mac or vice-versa. + * mac and the remaining 4 chains can be used for the second mac or vice-versa. * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0 * will be advertised for second mac or vice-versa. 
Compute the shift value * for tx/rx chainmask which will be used to advertise supported ht/vht rates to @@ -526,8 +544,6 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab, cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; cap->num_msdu_desc = ev->num_msdu_desc; - ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info); - return 0; } @@ -595,13 +611,15 @@ static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *sk return ret; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready"); + return 0; } -struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len) +struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; - struct ath11k_base *ab = wmi_sc->ab; + struct ath11k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); @@ -618,10 +636,25 @@ struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len) return skb; } +static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar, + struct ieee80211_tx_info *info) +{ + struct ath11k_base *ab = ar->ab; + u32 freq = 0; + + if (ab->hw_params.support_off_channel_tx && + ar->scan.is_roc && + (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + return freq; +} + int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, struct sk_buff *frame) { struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame); struct wmi_mgmt_send_cmd *cmd; struct wmi_tlv *frame_tlv; struct sk_buff *skb; @@ -642,7 +675,7 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->desc_id = buf_id; - cmd->chanfreq = 0; + cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info); cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->frame_len = frame->len; @@ -664,6 +697,8 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd mgmt tx send"); + return ret; } @@ -698,6 +733,9 @@ int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr, cmd->vdev_subtype = param->subtype; cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX; cmd->pdev_id = param->pdev_id; + cmd->mbssid_flags = param->mbssid_flags; + cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id; + ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); ptr = skb->data + sizeof(*cmd); @@ -737,7 +775,7 @@ int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n", + "cmd vdev create id %d type %d subtype %d macaddr %pM pdevid %d\n", param->if_id, param->type, param->subtype, macaddr, param->pdev_id); @@ -766,7 +804,7 @@ int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id) dev_kfree_skb(skb); } - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id); + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev delete id %d\n", vdev_id); return ret; } @@ -794,7 +832,7 @@ int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id) dev_kfree_skb(skb); } - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id); + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev stop id 0x%x\n", vdev_id); return ret; } @@ -822,7 +860,7 @@ int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id) dev_kfree_skb(skb); } - 
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id); + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev down id 0x%x\n", vdev_id); return ret; } @@ -845,7 +883,8 @@ static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan, chan->band_center_freq2 = arg->channel.band_center_freq1; - } else if (arg->channel.mode == MODE_11AC_VHT80_80) { + } else if ((arg->channel.mode == MODE_11AC_VHT80_80) || + (arg->channel.mode == MODE_11AX_HE80_80)) { chan->band_center_freq2 = arg->channel.band_center_freq2; } else { chan->band_center_freq2 = 0; @@ -914,6 +953,8 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, cmd->cac_duration_ms = arg->cac_duration_ms; cmd->regdomain = arg->regdomain; cmd->he_ops = arg->he_ops; + cmd->mbssid_flags = arg->mbssid_flags; + cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id; if (!restart) { if (arg->ssid) { @@ -945,7 +986,7 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, FIELD_PREP(WMI_TLV_LEN, 0); /* Note: This is a nested TLV containing: - * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. + * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv].. */ ptr += sizeof(*tlv); @@ -962,20 +1003,25 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, dev_kfree_skb(skb); } - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n", + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev %s id 0x%x freq 0x%x mode 0x%x\n", restart ? "restart" : "start", arg->vdev_id, arg->channel.freq, arg->channel.mode); return ret; } -int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid, + u8 *tx_bssid, u32 nontx_profile_idx, u32 nontx_profile_cnt) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_up_cmd *cmd; + struct ieee80211_bss_conf *bss_conf; + struct ath11k_vif *arvif; struct sk_buff *skb; int ret; + arvif = ath11k_mac_get_arvif(ar, vdev_id); + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -989,6 +1035,22 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid) ether_addr_copy(cmd->vdev_bssid.addr, bssid); + cmd->nontx_profile_idx = nontx_profile_idx; + cmd->nontx_profile_cnt = nontx_profile_cnt; + if (tx_bssid) + ether_addr_copy(cmd->tx_vdev_bssid.addr, tx_bssid); + + if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) { + bss_conf = &arvif->vif->bss_conf; + + if (bss_conf->nontransmitted) { + ether_addr_copy(cmd->tx_vdev_bssid.addr, + bss_conf->transmitter_bssid); + cmd->nontx_profile_idx = bss_conf->bssid_index; + cmd->nontx_profile_cnt = bss_conf->bssid_indicator; + } + } + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); @@ -996,7 +1058,7 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid) } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", + "cmd vdev up id 0x%x assoc id %d bssid %pM\n", vdev_id, aid, bssid); return ret; @@ -1029,7 +1091,7 @@ int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI peer create vdev_id %d peer_addr %pM\n", + "cmd peer create vdev_id %d peer_addr %pM\n", param->vdev_id, param->peer_addr); return ret; @@ -1054,16 +1116,16 @@ int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar, ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->vdev_id = vdev_id; - 
ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI peer delete vdev_id %d peer_addr %pM\n", - vdev_id, peer_addr); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n"); dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd peer delete vdev_id %d peer_addr %pM\n", + vdev_id, peer_addr); + return ret; } @@ -1092,11 +1154,6 @@ int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar, cmd->dfs_domain = param->dfs_domain; cmd->pdev_id = param->pdev_id; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n", - param->current_rd_in_use, param->current_rd_2g, - param->current_rd_5g, param->dfs_domain, param->pdev_id); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); if (ret) { ath11k_warn(ar->ab, @@ -1104,6 +1161,11 @@ int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n", + param->current_rd_in_use, param->current_rd_2g, + param->current_rd_5g, param->dfs_domain, param->pdev_id); + return ret; } @@ -1134,7 +1196,7 @@ int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev %d peer 0x%pM set param %d value %d\n", + "cmd peer set param vdev %d peer 0x%pM set param %d value %d\n", vdev_id, peer_addr, param_id, param_val); return ret; @@ -1169,7 +1231,7 @@ int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n", + "cmd peer flush tids vdev_id %d peer_addr %pM tids %08x\n", param->vdev_id, peer_addr, param->peer_tid_bitmap); return ret; @@ -1212,7 +1274,7 @@ int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n", + "cmd peer reorder queue setup addr %pM vdev_id %d tid %d\n", addr, vdev_id, tid); return ret; @@ -1240,10 +1302,6 @@ ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar, cmd->vdev_id = param->vdev_id; cmd->tid_mask = param->peer_tid_bitmap; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__, - param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_REORDER_QUEUE_REMOVE_CMDID); if (ret) { @@ -1252,6 +1310,10 @@ ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map %d", + param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap); + return ret; } @@ -1281,7 +1343,7 @@ int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI pdev set param %d pdev id %d value %d\n", + "cmd pdev set param %d pdev id %d value %d\n", param_id, pdev_id, param_value); return ret; @@ -1312,7 +1374,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev set psmode %d vdev id %d\n", + "cmd sta powersave mode psmode %d vdev id %d\n", psmode, vdev_id); return ret; @@ -1345,7 +1407,7 @@ int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI pdev suspend pdev_id %d\n", pdev_id); + "cmd pdev suspend pdev_id %d\n", pdev_id); return ret; } @@ -1367,15 +1429,15 @@ int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 
pdev_id) FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI pdev resume pdev id %d\n", pdev_id); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n"); dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev resume pdev id %d\n", pdev_id); + return ret; } @@ -1403,9 +1465,6 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, cmd->req_type = type; cmd->pdev_id = ar->pdev->pdev_id; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI bss chan info req type %d\n", type); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID); if (ret) { @@ -1414,6 +1473,9 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev bss chan info request type %d\n", type); + return ret; } @@ -1446,7 +1508,7 @@ int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI set ap ps vdev id %d peer %pM param %d value %d\n", + "cmd ap ps peer param vdev id %d peer %pM param %d value %d\n", param->vdev_id, peer_addr, param->param, param->value); return ret; @@ -1473,16 +1535,16 @@ int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id, cmd->param = param; cmd->value = param_value; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI set sta ps vdev_id %d param %d value %d\n", - vdev_id, param, param_value); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID"); dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd set powersave param vdev_id %d param %d value %d\n", + vdev_id, param, param_value); + return ret; } @@ -1512,6 +1574,9 @@ int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms) ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); dev_kfree_skb(skb); } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd force fw hang"); + return ret; } @@ -1543,7 +1608,7 @@ int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev id 0x%x set param %d value %d\n", + "cmd vdev set param vdev 0x%x param %d value %d\n", vdev_id, param_id, param_value); return ret; @@ -1576,7 +1641,7 @@ int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI request stats 0x%x vdev id %d pdev id %d\n", + "cmd request stats 0x%x vdev id %d pdev id %d\n", param->stats_id, param->vdev_id, param->pdev_id); return ret; @@ -1605,7 +1670,7 @@ int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar) } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); + "cmd pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); return ret; } @@ -1630,10 +1695,6 @@ int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, cmd->vdev_id = vdev_id; cmd->bcn_ctrl_op = bcn_ctrl_op; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI bcn ctrl offload vdev id %d ctrl_op %d\n", - vdev_id, bcn_ctrl_op); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); if (ret) { ath11k_warn(ar->ab, @@ -1641,12 +1702,55 @@ int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd bcn offload ctrl vdev id %d ctrl_op %d\n", + vdev_id, bcn_ctrl_op); + + return ret; +} + +int ath11k_wmi_p2p_go_bcn_ie(struct 
ath11k *ar, u32 vdev_id, + const u8 *p2p_ie) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_p2p_go_set_beacon_ie_cmd *cmd; + size_t p2p_ie_len, aligned_len; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret, len; + + p2p_ie_len = p2p_ie[1] + 2; + aligned_len = roundup(p2p_ie_len, 4); + + len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = vdev_id; + cmd->ie_buf_len = p2p_ie_len; + + tlv = (struct wmi_tlv *)cmd->tlv; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, aligned_len); + memcpy(tlv->value, p2p_ie, p2p_ie_len); + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); + if (ret) { + ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); + dev_kfree_skb(skb); + } + return ret; } int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, - struct sk_buff *bcn) + struct sk_buff *bcn, u32 ema_params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_bcn_tmpl_cmd *cmd; @@ -1678,12 +1782,14 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, cmd->vdev_id = vdev_id; cmd->tim_ie_offset = offs->tim_offset; - if (vif->csa_active) { + if (vif->bss_conf.csa_active) { cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; } cmd->buf_len = bcn->len; + cmd->mbssid_ie_offset = offs->mbssid_off; + cmd->ema_params = ema_params; ptr = skb->data + sizeof(*cmd); @@ -1708,6 +1814,8 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn tmpl"); + return ret; } @@ -1757,7 +1865,7 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev install key idx %d cipher %d len %d\n", + "cmd vdev install key idx %d cipher %d len %d\n", arg->key_idx, arg->key_cipher, arg->key_len); return ret; @@ -1953,10 +2061,13 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; if (param->vht_capable) { - mcs->rx_max_rate = param->rx_max_rate; - mcs->rx_mcs_set = param->rx_mcs_set; - mcs->tx_max_rate = param->tx_max_rate; - mcs->tx_mcs_set = param->tx_mcs_set; + /* firmware interprets mcs->tx_mcs_set field as peer's + * RX capability + */ + mcs->tx_max_rate = param->rx_max_rate; + mcs->tx_mcs_set = param->rx_mcs_set; + mcs->rx_max_rate = param->tx_max_rate; + mcs->rx_mcs_set = param->tx_mcs_set; } /* HE Rates */ @@ -1980,8 +2091,11 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE); - he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i]; - he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i]; + /* firmware interprets mcs->rx_mcs_set field as peer's + * RX capability + */ + he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i]; + he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i]; ptr += sizeof(*he_mcs); } @@ -1993,7 +2107,7 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he 
cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n", + "cmd peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n", cmd->vdev_id, cmd->peer_associd, param->peer_mac, cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, cmd->peer_listen_intval, cmd->peer_ht_caps, @@ -2013,7 +2127,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar, { /* setup commonly used values */ arg->scan_req_id = 1; - arg->scan_priority = WMI_SCAN_PRIORITY_LOW; + if (ar->state_11d == ATH11K_11D_PREPARING) + arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM; + else + arg->scan_priority = WMI_SCAN_PRIORITY_LOW; arg->dwell_time_active = 50; arg->dwell_time_active_2g = 0; arg->dwell_time_passive = 150; @@ -2031,7 +2148,13 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar, WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED; - arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT; + arg->scan_f_chan_stat_evnt = 1; + + if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE, + ar->ab->wmi_ab.svc_map)) + arg->scan_ctrl_flags_ext |= + WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE; + arg->num_bssid = 1; /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be @@ -2113,6 +2236,8 @@ ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, /* for adaptive scan mode using 3 bits (21 - 23 bits) */ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags, param->adaptive_dwell_time_mode); + + cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext; } int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, @@ -2206,7 +2331,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; - tmp_ptr = (u32 *)ptr; + tmp_ptr = ptr; for (i = 0; i < params->num_chan; ++i) tmp_ptr[i] = params->chan_list[i]; @@ -2299,9 +2424,75 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd start scan"); + return ret; } +int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar, + u32 vdev_id, + struct ath11k_reg_tpc_power_info *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_set_tpc_power_cmd *cmd; + struct wmi_vdev_ch_power_info *ch; + struct sk_buff *skb; + struct wmi_tlv *tlv; + u8 *ptr; + int i, ret, len, array_len; + + array_len = sizeof(*ch) * param->num_pwr_levels; + len = sizeof(*cmd) + TLV_HDR_SIZE + array_len; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + ptr = skb->data; + + cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = vdev_id; + cmd->psd_power = param->is_psd_power; + cmd->eirp_power = param->eirp_power; + cmd->power_type_6ghz = param->ap_power_type; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n", + vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type); + + ptr += sizeof(*cmd); + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, array_len); + + ptr += TLV_HDR_SIZE; + ch = (struct wmi_vdev_ch_power_info *)ptr; + + for (i = 0; i < param->num_pwr_levels; i++, 
ch++) { + ch->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_VDEV_CH_POWER_INFO) | + FIELD_PREP(WMI_TLV_LEN, + sizeof(*ch) - TLV_HDR_SIZE); + + ch->chan_cfreq = param->chan_power_info[i].chan_cfreq; + ch->tx_power = param->chan_power_info[i].tx_power; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n", + ch->chan_cfreq, ch->tx_power); + } + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID); + if (ret) { + ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n"); + dev_kfree_skb(skb); + return ret; + } + + return 0; +} + int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, struct scan_cancel_param *param) { @@ -2347,6 +2538,8 @@ int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd stop scan"); + return ret; } @@ -2391,7 +2584,7 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", + "no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", num_send_chans, len, cmd->pdev_id, num_sends); ptr = skb->data + sizeof(*cmd); @@ -2450,7 +2643,7 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, tchan_info->maxregpower); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", + "chan scan list chan[%d] = %u, chan_info->info %8x\n", i, chan_info->mhz, chan_info->info); ptr += sizeof(*chan_info); @@ -2465,6 +2658,9 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, return ret; } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd scan chan list channels %d", + num_send_chans); + num_sends++; } @@ -2472,7 +2668,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, } int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, - struct wmi_wmm_params_all_arg *param) + struct wmi_wmm_params_all_arg *param, + enum wmi_wmm_params_type wmm_param_type) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_set_wmm_params_cmd *cmd; @@ -2491,7 +2688,7 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; - cmd->wmm_param_type = 0; + cmd->wmm_param_type = wmm_param_type; for (ac = 0; ac < WME_NUM_AC; ac++) { switch (ac) { @@ -2524,8 +2721,8 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, wmm_param->no_ack = wmi_wmm_arg->no_ack; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", - ac, wmm_param->aifs, wmm_param->cwmin, + "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", + wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin, wmm_param->cwmax, wmm_param->txoplimit, wmm_param->acm, wmm_param->no_ack); } @@ -2537,6 +2734,8 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set wmm params"); + return ret; } @@ -2560,9 +2759,6 @@ int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar, cmd->pdev_id = pdev_id; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI dfs phy err offload enable pdev id %d\n", pdev_id); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID); if (ret) { @@ -2571,6 +2767,9 @@ int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev dfs phyerr 
offload enable pdev id %d\n", pdev_id); + return ret; } @@ -2595,10 +2794,6 @@ int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, cmd->initiator = initiator; cmd->reasoncode = reason; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", - vdev_id, mac, tid, initiator, reason); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID); if (ret) { @@ -2607,6 +2802,10 @@ int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", + vdev_id, mac, tid, initiator, reason); + return ret; } @@ -2631,10 +2830,6 @@ int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac, cmd->tid = tid; cmd->statuscode = status; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", - vdev_id, mac, tid, status); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID); if (ret) { @@ -2643,6 +2838,10 @@ int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", + vdev_id, mac, tid, status); + return ret; } @@ -2666,10 +2865,6 @@ int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, cmd->tid = tid; cmd->buffersize = buf_size; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", - vdev_id, mac, tid, buf_size); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID); if (ret) { @@ -2678,6 +2873,10 @@ int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", + vdev_id, mac, tid, buf_size); + return ret; } @@ -2699,10 +2898,6 @@ int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac) cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n", - vdev_id, mac); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID); if (ret) { @@ -2711,6 +2906,10 @@ int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac) dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd addba clear resp vdev_id 0x%X mac_addr %pM\n", + vdev_id, mac); + return ret; } @@ -2759,6 +2958,8 @@ int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable) dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog filter"); + return ret; } @@ -2798,21 +2999,27 @@ ath11k_wmi_send_init_country_cmd(struct ath11k *ar, cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id; break; default: + ath11k_warn(ar->ab, "unknown cc params flags: 0x%x", + init_cc_params.flags); ret = -EINVAL; - goto out; + goto err; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_INIT_COUNTRY_CMDID); - -out: if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n", ret); - dev_kfree_skb(skb); + goto err; } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set init country"); + + return 0; + +err: + dev_kfree_skb(skb); return ret; } @@ -2835,20 +3042,20 @@ int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar, cmd->pdev_id = ar->pdev->pdev_id; memcpy(&cmd->new_alpha2, ¶m->alpha2, 3); - ret = 
ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); - - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "set current country pdev id %d alpha2 %c%c\n", - ar->pdev->pdev_id, - param->alpha2[0], - param->alpha2[1]); + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret); dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd set current country pdev id %d alpha2 %c%c\n", + ar->pdev->pdev_id, + param->alpha2[0], + param->alpha2[1]); + return ret; } @@ -2909,7 +3116,7 @@ ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n", + "cmd therm throt set conf pdev_id %d enable %d dc %d dc_per_event %x levels %d\n", ar->pdev->pdev_id, param->enable, param->dc, param->dc_per_event, THERMAL_LEVELS); @@ -2936,20 +3143,20 @@ int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar, cmd->vdev_id = param->vdev_id; cmd->scan_period_msec = param->scan_period_msec; cmd->start_interval_msec = param->start_interval_msec; - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); - - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "send 11d scan start vdev id %d period %d ms internal %d ms\n", - cmd->vdev_id, - cmd->scan_period_msec, - cmd->start_interval_msec); + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret); dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd 11d scan start vdev id %d period %d ms internal %d ms\n", + cmd->vdev_id, + cmd->scan_period_msec, + cmd->start_interval_msec); + return ret; } @@ -2970,18 +3177,18 @@ int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id) FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); - - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "send 11d scan stop vdev id %d\n", - cmd->vdev_id); + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret); dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd 11d scan stop vdev id %d\n", + cmd->vdev_id); + return ret; } @@ -3012,6 +3219,8 @@ int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter) dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog enable"); + return ret; } @@ -3040,11 +3249,39 @@ int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar) dev_kfree_skb(skb); } + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog disable"); + return ret; } -int -ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id) +void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params) +{ + twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS; + twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE; + twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP; + twt_params->congestion_thresh_teardown = + ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN; + twt_params->congestion_thresh_critical = + ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL; + twt_params->interference_thresh_teardown = + ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN; + twt_params->interference_thresh_setup = + ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP; + twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP; + 
twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN; + twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS; + twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS; + twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT; + twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL; + twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL; + twt_params->remove_sta_slot_interval = + ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL; + /* TODO add MBSSID support */ + twt_params->mbss_support = 0; +} + +int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id, + struct wmi_twt_enable_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; @@ -3062,36 +3299,35 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id) cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; - cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS; - cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE; - cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP; - cmd->congestion_thresh_teardown = - ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN; - cmd->congestion_thresh_critical = - ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL; - cmd->interference_thresh_teardown = - ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN; - cmd->interference_thresh_setup = - ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP; - cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP; - cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN; - cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS; - cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS; - cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT; - cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL; - cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL; - cmd->remove_sta_slot_interval = - ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL; - /* TODO add MBSSID support */ - cmd->mbss_support = 0; - - ret = ath11k_wmi_cmd_send(wmi, skb, - WMI_TWT_ENABLE_CMDID); + cmd->sta_cong_timer_ms = params->sta_cong_timer_ms; + cmd->default_slot_size = params->default_slot_size; + cmd->congestion_thresh_setup = params->congestion_thresh_setup; + cmd->congestion_thresh_teardown = params->congestion_thresh_teardown; + cmd->congestion_thresh_critical = params->congestion_thresh_critical; + cmd->interference_thresh_teardown = params->interference_thresh_teardown; + cmd->interference_thresh_setup = params->interference_thresh_setup; + cmd->min_no_sta_setup = params->min_no_sta_setup; + cmd->min_no_sta_teardown = params->min_no_sta_teardown; + cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots; + cmd->min_no_twt_slots = params->min_no_twt_slots; + cmd->max_no_sta_twt = params->max_no_sta_twt; + cmd->mode_check_interval = params->mode_check_interval; + cmd->add_sta_slot_interval = params->add_sta_slot_interval; + cmd->remove_sta_slot_interval = params->remove_sta_slot_interval; + cmd->mbss_support = params->mbss_support; + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + + ar->twt_enabled = 1; + + ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt enable"); + + return 0; } int @@ -3114,13 +3350,194 @@ ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id) FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); 
cmd->pdev_id = pdev_id; - ret = ath11k_wmi_cmd_send(wmi, skb, - WMI_TWT_DISABLE_CMDID); + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + + ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt disable"); + + ar->twt_enabled = 0; + + return 0; +} + +int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar, + struct wmi_twt_add_dialog_params *params) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ath11k_base *ab = wmi->wmi_ab->ab; + struct wmi_twt_add_dialog_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + + cmd->vdev_id = params->vdev_id; + ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); + cmd->dialog_id = params->dialog_id; + cmd->wake_intvl_us = params->wake_intvl_us; + cmd->wake_intvl_mantis = params->wake_intvl_mantis; + cmd->wake_dura_us = params->wake_dura_us; + cmd->sp_offset_us = params->sp_offset_us; + cmd->flags = params->twt_cmd; + if (params->flag_bcast) + cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST; + if (params->flag_trigger) + cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER; + if (params->flag_flow_type) + cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE; + if (params->flag_protection) + cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION; + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID); + if (ret) { + ath11k_warn(ab, + "failed to send wmi command to add twt dialog: %d", + ret); + dev_kfree_skb(skb); + return ret; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd twt add dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n", + cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us, + cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us, + cmd->flags); + + return 0; +} + +int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar, + struct wmi_twt_del_dialog_params *params) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ath11k_base *ab = wmi->wmi_ab->ab; + struct wmi_twt_del_dialog_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + + cmd->vdev_id = params->vdev_id; + ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); + cmd->dialog_id = params->dialog_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID); + if (ret) { + ath11k_warn(ab, + "failed to send wmi command to delete twt dialog: %d", + ret); + dev_kfree_skb(skb); + return ret; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd twt del dialog vdev %u dialog id %u\n", + cmd->vdev_id, cmd->dialog_id); + + return 0; +} + +int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar, + struct wmi_twt_pause_dialog_params *params) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ath11k_base *ab = wmi->wmi_ab->ab; + struct wmi_twt_pause_dialog_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = 
(struct wmi_twt_pause_dialog_params_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_TWT_PAUSE_DIALOG_CMD) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + + cmd->vdev_id = params->vdev_id; + ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); + cmd->dialog_id = params->dialog_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID); + if (ret) { + ath11k_warn(ab, + "failed to send wmi command to pause twt dialog: %d", + ret); + dev_kfree_skb(skb); + return ret; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd twt pause dialog vdev %u dialog id %u\n", + cmd->vdev_id, cmd->dialog_id); + + return 0; +} + +int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar, + struct wmi_twt_resume_dialog_params *params) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ath11k_base *ab = wmi->wmi_ab->ab; + struct wmi_twt_resume_dialog_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_TWT_RESUME_DIALOG_CMD) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + + cmd->vdev_id = params->vdev_id; + ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); + cmd->dialog_id = params->dialog_id; + cmd->sp_offset_us = params->sp_offset_us; + cmd->next_twt_size = params->next_twt_size; + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID); + if (ret) { + ath11k_warn(ab, + "failed to send wmi command to resume twt dialog: %d", + ret); + dev_kfree_skb(skb); + return ret; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd twt resume dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n", + cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us, + cmd->next_twt_size); + + return 0; } int @@ -3154,8 +3571,12 @@ ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id, ath11k_warn(ab, "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + + ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev obss pd spatial reuse"); + + return 0; } int @@ -3180,19 +3601,20 @@ ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap) cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "obss pd pdev_id %d bss color bitmap %08x %08x\n", - cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev set srg bss color bitmap pdev_id %d bss color bitmap %08x %08x\n", + cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); + + return 0; } int @@ -3218,19 +3640,20 @@ ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap) cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "obss pd pdev_id %d partial bssid bitmap %08x %08x\n", - cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev set srg partial bssid bitmap 
pdev_id %d partial bssid bitmap %08x %08x\n", + cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); + + return 0; } int @@ -3256,19 +3679,20 @@ ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "obss pd srg pdev_id %d bss color enable bitmap %08x %08x\n", - cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev set srg obsscolor enable pdev_id %d bss color enable bitmap %08x %08x\n", + cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); + + return 0; } int @@ -3294,19 +3718,20 @@ ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "obss pd srg pdev_id %d bssid enable bitmap %08x %08x\n", - cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev set srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", + cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); + + return 0; } int @@ -3332,19 +3757,20 @@ ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "obss pd non_srg pdev_id %d bss color enable bitmap %08x %08x\n", - cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev set non srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n", + cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); + + return 0; } int @@ -3370,19 +3796,20 @@ ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "obss pd non_srg pdev_id %d bssid enable bitmap %08x %08x\n", - cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd pdev set non srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", + cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); + + return 0; } int @@ -3415,18 +3842,20 @@ ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id, cmd->free_slot_expiry_time_ms = 0; cmd->flags = 0; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n", - cmd->vdev_id, cmd->evt_type, cmd->current_bss_color, - 
cmd->detection_period_ms, cmd->scan_period_ms); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd obss color collision det config id %d type %d bss_color %d detect_period %d scan_period %d\n", + cmd->vdev_id, cmd->evt_type, cmd->current_bss_color, + cmd->detection_period_ms, cmd->scan_period_ms); + + return 0; } int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, @@ -3450,17 +3879,19 @@ int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, cmd->vdev_id = vdev_id; cmd->enable = enable ? 1 : 0; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi_send_bss_color_change_enable id %d enable %d\n", - cmd->vdev_id, cmd->enable); - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BSS_COLOR_CHANGE_ENABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); dev_kfree_skb(skb); + return ret; } - return ret; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "cmd bss color change enable id %d enable %d\n", + cmd->vdev_id, cmd->enable); + + return 0; } int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, @@ -3477,7 +3908,7 @@ int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev %i set FILS discovery template\n", vdev_id); + "vdev %i set FILS discovery template\n", vdev_id); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) @@ -3502,8 +3933,12 @@ int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, "WMI vdev %i failed to send FILS discovery template command\n", vdev_id); dev_kfree_skb(skb); + return ret; } - return ret; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd fils discovery tmpl"); + + return 0; } int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, @@ -3518,7 +3953,7 @@ int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, size_t aligned_len = roundup(tmpl->len, 4); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev %i set probe response template\n", vdev_id); + "vdev %i set probe response template\n", vdev_id); len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len; @@ -3555,8 +3990,12 @@ int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, "WMI vdev %i failed to send probe response template command\n", vdev_id); dev_kfree_skb(skb); + return ret; } - return ret; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd "); + + return 0; } int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, @@ -3567,7 +4006,7 @@ int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, struct wmi_fils_discovery_cmd *cmd; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI vdev %i set %s interval to %u TU\n", + "vdev %i set %s interval to %u TU\n", vdev_id, unsol_bcast_probe_resp_enabled ? 
"unsolicited broadcast probe response" : "FILS discovery", interval); @@ -3590,8 +4029,12 @@ int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, "WMI vdev %i failed to send FILS discovery enable/disable command\n", vdev_id); dev_kfree_skb(skb); + return ret; } - return ret; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd enable fils"); + + return 0; } static void @@ -3602,13 +4045,15 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk struct ath11k_vif *arvif; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event obss color collision"); + rcu_read_lock(); ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; @@ -3626,7 +4071,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk switch (ev->evt_type) { case WMI_BSS_COLOR_COLLISION_DETECTION: - ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap); + ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap, + 0); ath11k_dbg(ab, ATH11K_DBG_WMI, "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n", ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); @@ -3738,6 +4184,13 @@ ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg, wmi_cfg->sched_params = tg_cfg->sched_params; wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count; wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count; + wmi_cfg->host_service_flags &= + ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); + wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported << + WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); + wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET; + wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt; + wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period; } static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, @@ -3795,7 +4248,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; ath11k_dbg(ab, ATH11K_DBG_WMI, - "WMI host mem chunk req_id %d paddr 0x%llx len %d\n", + "host mem chunk req_id %d paddr 0x%llx len %d\n", param->mem_chunks[idx].req_id, (u64)param->mem_chunks[idx].paddr, param->mem_chunks[idx].len); @@ -3810,7 +4263,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, ptr += TLV_HDR_SIZE + len; if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) { - hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr; + hw_mode = ptr; hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, @@ -3830,7 +4283,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, len = sizeof(*band_to_mac); for (idx = 0; idx < param->num_band_to_mac; idx++) { - band_to_mac = (void *)ptr; + band_to_mac = ptr; band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_BAND_TO_MAC) | @@ -3849,9 +4302,12 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, if (ret) { ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd wmi init"); + + return 0; } int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, @@ -3882,7 +4338,7 @@ int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id); + "cmd lro config pdev_id 0x%x\n", 
pdev_id); return 0; err: dev_kfree_skb(skb); @@ -3940,14 +4396,17 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n"); dev_kfree_skb(skb); + return ret; } - return ret; + ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev set hw mode %d", cmd->hw_mode_index); + + return 0; } int ath11k_wmi_cmd_init(struct ath11k_base *ab) { - struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab; + struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; struct wmi_init_cmd_param init_param; struct target_resource_config config; @@ -3956,12 +4415,16 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) ab->hw_params.hw_ops->wmi_init_config(ab, &config); - memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config)); + if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, + ab->wmi_ab.svc_map)) + config.is_reg_cc_ext_event_supported = 1; - init_param.res_cfg = &wmi_sc->wlan_resource_config; - init_param.num_mem_chunks = wmi_sc->num_mem_chunks; - init_param.hw_mode_id = wmi_sc->preferred_hw_mode; - init_param.mem_chunks = wmi_sc->mem_chunks; + memcpy(&wmi_ab->wlan_resource_config, &config, sizeof(config)); + + init_param.res_cfg = &wmi_ab->wlan_resource_config; + init_param.num_mem_chunks = wmi_ab->num_mem_chunks; + init_param.hw_mode_id = wmi_ab->preferred_hw_mode; + init_param.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params.single_pdev_only) init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; @@ -3969,7 +4432,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) init_param.num_band_to_mac = ab->num_radios; ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac); - return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param); + return ath11k_init_cmd_send(&wmi_ab->wmi[0], &init_param); } int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, @@ -3999,7 +4462,7 @@ int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI spectral scan config cmd vdev_id 0x%x\n", + "cmd vdev spectral scan configure vdev_id 0x%x\n", param->vdev_id); return 0; @@ -4037,7 +4500,7 @@ int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI spectral enable cmd vdev id 0x%x\n", + "cmd vdev spectral scan enable vdev id 0x%x\n", vdev_id); return 0; @@ -4083,7 +4546,7 @@ int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI DMA ring cfg req cmd pdev_id 0x%x\n", + "cmd pdev dma ring cfg req pdev_id 0x%x\n", param->pdev_id); return 0; @@ -4189,6 +4652,8 @@ static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab, return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dma ring buf release"); + param.fixed = parse.fixed; param.buf_entry = parse.buf_entry; param.num_buf_entry = parse.num_buf_entry; @@ -4399,6 +4864,14 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc, soc->pdevs[0].pdev_id = 0; } + if (!soc->reg_info_store) { + soc->reg_info_store = kcalloc(soc->num_radios, + sizeof(*soc->reg_info_store), + GFP_ATOMIC); + if (!soc->reg_info_store) + return -ENOMEM; + } + return 0; } @@ -4436,6 +4909,7 @@ static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab) { kfree(ab->db_caps); ab->db_caps = NULL; + ab->num_db_cap = 0; } static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab, @@ -4583,6 +5057,8 @@ static int ath11k_service_ready_ext_event(struct ath11k_base *ab, goto err; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext"); + if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) 
complete(&ab->wmi_ab.service_ready); @@ -4633,6 +5109,8 @@ static int ath11k_service_ready_ext2_event(struct ath11k_base *ab, goto err; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext2"); + complete(&ab->wmi_ab.service_ready); return 0; @@ -4649,7 +5127,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf const struct wmi_vdev_start_resp_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4674,11 +5152,32 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf vdev_rsp->mac_id = ev->mac_id; vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; + vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power; kfree(tb); return 0; } +static void ath11k_print_reg_rule(struct ath11k_base *ab, const char *band, + u32 num_reg_rules, + struct cur_reg_rule *reg_rule_ptr) +{ + struct cur_reg_rule *reg_rule = reg_rule_ptr; + u32 count; + + ath11k_dbg(ab, ATH11K_DBG_WMI, "number of reg rules in %s band: %d\n", + band, num_reg_rules); + + for (count = 0; count < num_reg_rules; count++) { + ath11k_dbg(ab, ATH11K_DBG_WMI, + "reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n", + count + 1, reg_rule->start_freq, reg_rule->end_freq, + reg_rule->max_bw, reg_rule->ant_gain, + reg_rule->reg_power, reg_rule->flags); + reg_rule++; + } +} + static struct cur_reg_rule *create_reg_rules_from_wmi(u32 num_reg_rules, struct wmi_regulatory_rule_struct *wmi_reg_rule) @@ -4723,12 +5222,12 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab, const void **tb; const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr; struct wmi_regulatory_rule_struct *wmi_reg_rule; - u32 num_2g_reg_rules, num_5g_reg_rules; + u32 num_2ghz_reg_rules, num_5ghz_reg_rules; int ret; ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n"); - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4742,10 +5241,10 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab, return -EPROTO; } - reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules; - reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules; + reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules; + reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules; - if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) { + if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) { ath11k_warn(ab, "No regulatory rules available in the event info\n"); kfree(tb); return -EINVAL; @@ -4759,61 +5258,68 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab, reg_info->phy_id = chan_list_event_hdr->phy_id; reg_info->ctry_code = chan_list_event_hdr->country_id; reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code; - if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS) - reg_info->status_code = REG_SET_CC_STATUS_PASS; - else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND) - reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; - else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND) - reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; - 
else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED) - reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; - else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY) - reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; - else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL) - reg_info->status_code = REG_SET_CC_STATUS_FAIL; - - reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g; - reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g; - reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g; - reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g; - - num_2g_reg_rules = reg_info->num_2g_reg_rules; - num_5g_reg_rules = reg_info->num_5g_reg_rules; ath11k_dbg(ab, ATH11K_DBG_WMI, - "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", - __func__, reg_info->alpha2, reg_info->dfs_region, - reg_info->min_bw_2g, reg_info->max_bw_2g, - reg_info->min_bw_5g, reg_info->max_bw_5g); + "status_code %s", + ath11k_cc_status_to_str(reg_info->status_code)); + + reg_info->status_code = + ath11k_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code); + + reg_info->is_ext_reg_event = false; + + reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz; + reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz; + reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz; + reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz; + + num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules; + num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d", + reg_info->alpha2, reg_info->dfs_region, + reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, + reg_info->min_bw_5ghz, reg_info->max_bw_5ghz); ath11k_dbg(ab, ATH11K_DBG_WMI, - "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__, - num_2g_reg_rules, num_5g_reg_rules); + "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", + num_2ghz_reg_rules, num_5ghz_reg_rules); wmi_reg_rule = (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr + sizeof(*chan_list_event_hdr) + sizeof(struct wmi_tlv)); - if (num_2g_reg_rules) { - reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules, - wmi_reg_rule); - if (!reg_info->reg_rules_2g_ptr) { + if (num_2ghz_reg_rules) { + reg_info->reg_rules_2ghz_ptr = + create_reg_rules_from_wmi(num_2ghz_reg_rules, + wmi_reg_rule); + if (!reg_info->reg_rules_2ghz_ptr) { kfree(tb); - ath11k_warn(ab, "Unable to Allocate memory for 2g rules\n"); + ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n"); return -ENOMEM; } + + ath11k_print_reg_rule(ab, "2 GHz", + num_2ghz_reg_rules, + reg_info->reg_rules_2ghz_ptr); } - if (num_5g_reg_rules) { - wmi_reg_rule += num_2g_reg_rules; - reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules, - wmi_reg_rule); - if (!reg_info->reg_rules_5g_ptr) { + if (num_5ghz_reg_rules) { + wmi_reg_rule += num_2ghz_reg_rules; + reg_info->reg_rules_5ghz_ptr = + create_reg_rules_from_wmi(num_5ghz_reg_rules, + wmi_reg_rule); + if (!reg_info->reg_rules_5ghz_ptr) { kfree(tb); - ath11k_warn(ab, "Unable to Allocate memory for 5g rules\n"); + ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n"); return -ENOMEM; } + + ath11k_print_reg_rule(ab, "5 GHz", + num_5ghz_reg_rules, + reg_info->reg_rules_5ghz_ptr); } ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n"); @@ -4822,6 +5328,430 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab, return 0; } +static struct cur_reg_rule 
+*create_ext_reg_rules_from_wmi(u32 num_reg_rules, + struct wmi_regulatory_ext_rule *wmi_reg_rule) +{ + struct cur_reg_rule *reg_rule_ptr; + u32 count; + + reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC); + + if (!reg_rule_ptr) + return NULL; + + for (count = 0; count < num_reg_rules; count++) { + reg_rule_ptr[count].start_freq = + u32_get_bits(wmi_reg_rule[count].freq_info, + REG_RULE_START_FREQ); + reg_rule_ptr[count].end_freq = + u32_get_bits(wmi_reg_rule[count].freq_info, + REG_RULE_END_FREQ); + reg_rule_ptr[count].max_bw = + u32_get_bits(wmi_reg_rule[count].bw_pwr_info, + REG_RULE_MAX_BW); + reg_rule_ptr[count].reg_power = + u32_get_bits(wmi_reg_rule[count].bw_pwr_info, + REG_RULE_REG_PWR); + reg_rule_ptr[count].ant_gain = + u32_get_bits(wmi_reg_rule[count].bw_pwr_info, + REG_RULE_ANT_GAIN); + reg_rule_ptr[count].flags = + u32_get_bits(wmi_reg_rule[count].flag_info, + REG_RULE_FLAGS); + reg_rule_ptr[count].psd_flag = + u32_get_bits(wmi_reg_rule[count].psd_power_info, + REG_RULE_PSD_INFO); + reg_rule_ptr[count].psd_eirp = + u32_get_bits(wmi_reg_rule[count].psd_power_info, + REG_RULE_PSD_EIRP); + } + + return reg_rule_ptr; +} + +static u8 +ath11k_invalid_5ghz_reg_ext_rules_from_wmi(u32 num_reg_rules, + const struct wmi_regulatory_ext_rule *rule) +{ + u8 num_invalid_5ghz_rules = 0; + u32 count, start_freq; + + for (count = 0; count < num_reg_rules; count++) { + start_freq = u32_get_bits(rule[count].freq_info, + REG_RULE_START_FREQ); + + if (start_freq >= ATH11K_MIN_6G_FREQ) + num_invalid_5ghz_rules++; + } + + return num_invalid_5ghz_rules; +} + +static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab, + struct sk_buff *skb, + struct cur_regulatory_info *reg_info) +{ + const void **tb; + const struct wmi_reg_chan_list_cc_ext_event *ev; + struct wmi_regulatory_ext_rule *ext_wmi_reg_rule; + u32 num_2ghz_reg_rules, num_5ghz_reg_rules; + u32 num_6ghz_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; + u32 num_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; + u32 total_reg_rules = 0; + int ret, i, j, num_invalid_5ghz_ext_rules = 0; + + ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n"); + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch reg chan list ext update ev\n"); + kfree(tb); + return -EPROTO; + } + + reg_info->num_2ghz_reg_rules = ev->num_2ghz_reg_rules; + reg_info->num_5ghz_reg_rules = ev->num_5ghz_reg_rules; + reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] = + ev->num_6ghz_reg_rules_ap_lpi; + reg_info->num_6ghz_rules_ap[WMI_REG_STANDARD_POWER_AP] = + ev->num_6ghz_reg_rules_ap_sp; + reg_info->num_6ghz_rules_ap[WMI_REG_VERY_LOW_POWER_AP] = + ev->num_6ghz_reg_rules_ap_vlp; + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i] = + ev->num_6ghz_reg_rules_client_lpi[i]; + reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i] = + ev->num_6ghz_reg_rules_client_sp[i]; + reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i] = + ev->num_6ghz_reg_rules_client_vlp[i]; + } + + num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules; + num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules; + + total_reg_rules += num_2ghz_reg_rules; + total_reg_rules += num_5ghz_reg_rules; + + if ((num_2ghz_reg_rules > MAX_REG_RULES) || + (num_5ghz_reg_rules > 
MAX_REG_RULES)) { + ath11k_warn(ab, "Num reg rules for 2.4 GHz/5 GHz exceeds max limit (num_2ghz_reg_rules: %d num_5ghz_reg_rules: %d max_rules: %d)\n", + num_2ghz_reg_rules, num_5ghz_reg_rules, MAX_REG_RULES); + kfree(tb); + return -EINVAL; + } + + for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { + num_6ghz_reg_rules_ap[i] = reg_info->num_6ghz_rules_ap[i]; + + if (num_6ghz_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) { + ath11k_warn(ab, "Num 6 GHz reg rules for AP mode(%d) exceeds max limit (num_6ghz_reg_rules_ap: %d, max_rules: %d)\n", + i, num_6ghz_reg_rules_ap[i], MAX_6GHZ_REG_RULES); + kfree(tb); + return -EINVAL; + } + + total_reg_rules += num_6ghz_reg_rules_ap[i]; + } + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + num_6ghz_client[WMI_REG_INDOOR_AP][i] = + reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i]; + total_reg_rules += num_6ghz_client[WMI_REG_INDOOR_AP][i]; + + num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = + reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i]; + total_reg_rules += num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i]; + + num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = + reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i]; + total_reg_rules += num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]; + + if ((num_6ghz_client[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES) || + (num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] > + MAX_6GHZ_REG_RULES) || + (num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] > + MAX_6GHZ_REG_RULES)) { + ath11k_warn(ab, + "Num 6 GHz client reg rules exceeds max limit, for client(type: %d)\n", + i); + kfree(tb); + return -EINVAL; + } + } + + if (!total_reg_rules) { + ath11k_warn(ab, "No reg rules available\n"); + kfree(tb); + return -EINVAL; + } + + memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); + + reg_info->dfs_region = ev->dfs_region; + reg_info->phybitmap = ev->phybitmap; + reg_info->num_phy = ev->num_phy; + reg_info->phy_id = ev->phy_id; + reg_info->ctry_code = ev->country_id; + reg_info->reg_dmn_pair = ev->domain_code; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "status_code %s", + ath11k_cc_status_to_str(reg_info->status_code)); + + reg_info->status_code = + ath11k_wmi_cc_setting_code_to_reg(ev->status_code); + + reg_info->is_ext_reg_event = true; + + reg_info->min_bw_2ghz = ev->min_bw_2ghz; + reg_info->max_bw_2ghz = ev->max_bw_2ghz; + reg_info->min_bw_5ghz = ev->min_bw_5ghz; + reg_info->max_bw_5ghz = ev->max_bw_5ghz; + + reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP] = + ev->min_bw_6ghz_ap_lpi; + reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP] = + ev->max_bw_6ghz_ap_lpi; + reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = + ev->min_bw_6ghz_ap_sp; + reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = + ev->max_bw_6ghz_ap_sp; + reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = + ev->min_bw_6ghz_ap_vlp; + reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = + ev->max_bw_6ghz_ap_vlp; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz AP BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n", + reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP], + reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP], + reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP], + reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP], + reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP], + reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP]); + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i] = + ev->min_bw_6ghz_client_lpi[i]; + reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i] = + ev->max_bw_6ghz_client_lpi[i]; + 
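
/* The create_ext_reg_rules_from_wmi() helper added above unpacks each rule
 * from packed u32 words with u32_get_bits() and the REG_RULE_* masks from
 * wmi.h. A stand-alone sketch of that unpacking, using illustrative mask
 * values (the sketch_* names are not driver symbols; the authoritative
 * layouts live in ath11k/wmi.h), compiles as a normal user-space program:
 */

#include <stdint.h>
#include <stdio.h>

#define SKETCH_RULE_START_FREQ	0x0000ffffu	/* bits 0..15, illustrative */
#define SKETCH_RULE_END_FREQ	0xffff0000u	/* bits 16..31, illustrative */

static uint32_t sketch_get_bits(uint32_t val, uint32_t mask)
{
	/* same idea as the kernel's u32_get_bits(): mask, then shift down */
	return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* end frequency in the high half-word, start frequency in the low one */
	uint32_t freq_info = (5250u << 16) | 5170u;

	printf("start %u MHz end %u MHz\n",
	       sketch_get_bits(freq_info, SKETCH_RULE_START_FREQ),
	       sketch_get_bits(freq_info, SKETCH_RULE_END_FREQ));
	return 0;
}
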
reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = + ev->min_bw_6ghz_client_sp[i]; + reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = + ev->max_bw_6ghz_client_sp[i]; + reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = + ev->min_bw_6ghz_client_vlp[i]; + reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = + ev->max_bw_6ghz_client_vlp[i]; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz %s BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n", + ath11k_6ghz_client_type_to_str(i), + reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i], + reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i], + reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i], + reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i], + reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i], + reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]); + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "cc_ext %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d phy_bitmap 0x%x", + reg_info->alpha2, reg_info->dfs_region, + reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, + reg_info->min_bw_5ghz, reg_info->max_bw_5ghz, + reg_info->phybitmap); + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", + num_2ghz_reg_rules, num_5ghz_reg_rules); + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "num_6ghz_reg_rules_ap_lpi: %d num_6ghz_reg_rules_ap_sp: %d num_6ghz_reg_rules_ap_vlp: %d", + num_6ghz_reg_rules_ap[WMI_REG_INDOOR_AP], + num_6ghz_reg_rules_ap[WMI_REG_STANDARD_POWER_AP], + num_6ghz_reg_rules_ap[WMI_REG_VERY_LOW_POWER_AP]); + + j = WMI_REG_DEFAULT_CLIENT; + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz Regular client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d", + num_6ghz_client[WMI_REG_INDOOR_AP][j], + num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j], + num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]); + + j = WMI_REG_SUBORDINATE_CLIENT; + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz Subordinate client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d", + num_6ghz_client[WMI_REG_INDOOR_AP][j], + num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j], + num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]); + + ext_wmi_reg_rule = + (struct wmi_regulatory_ext_rule *)((u8 *)ev + sizeof(*ev) + + sizeof(struct wmi_tlv)); + if (num_2ghz_reg_rules) { + reg_info->reg_rules_2ghz_ptr = + create_ext_reg_rules_from_wmi(num_2ghz_reg_rules, + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_2ghz_ptr) { + kfree(tb); + ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n"); + return -ENOMEM; + } + + ath11k_print_reg_rule(ab, "2 GHz", + num_2ghz_reg_rules, + reg_info->reg_rules_2ghz_ptr); + } + + ext_wmi_reg_rule += num_2ghz_reg_rules; + + /* Firmware might include 6 GHz reg rule in 5 GHz rule list + * for few countries along with separate 6 GHz rule. + * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list + * causes intersect check to be true, and same rules will be + * shown multiple times in iw cmd. 
+ * Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list + */ + num_invalid_5ghz_ext_rules = + ath11k_invalid_5ghz_reg_ext_rules_from_wmi(num_5ghz_reg_rules, + ext_wmi_reg_rule); + + if (num_invalid_5ghz_ext_rules) { + ath11k_dbg(ab, ATH11K_DBG_WMI, + "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules", + reg_info->alpha2, reg_info->num_5ghz_reg_rules, + num_invalid_5ghz_ext_rules); + + num_5ghz_reg_rules = num_5ghz_reg_rules - num_invalid_5ghz_ext_rules; + reg_info->num_5ghz_reg_rules = num_5ghz_reg_rules; + } + + if (num_5ghz_reg_rules) { + reg_info->reg_rules_5ghz_ptr = + create_ext_reg_rules_from_wmi(num_5ghz_reg_rules, + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_5ghz_ptr) { + kfree(tb); + ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n"); + return -ENOMEM; + } + + ath11k_print_reg_rule(ab, "5 GHz", + num_5ghz_reg_rules, + reg_info->reg_rules_5ghz_ptr); + } + + /* We have adjusted the number of 5 GHz reg rules above. But still those + * many rules needs to be adjusted in ext_wmi_reg_rule. + * + * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases. + */ + ext_wmi_reg_rule += (num_5ghz_reg_rules + num_invalid_5ghz_ext_rules); + + for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { + reg_info->reg_rules_6ghz_ap_ptr[i] = + create_ext_reg_rules_from_wmi(num_6ghz_reg_rules_ap[i], + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_6ghz_ap_ptr[i]) { + kfree(tb); + ath11k_warn(ab, "Unable to Allocate memory for 6 GHz AP rules\n"); + return -ENOMEM; + } + + ath11k_print_reg_rule(ab, ath11k_6ghz_ap_type_to_str(i), + num_6ghz_reg_rules_ap[i], + reg_info->reg_rules_6ghz_ap_ptr[i]); + + ext_wmi_reg_rule += num_6ghz_reg_rules_ap[i]; + } + + for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) { + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz AP type %s", ath11k_6ghz_ap_type_to_str(j)); + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->reg_rules_6ghz_client_ptr[j][i] = + create_ext_reg_rules_from_wmi(num_6ghz_client[j][i], + ext_wmi_reg_rule); + + if (!reg_info->reg_rules_6ghz_client_ptr[j][i]) { + kfree(tb); + ath11k_warn(ab, "Unable to Allocate memory for 6 GHz client rules\n"); + return -ENOMEM; + } + + ath11k_print_reg_rule(ab, + ath11k_6ghz_client_type_to_str(i), + num_6ghz_client[j][i], + reg_info->reg_rules_6ghz_client_ptr[j][i]); + + ext_wmi_reg_rule += num_6ghz_client[j][i]; + } + } + + reg_info->client_type = ev->client_type; + reg_info->rnr_tpe_usable = ev->rnr_tpe_usable; + reg_info->unspecified_ap_usable = + ev->unspecified_ap_usable; + reg_info->domain_code_6ghz_ap[WMI_REG_INDOOR_AP] = + ev->domain_code_6ghz_ap_lpi; + reg_info->domain_code_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = + ev->domain_code_6ghz_ap_sp; + reg_info->domain_code_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = + ev->domain_code_6ghz_ap_vlp; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s, sp %s, vlp %s\n", + ath11k_6ghz_client_type_to_str(reg_info->client_type), + reg_info->rnr_tpe_usable, + reg_info->unspecified_ap_usable, + ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_lpi), + ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_sp), + ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_vlp)); + + for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { + reg_info->domain_code_6ghz_client[WMI_REG_INDOOR_AP][i] = + ev->domain_code_6ghz_client_lpi[i]; + reg_info->domain_code_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = + ev->domain_code_6ghz_client_sp[i]; + 
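
/* The adjustment above is subtle: stray 6 GHz entries found in the firmware's
 * 5 GHz rule list are dropped from num_5ghz_reg_rules, yet the walking pointer
 * still has to step over them so the 6 GHz AP/client blocks that follow are
 * read from the right offset. A stand-alone sketch of that bookkeeping
 * (simplified types; sketch_* names are illustrative, and 5925 MHz is assumed
 * as the ATH11K_MIN_6G_FREQ cut-off):
 */

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MIN_6G_FREQ	5925u

struct sketch_rule {
	uint32_t start_freq;	/* MHz, already unpacked */
};

int main(void)
{
	/* firmware-style layout: a 5 GHz list carrying one stray 6 GHz entry,
	 * immediately followed by the first real 6 GHz AP rule
	 */
	struct sketch_rule rules[] = {
		{ 5170 }, { 5250 }, { 5945 },	/* reported as 5 GHz rules */
		{ 5955 },			/* first 6 GHz AP rule     */
	};
	const struct sketch_rule *cursor = rules;
	uint32_t num_5ghz = 3, invalid = 0, i;

	for (i = 0; i < num_5ghz; i++)
		if (cursor[i].start_freq >= SKETCH_MIN_6G_FREQ)
			invalid++;

	num_5ghz -= invalid;		/* only these are kept as 5 GHz rules */
	printf("valid 5 GHz rules: %u\n", num_5ghz);

	cursor += num_5ghz + invalid;	/* but skip everything firmware sent  */
	printf("next block starts at %u MHz\n", cursor->start_freq);
	return 0;
}
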
reg_info->domain_code_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = + ev->domain_code_6ghz_client_vlp[i]; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz client type %s client sub domain: lpi %s, sp %s, vlp %s\n", + ath11k_6ghz_client_type_to_str(i), + ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_lpi[i]), + ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_sp[i]), + ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_vlp[i]) + ); + } + + reg_info->domain_code_6ghz_super_id = ev->domain_code_6ghz_super_id; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "6 GHz client_type %s 6 GHz super domain %s", + ath11k_6ghz_client_type_to_str(reg_info->client_type), + ath11k_super_reg_6ghz_to_str(reg_info->domain_code_6ghz_super_id)); + + ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory ext channel list\n"); + + kfree(tb); + return 0; +} + static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_peer_delete_resp_event *peer_del_resp) { @@ -4829,7 +5759,7 @@ static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff * const struct wmi_peer_delete_resp_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4861,7 +5791,7 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab, const struct wmi_vdev_delete_resp_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4881,15 +5811,15 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab, return 0; } -static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf, - u32 len, u32 *vdev_id, - u32 *tx_status) +static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, + struct sk_buff *skb, + u32 *vdev_id, u32 *tx_status) { const void **tb; const struct wmi_bcn_tx_status_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4917,7 +5847,7 @@ static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_ const struct wmi_vdev_stopped_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -4937,28 +5867,49 @@ static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_ return 0; } +static int ath11k_wmi_tlv_mgmt_rx_parse(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_mgmt_rx_parse *parse = data; + + switch (tag) { + case WMI_TAG_MGMT_RX_HDR: + parse->fixed = ptr; + break; + case WMI_TAG_ARRAY_BYTE: + if (!parse->frame_buf_done) { + parse->frame_buf = ptr; + parse->frame_buf_done = true; + } + break; + } + return 0; +} + static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab, struct sk_buff *skb, struct mgmt_rx_event_params *hdr) { - const void **tb; + struct wmi_tlv_mgmt_rx_parse parse = { }; const struct wmi_mgmt_rx_hdr *ev; const u8 *frame; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); - if (IS_ERR(tb)) { - 
ret = PTR_ERR(tb); - ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_mgmt_rx_parse, + &parse); + if (ret) { + ath11k_warn(ab, "failed to parse mgmt rx tlv %d\n", + ret); return ret; } - ev = tb[WMI_TAG_MGMT_RX_HDR]; - frame = tb[WMI_TAG_ARRAY_BYTE]; + ev = parse.fixed; + frame = parse.frame_buf; if (!ev || !frame) { ath11k_warn(ab, "failed to fetch mgmt rx hdr"); - kfree(tb); return -EPROTO; } @@ -4977,7 +5928,6 @@ static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab, if (skb->len < (frame - skb->data) + hdr->buf_len) { ath11k_warn(ab, "invalid length in mgmt rx hdr ev"); - kfree(tb); return -EPROTO; } @@ -4989,12 +5939,11 @@ static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab, ath11k_ce_byte_swap(skb->data, hdr->buf_len); - kfree(tb); return 0; } -static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id, - u32 status) +static int wmi_process_mgmt_tx_comp(struct ath11k *ar, + struct wmi_mgmt_tx_compl_event *tx_compl_param) { struct sk_buff *msdu; struct ieee80211_tx_info *info; @@ -5002,24 +5951,32 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id, int num_mgmt; spin_lock_bh(&ar->txmgmt_idr_lock); - msdu = idr_find(&ar->txmgmt_idr, desc_id); + msdu = idr_find(&ar->txmgmt_idr, tx_compl_param->desc_id); if (!msdu) { ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", - desc_id); + tx_compl_param->desc_id); spin_unlock_bh(&ar->txmgmt_idr_lock); return -ENOENT; } - idr_remove(&ar->txmgmt_idr, desc_id); + idr_remove(&ar->txmgmt_idr, tx_compl_param->desc_id); spin_unlock_bh(&ar->txmgmt_idr_lock); skb_cb = ATH11K_SKB_CB(msdu); dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); - if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) + memset(&info->status, 0, sizeof(info->status)); + info->status.rates[0].idx = -1; + + if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && + !tx_compl_param->status) { info->flags |= IEEE80211_TX_STAT_ACK; + if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI, + ar->ab->wmi_ab.svc_map)) + info->status.ack_signal = tx_compl_param->ack_rssi; + } ieee80211_tx_status_irqsafe(ar->hw, msdu); @@ -5030,8 +5987,8 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id, WARN_ON_ONCE(1); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmi mgmt tx comp pending %d desc id %d\n", - num_mgmt, desc_id); + "mgmt tx comp pending %d desc id %d\n", + num_mgmt, tx_compl_param->desc_id); if (!num_mgmt) wake_up(&ar->txmgmt_empty_waitq); @@ -5047,7 +6004,7 @@ static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab, const struct wmi_mgmt_tx_compl_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5064,6 +6021,7 @@ static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab, param->pdev_id = ev->pdev_id; param->desc_id = ev->desc_id; param->status = ev->status; + param->ack_rssi = ev->ack_rssi; kfree(tb); return 0; @@ -5083,6 +6041,8 @@ static void ath11k_wmi_event_scan_started(struct ath11k *ar) break; case ATH11K_SCAN_STARTING: ar->scan.state = ATH11K_SCAN_RUNNING; + if (ar->scan.is_roc) + ieee80211_ready_on_channel(ar->hw); complete(&ar->scan.started); break; } @@ -5165,6 +6125,8 @@ static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq) case 
ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + if (ar->scan.is_roc && ar->scan.roc_freq == freq) + complete(&ar->scan.on_channel); break; } } @@ -5218,7 +6180,7 @@ static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb, const struct wmi_scan_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5251,7 +6213,7 @@ static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buf const struct wmi_peer_sta_kickout_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5278,7 +6240,7 @@ static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb, const struct wmi_roam_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5319,14 +6281,14 @@ exit: return idx; } -static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf, - u32 len, struct wmi_chan_info_event *ch_info_ev) +static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb, + struct wmi_chan_info_event *ch_info_ev) { const void **tb; const struct wmi_chan_info_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5365,7 +6327,7 @@ ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb, const struct wmi_pdev_bss_chan_info_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5405,7 +6367,7 @@ ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *sk const struct wmi_vdev_install_key_compl_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5436,7 +6398,7 @@ static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff const struct wmi_peer_assoc_conf_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -5594,7 +6556,7 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi stats vdev id %d mac %pM\n", + "stats vdev id %d mac %pM\n", stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr); arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id); @@ -5606,20 +6568,20 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, } ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi stats bssid %pM vif %pK\n", + "stats bssid %pM vif %p\n", arvif->bssid, arvif->vif); sta = 
ieee80211_find_sta_by_ifaddr(ar->hw, arvif->bssid, NULL); if (!sta) { - ath11k_warn(ab, "not found station for bssid %pM\n", - arvif->bssid); - ret = -EPROTO; + ath11k_dbg(ab, ATH11K_DBG_WMI, + "not found station of bssid %pM for rssi chain\n", + arvif->bssid); goto exit; } - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); @@ -5627,7 +6589,7 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) { arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j]; ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi stats beacon rssi[%d] %d data rssi[%d] %d\n", + "stats beacon rssi[%d] %d data rssi[%d] %d\n", j, stats_rssi->rssi_avg_beacon[j], j, @@ -5707,14 +6669,15 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, arvif->bssid, NULL); if (sta) { - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); arsta->rssi_beacon = src->beacon_snr; ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi stats vdev id %d snr %d\n", + "stats vdev id %d snr %d\n", src->vdev_id, src->beacon_snr); } else { - ath11k_warn(ab, "not found station for bssid %pM\n", - arvif->bssid); + ath11k_dbg(ab, ATH11K_DBG_WMI, + "not found station of bssid %pM for vdev stat\n", + arvif->bssid); } } @@ -5779,7 +6742,7 @@ static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab, parse->rssi_num = parse->rssi->num_per_chain_rssi_stats; ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi stats id 0x%x num chain %d\n", + "stats id 0x%x num chain %d\n", parse->ev->stats_id, parse->rssi_num); break; @@ -5815,28 +6778,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, &parse); } -size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head) -{ - struct ath11k_fw_stats_vdev *i; - size_t num = 0; - - list_for_each_entry(i, head, list) - ++num; - - return num; -} - -static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head) -{ - struct ath11k_fw_stats_bcn *i; - size_t num = 0; - - list_for_each_entry(i, head, list) - ++num; - - return num; -} - static void ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev, char *buf, u32 *length) @@ -6147,7 +7088,7 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar, } if (stats_id == WMI_REQUEST_BCN_STAT) { - num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn); + num_bcn = list_count_nodes(&fw_stats->bcn); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", @@ -6177,10 +7118,12 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb) { const struct wmi_11d_new_cc_ev *ev; + struct ath11k *ar; + struct ath11k_pdev *pdev; const void **tb; - int ret; + int ret, i; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -6198,12 +7141,19 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s memcpy(&ab->new_alpha2, &ev->new_alpha2, 2); spin_unlock_bh(&ab->base_lock); - ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi 11d new cc %c%c\n", + ath11k_dbg(ab, ATH11K_DBG_WMI, "event 11d new cc %c%c\n", ab->new_alpha2[0], ab->new_alpha2[1]); kfree(tb); + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + 
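
/* The ath11k_wmi_fw_stats_num_vdevs()/_num_bcn() helpers removed above are
 * superseded by list_count_nodes(). A minimal user-space equivalent of that
 * counting over a kernel-style circular list (sketch_* names are stand-ins,
 * not the real <linux/list.h> API):
 */

#include <stddef.h>
#include <stdio.h>

struct sketch_list {
	struct sketch_list *next, *prev;
};

static void sketch_list_init(struct sketch_list *h)
{
	h->next = h->prev = h;
}

static void sketch_list_add_tail(struct sketch_list *n, struct sketch_list *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* same job as list_count_nodes(): walk the circular list once */
static size_t sketch_list_count(const struct sketch_list *h)
{
	const struct sketch_list *p;
	size_t n = 0;

	for (p = h->next; p != h; p = p->next)
		n++;
	return n;
}

int main(void)
{
	struct sketch_list head, a, b, c;

	sketch_list_init(&head);
	sketch_list_add_tail(&a, &head);
	sketch_list_add_tail(&b, &head);
	sketch_list_add_tail(&c, &head);
	printf("%zu\n", sketch_list_count(&head));	/* prints 3 */
	return 0;
}
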
ar->state_11d = ATH11K_11D_IDLE; + complete(&ar->completed_11d_scan); + } + queue_work(ab->workqueue, &ab->update_11d_work); return 0; @@ -6238,135 +7188,38 @@ static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab, wake_up(&wmi->tx_ce_desc_wq); } -static bool ath11k_reg_is_world_alpha(char *alpha) -{ - if (alpha[0] == '0' && alpha[1] == '0') - return true; - - if (alpha[0] == 'n' && alpha[1] == 'a') - return true; - - return false; -} - -static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb) +static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb, + enum wmi_reg_chan_list_cmd_type id) { - struct cur_regulatory_info *reg_info = NULL; - struct ieee80211_regdomain *regd = NULL; - bool intersect = false; - int ret = 0, pdev_idx; - struct ath11k *ar; + struct cur_regulatory_info *reg_info; + int ret; reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); - if (!reg_info) { - ret = -ENOMEM; - goto fallback; - } + if (!reg_info) + return -ENOMEM; - ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info); - if (ret) { - ath11k_warn(ab, "failed to extract regulatory info from received event\n"); - goto fallback; - } + if (id == WMI_REG_CHAN_LIST_CC_ID) + ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info); + else + ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); - if (reg_info->status_code != REG_SET_CC_STATUS_PASS) { - /* In case of failure to set the requested ctry, - * fw retains the current regd. We print a failure info - * and return from here. - */ - ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n"); + if (ret) { + ath11k_warn(ab, "failed to extract regulatory info\n"); goto mem_free; } - pdev_idx = reg_info->phy_id; - - /* Avoid default reg rule updates sent during FW recovery if - * it is already available - */ - spin_lock(&ab->base_lock); - if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) && - ab->default_regd[pdev_idx]) { - spin_unlock(&ab->base_lock); + ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP); + if (ret) { + ath11k_warn(ab, "failed to process regulatory info %d\n", ret); goto mem_free; } - spin_unlock(&ab->base_lock); - - if (pdev_idx >= ab->num_radios) { - /* Process the event for phy0 only if single_pdev_only - * is true. If pdev_idx is valid but not 0, discard the - * event. Otherwise, it goes to fallback. - */ - if (ab->hw_params.single_pdev_only && - pdev_idx < ab->hw_params.num_rxmda_per_pdev) - goto mem_free; - else - goto fallback; - } - /* Avoid multiple overwrites to default regd, during core - * stop-start after mac registration. - */ - if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] && - !memcmp((char *)ab->default_regd[pdev_idx]->alpha2, - (char *)reg_info->alpha2, 2)) - goto mem_free; - - /* Intersect new rules with default regd if a new country setting was - * requested, i.e a default regd was already set during initialization - * and the regd coming from this event has a valid country info. - */ - if (ab->default_regd[pdev_idx] && - !ath11k_reg_is_world_alpha((char *) - ab->default_regd[pdev_idx]->alpha2) && - !ath11k_reg_is_world_alpha((char *)reg_info->alpha2)) - intersect = true; - - regd = ath11k_reg_build_regd(ab, reg_info, intersect); - if (!regd) { - ath11k_warn(ab, "failed to build regd from reg_info\n"); - goto fallback; - } - - spin_lock(&ab->base_lock); - if (ab->default_regd[pdev_idx]) { - /* The initial rules from FW after WMI Init is to build - * the default regd. 
From then on, any rules updated for - * the pdev could be due to user reg changes. - * Free previously built regd before assigning the newly - * generated regd to ar. NULL pointer handling will be - * taken care by kfree itself. - */ - ar = ab->pdevs[pdev_idx].ar; - kfree(ab->new_regd[pdev_idx]); - ab->new_regd[pdev_idx] = regd; - queue_work(ab->workqueue, &ar->regd_update_work); - } else { - /* This regd would be applied during mac registration and is - * held constant throughout for regd intersection purpose - */ - ab->default_regd[pdev_idx] = regd; - } - ab->dfs_region = reg_info->dfs_region; - spin_unlock(&ab->base_lock); - - goto mem_free; + kfree(reg_info); + return 0; -fallback: - /* Fallback to older reg (by sending previous country setting - * again if fw has succeded and we failed to process here. - * The Regdomain should be uniform across driver and fw. Since the - * FW has processed the command and sent a success status, we expect - * this function to succeed as well. If it doesn't, CTRY needs to be - * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent. - */ - /* TODO: This is rare, but still should also be handled */ - WARN_ON(1); mem_free: - if (reg_info) { - kfree(reg_info->reg_rules_2g_ptr); - kfree(reg_info->reg_rules_5g_ptr); - kfree(reg_info); - } + ath11k_reg_reset_info(reg_info); + kfree(reg_info); return ret; } @@ -6385,14 +7238,12 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, memset(&fixed_param, 0, sizeof(fixed_param)); memcpy(&fixed_param, (struct wmi_ready_event *)ptr, min_t(u16, sizeof(fixed_param), len)); - ab->wlan_init_status = fixed_param.ready_event_min.status; rdy_parse->num_extra_mac_addr = fixed_param.ready_event_min.num_extra_mac_addr; ether_addr_copy(ab->mac_addr, fixed_param.ready_event_min.mac_addr.addr); ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum; - ab->wmi_ready = true; break; case WMI_TAG_ARRAY_FIXED_STRUCT: addr_list = (struct wmi_mac_addr *)ptr; @@ -6426,6 +7277,8 @@ static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb) return ret; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event ready"); + complete(&ab->wmi_ab.unified_ready); return 0; } @@ -6440,6 +7293,8 @@ static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer delete resp"); + rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id); if (!ar) { @@ -6479,7 +7334,7 @@ static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab, rcu_read_unlock(); - ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev delete resp for vdev id %d\n", + ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev delete resp for vdev id %d\n", vdev_id); } @@ -6510,6 +7365,8 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event start resp event"); + rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id); if (!ar) { @@ -6520,7 +7377,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff } ar->last_wmi_vdev_start_status = 0; - + ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power; status = vdev_start_resp.status; if (WARN_ON_ONCE(status)) { @@ -6542,12 +7399,13 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s struct ath11k_vif *arvif; u32 vdev_id, tx_status; - if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, - &vdev_id, &tx_status) != 0) { + if (ath11k_pull_bcn_tx_status_ev(ab, 
skb, &vdev_id, &tx_status) != 0) { ath11k_warn(ab, "failed to extract bcn tx status"); return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event offload bcn tx status"); + rcu_read_lock(); arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id); if (!arvif) { @@ -6556,10 +7414,113 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s rcu_read_unlock(); return; } - ath11k_mac_bcn_tx_event(arvif); + + queue_work(ab->workqueue, &arvif->bcn_tx_work); + rcu_read_unlock(); } +static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_peer_sta_ps_state_chg_event *ev; + struct ieee80211_sta *sta; + struct ath11k_peer *peer; + struct ath11k *ar; + struct ath11k_sta *arsta; + const void **tb; + enum ath11k_wmi_peer_ps_state peer_previous_ps_state; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch sta ps change ev"); + kfree(tb); + return; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "event peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n", + ev->peer_macaddr.addr, ev->peer_ps_state, + ev->ps_supported_bitmap, ev->peer_ps_valid, + ev->peer_ps_timestamp); + + rcu_read_lock(); + + spin_lock_bh(&ab->base_lock); + + peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr); + + if (!peer) { + spin_unlock_bh(&ab->base_lock); + ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr); + goto exit; + } + + ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); + + if (!ar) { + spin_unlock_bh(&ab->base_lock); + ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d", + peer->vdev_id); + + goto exit; + } + + sta = peer->sta; + + spin_unlock_bh(&ab->base_lock); + + if (!sta) { + ath11k_warn(ab, "failed to find station entry %pM\n", + ev->peer_macaddr.addr); + goto exit; + } + + arsta = ath11k_sta_to_arsta(sta); + + spin_lock_bh(&ar->data_lock); + + peer_previous_ps_state = arsta->peer_ps_state; + arsta->peer_ps_state = ev->peer_ps_state; + arsta->peer_current_ps_valid = !!ev->peer_ps_valid; + + if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT, + ar->ab->wmi_ab.svc_map)) { + if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) || + !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) || + !ev->peer_ps_valid) + goto out; + + if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) { + arsta->ps_start_time = ev->peer_ps_timestamp; + arsta->ps_start_jiffies = jiffies; + } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF && + peer_previous_ps_state == WMI_PEER_PS_STATE_ON) { + arsta->ps_total_duration = arsta->ps_total_duration + + (ev->peer_ps_timestamp - arsta->ps_start_time); + } + + if (ar->ps_timekeeper_enable) + trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr, + ev->peer_ps_timestamp, + arsta->peer_ps_state); + } + +out: + spin_unlock_bh(&ar->data_lock); +exit: + rcu_read_unlock(); + kfree(tb); +} + static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; @@ -6570,6 +7531,8 @@ static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *sk return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev stopped"); + rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { @@ -6588,7 +7551,7 @@ static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *sk static 
void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct mgmt_rx_event_params rx_ev = {0}; + struct mgmt_rx_event_params rx_ev = {}; struct ath11k *ar; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr; @@ -6603,7 +7566,7 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) memset(status, 0, sizeof(*status)); - ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n", + ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx status %08x\n", rx_ev.status); rcu_read_lock(); @@ -6646,7 +7609,7 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) if (rx_ev.phy_mode == MODE_11B && (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); + "mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); sband = &ar->mac.sbands[status->band]; @@ -6686,7 +7649,7 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) ath11k_mac_handle_beacon(ar, skb); ath11k_dbg(ab, ATH11K_DBG_MGMT, - "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", + "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); @@ -6703,7 +7666,7 @@ exit: static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_mgmt_tx_compl_event tx_compl_param = {0}; + struct wmi_mgmt_tx_compl_event tx_compl_param = {}; struct ath11k *ar; if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { @@ -6719,13 +7682,12 @@ static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *s goto exit; } - wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id, - tx_compl_param.status); + wmi_process_mgmt_tx_comp(ar, &tx_compl_param); ath11k_dbg(ab, ATH11K_DBG_MGMT, - "mgmt tx compl ev pdev_id %d, desc_id %d, status %d", + "event mgmt tx compl ev pdev_id %d, desc_id %d, status %d ack_rssi %d", tx_compl_param.pdev_id, tx_compl_param.desc_id, - tx_compl_param.status); + tx_compl_param.status, tx_compl_param.ack_rssi); exit: rcu_read_unlock(); @@ -6759,7 +7721,7 @@ static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab, static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; - struct wmi_scan_event scan_ev = {0}; + struct wmi_scan_event scan_ev = {}; if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) { ath11k_warn(ab, "failed to extract scan event"); @@ -6794,7 +7756,7 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) spin_lock_bh(&ar->data_lock); ath11k_dbg(ab, ATH11K_DBG_WMI, - "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", + "event scan %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason), scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq, scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id, @@ -6877,7 +7839,7 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff goto exit; } - ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM", + ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta kickout %pM", arg.mac_addr); ieee80211_report_low_ack(sta, 10); @@ -6897,7 +7859,7 @@ static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) } ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi roam event vdev %u reason 0x%08x 
rssi %d\n", + "event roam vdev %u reason 0x%08x rssi %d\n", roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); rcu_read_lock(); @@ -6931,20 +7893,20 @@ static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_chan_info_event ch_info_ev = {0}; + struct wmi_chan_info_event ch_info_ev = {}; struct ath11k *ar; struct survey_info *survey; int idx; /* HW channel counters frequency value in hertz */ u32 cc_freq_hz = ab->cc_freq_hz; - if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) { + if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { ath11k_warn(ab, "failed to extract chan info event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, - "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", + "event chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, ch_info_ev.cmd_flags, ch_info_ev.noise_floor, ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, @@ -7033,7 +7995,7 @@ ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) bss_ch_info_ev.rx_bss_cycle_count_low; ath11k_dbg(ab, ATH11K_DBG_WMI, - "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", + "event pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, bss_ch_info_ev.noise_floor, busy, total, tx, rx, rx_bss); @@ -7078,7 +8040,7 @@ exit: static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_vdev_install_key_complete_arg install_key_compl = {0}; + struct wmi_vdev_install_key_complete_arg install_key_compl = {}; struct ath11k *ar; if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { @@ -7087,7 +8049,7 @@ static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, } ath11k_dbg(ab, ATH11K_DBG_WMI, - "vdev install key ev idx %d flags %08x macaddr %pM status %d\n", + "event vdev install key ev idx %d flags %08x macaddr %pM status %d\n", install_key_compl.key_idx, install_key_compl.key_flags, install_key_compl.macaddr, install_key_compl.status); @@ -7112,52 +8074,71 @@ static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, rcu_read_unlock(); } -static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) +static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) { - const void **tb; const struct wmi_service_available_event *ev; - int ret; + u32 *wmi_ext2_service_bitmap; int i, j; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); - if (IS_ERR(tb)) { - ret = PTR_ERR(tb); - ath11k_warn(ab, "failed to parse tlv: %d\n", ret); - return; - } + switch (tag) { + case WMI_TAG_SERVICE_AVAILABLE_EVENT: + ev = (struct wmi_service_available_event *)ptr; + for (i = 0, j = WMI_MAX_SERVICE; + i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; + i++) { + do { + if (ev->wmi_service_segment_bitmap[i] & + BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) + set_bit(j, ab->wmi_ab.svc_map); + } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); + } - ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT]; - if (!ev) { - ath11k_warn(ab, "failed to fetch svc available 
ev"); - kfree(tb); - return; - } + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", + ev->wmi_service_segment_bitmap[0], + ev->wmi_service_segment_bitmap[1], + ev->wmi_service_segment_bitmap[2], + ev->wmi_service_segment_bitmap[3]); + break; + case WMI_TAG_ARRAY_UINT32: + wmi_ext2_service_bitmap = (u32 *)ptr; + for (i = 0, j = WMI_MAX_EXT_SERVICE; + i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE; + i++) { + do { + if (wmi_ext2_service_bitmap[i] & + BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) + set_bit(j, ab->wmi_ab.svc_map); + } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); + } - /* TODO: Use wmi_service_segment_offset information to get the service - * especially when more services are advertised in multiple sevice - * available events. - */ - for (i = 0, j = WMI_MAX_SERVICE; - i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; - i++) { - do { - if (ev->wmi_service_segment_bitmap[i] & - BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) - set_bit(j, ab->wmi_ab.svc_map); - } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", + wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1], + wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]); + break; } + return 0; +} - ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x", - ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1], - ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]); +static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + int ret; - kfree(tb); + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_services_parser, + NULL); + if (ret) + ath11k_warn(ab, "failed to parse services available tlv %d\n", ret); + + ath11k_dbg(ab, ATH11K_DBG_WMI, "event service available"); } static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0}; + struct wmi_peer_assoc_conf_arg peer_assoc_conf = {}; struct ath11k *ar; if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { @@ -7166,7 +8147,7 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff } ath11k_dbg(ab, ATH11K_DBG_WMI, - "peer assoc conf ev vdev id %d macaddr %pM\n", + "event peer assoc conf ev vdev id %d macaddr %pM\n", peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); rcu_read_lock(); @@ -7185,7 +8166,97 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb) { - ath11k_debugfs_fw_stats_process(ab, skb); + struct ath11k_fw_stats stats = {}; + size_t total_vdevs_started = 0; + struct ath11k_pdev *pdev; + bool is_end = true; + int i; + + struct ath11k *ar; + int ret; + + INIT_LIST_HEAD(&stats.pdevs); + INIT_LIST_HEAD(&stats.vdevs); + INIT_LIST_HEAD(&stats.bcn); + + ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats); + if (ret) { + ath11k_warn(ab, "failed to pull fw stats: %d\n", ret); + goto free; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, "event update stats"); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); + if (!ar) { + rcu_read_unlock(); + ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n", + stats.pdev_id, ret); + goto free; + } + + spin_lock_bh(&ar->data_lock); + + /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and + * 
WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via + * debugfs fw stats. Therefore, processing it separately. + */ + if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { + list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); + complete(&ar->fw_stats_done); + goto complete; + } + + if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { + complete(&ar->fw_stats_done); + goto complete; + } + + if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { + if (list_empty(&stats.vdevs)) { + ath11k_warn(ab, "empty vdev stats"); + goto complete; + } + /* FW sends all the active VDEV stats irrespective of PDEV, + * hence limit until the count of all VDEVs started + */ + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) + total_vdevs_started += ar->num_started_vdevs; + } + + if (total_vdevs_started) + is_end = ((++ar->fw_stats.num_vdev_recvd) == + total_vdevs_started); + + list_splice_tail_init(&stats.vdevs, + &ar->fw_stats.vdevs); + + if (is_end) + complete(&ar->fw_stats_done); + + goto complete; + } + + /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats. + * Hence, processing it in debugfs context + */ + ath11k_debugfs_fw_stats_process(ar, &stats); + +complete: + complete(&ar->fw_stats_complete); + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); + + /* Since the stats's pdev, vdev and beacon list are spliced and reinitialised + * at this point, no need to free the individual list. + */ + return; + +free: + ath11k_fw_stats_free(&stats); } /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned @@ -7198,7 +8269,7 @@ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab, const struct wmi_pdev_ctl_failsafe_chk_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -7213,7 +8284,7 @@ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab, } ath11k_dbg(ab, ATH11K_DBG_WMI, - "pdev ctl failsafe check ev status %d\n", + "event pdev ctl failsafe check status %d\n", ev->ctl_failsafe_status); /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power @@ -7248,8 +8319,8 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab, continue; } - if (arvif->is_up && arvif->vif->csa_active) - ieee80211_csa_finish(arvif->vif); + if (arvif->is_up && arvif->vif->bss_conf.csa_active) + ieee80211_csa_finish(arvif->vif, 0); } rcu_read_unlock(); } @@ -7263,7 +8334,7 @@ ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab, const u32 *vdev_ids; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -7280,7 +8351,7 @@ ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab, } ath11k_dbg(ab, ATH11K_DBG_WMI, - "pdev csa switch count %d for pdev %d, num_vdevs %d", + "event pdev csa switch count %d for pdev %d, num_vdevs %d", ev->current_switch_count, ev->pdev_id, ev->num_vdevs); @@ -7297,7 +8368,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff struct ath11k *ar; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); 
ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -7313,11 +8384,13 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff } ath11k_dbg(ab, ATH11K_DBG_WMI, - "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", + "event pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, ev->freq_offset, ev->sidx); + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { @@ -7332,43 +8405,11 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff if (ar->dfs_block_radar_events) ath11k_info(ab, "DFS Radar detected, but ignored as requested\n"); else - ieee80211_radar_detected(ar->hw); + ieee80211_radar_detected(ar->hw, NULL); exit: - kfree(tb); -} - -static void ath11k_rfkill_state_change_event(struct ath11k_base *ab, - struct sk_buff *skb) -{ - const struct wmi_rfkill_state_change_ev *ev; - const void **tb; - int ret; - - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); - if (IS_ERR(tb)) { - ret = PTR_ERR(tb); - ath11k_warn(ab, "failed to parse tlv: %d\n", ret); - return; - } - - ev = tb[WMI_TAG_RFKILL_EVENT]; - if (!ev) { - kfree(tb); - return; - } - - ath11k_dbg(ab, ATH11K_DBG_MAC, - "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", - ev->gpio_pin_num, - ev->int_type, - ev->radio_state); - - spin_lock_bh(&ab->base_lock); - ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON); - spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); - queue_work(ab->workqueue, &ab->rfkill_work); kfree(tb); } @@ -7381,7 +8422,7 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, const struct wmi_pdev_temperature_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); @@ -7395,18 +8436,22 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, return; } - ath11k_dbg(ab, ATH11K_DBG_WMI, - "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id); + ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n", + ev->temp, ev->pdev_id); + + rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id); - kfree(tb); - return; + goto exit; } ath11k_thermal_event_temperature(ar, ev->temp); +exit: + rcu_read_unlock(); + kfree(tb); } @@ -7417,7 +8462,7 @@ static void ath11k_fils_discovery_event(struct ath11k_base *ab, const struct wmi_fils_discovery_event *ev; int ret; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, @@ -7426,6 +8471,8 @@ static void ath11k_fils_discovery_event(struct ath11k_base *ab, return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event fils discovery"); + ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch FILS discovery event\n"); @@ -7447,7 +8494,7 @@ static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab, const struct wmi_probe_resp_tx_status_event *ev; int ret; - tb 
= ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, @@ -7456,6 +8503,8 @@ static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab, return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event probe resp tx status"); + ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; if (!ev) { ath11k_warn(ab, @@ -7522,6 +8571,8 @@ static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_b return; } + ath11k_dbg(ab, ATH11K_DBG_WMI, "event wow wakeup host"); + complete(&ab->wow.wakeup_completed); } @@ -7529,9 +8580,178 @@ static void ath11k_wmi_diag_event(struct ath11k_base *ab, struct sk_buff *skb) { + ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag"); + trace_ath11k_wmi_diag(ab, skb->data, skb->len); } +static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status) +{ + switch (status) { + case WMI_ADD_TWT_STATUS_OK: + return "ok"; + case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED: + return "twt disabled"; + case WMI_ADD_TWT_STATUS_USED_DIALOG_ID: + return "dialog id in use"; + case WMI_ADD_TWT_STATUS_INVALID_PARAM: + return "invalid parameters"; + case WMI_ADD_TWT_STATUS_NOT_READY: + return "not ready"; + case WMI_ADD_TWT_STATUS_NO_RESOURCE: + return "resource unavailable"; + case WMI_ADD_TWT_STATUS_NO_ACK: + return "no ack"; + case WMI_ADD_TWT_STATUS_NO_RESPONSE: + return "no response"; + case WMI_ADD_TWT_STATUS_DENIED: + return "denied"; + case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR: + fallthrough; + default: + return "unknown error"; + } +} + +static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_twt_add_dialog_event *ev; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, + "failed to parse wmi twt add dialog status event tlv: %d\n", + ret); + return; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, "event twt add dialog"); + + ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n"); + goto exit; + } + + if (ev->status) + ath11k_warn(ab, + "wmi add twt dialog event vdev %d dialog id %d status %s\n", + ev->vdev_id, ev->dialog_id, + ath11k_wmi_twt_add_dialog_event_status(ev->status)); + +exit: + kfree(tb); +} + +static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_gtk_offload_status_event *ev; + struct ath11k_vif *arvif; + __be64 replay_ctr_be; + u64 replay_ctr; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch gtk offload status ev"); + kfree(tb); + return; + } + + rcu_read_lock(); + + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); + if (!arvif) { + ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n", + ev->vdev_id); + goto exit; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n", + ev->refresh_cnt); + ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt", + NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES); + + replay_ctr = ev->replay_ctr.word1; + replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0; + arvif->rekey_data.replay_ctr = replay_ctr; + + /* supplicant expects big-endian replay counter */ + replay_ctr_be = 
cpu_to_be64(replay_ctr); + + ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, + (void *)&replay_ctr_be, GFP_ATOMIC); +exit: + rcu_read_unlock(); + + kfree(tb); +} + +static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_p2p_noa_event *ev; + const struct ath11k_wmi_p2p_noa_info *noa; + struct ath11k *ar; + int vdev_id; + u8 noa_descriptors; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb)); + return; + } + + ev = tb[WMI_TAG_P2P_NOA_EVENT]; + noa = tb[WMI_TAG_P2P_NOA_INFO]; + + if (!ev || !noa) + goto out; + + vdev_id = ev->vdev_id; + noa_descriptors = u32_get_bits(noa->noa_attr, + WMI_P2P_NOA_INFO_DESC_NUM); + + if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) { + ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n", + noa_descriptors); + goto out; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi tlv p2p noa vdev_id %i descriptors %u\n", + vdev_id, noa_descriptors); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id %d in P2P NoA event\n", + vdev_id); + goto unlock; + } + + ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); + +unlock: + rcu_read_unlock(); +out: + kfree(tb); +} + static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -7557,7 +8777,10 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) ath11k_service_ready_ext2_event(ab, skb); break; case WMI_REG_CHAN_LIST_CC_EVENTID: - ath11k_reg_chan_list_event(ab, skb); + ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_ID); + break; + case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: + ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_EXT_ID); break; case WMI_READY_EVENTID: ath11k_ready_event(ab, skb); @@ -7614,6 +8837,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb); break; + case WMI_PDEV_UTF_EVENTID: + ath11k_tm_wmi_event(ab, id, skb); + break; case WMI_PDEV_TEMPERATURE_EVENTID: ath11k_wmi_pdev_temperature_event(ab, skb); break; @@ -7629,15 +8855,8 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: ath11k_wmi_obss_color_collision_event(ab, skb); break; - /* add Unsupported events here */ - case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: - case WMI_PEER_OPER_MODE_CHANGE_EVENTID: - case WMI_TWT_ENABLE_EVENTID: - case WMI_TWT_DISABLE_EVENTID: - case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: - case WMI_PEER_CREATE_CONF_EVENTID: - ath11k_dbg(ab, ATH11K_DBG_WMI, - "ignoring unsupported event 0x%x\n", id); + case WMI_TWT_ADD_DIALOG_EVENTID: + ath11k_wmi_twt_add_dialog_event(ab, skb); break; case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb); @@ -7651,15 +8870,20 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_11D_NEW_COUNTRY_EVENTID: ath11k_reg_11d_new_cc_event(ab, skb); break; - case WMI_RFKILL_STATE_CHANGE_EVENTID: - ath11k_rfkill_state_change_event(ab, skb); - break; case WMI_DIAG_EVENTID: ath11k_wmi_diag_event(ab, skb); break; - /* TODO: Add remaining events */ + case WMI_PEER_STA_PS_STATECHG_EVENTID: + ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb); + break; + case WMI_GTK_OFFLOAD_STATUS_EVENTID: + 
ath11k_wmi_gtk_offload_status_event(ab, skb); + break; + case WMI_P2P_NOA_EVENTID: + ath11k_wmi_p2p_noa_event(ab, skb); + break; default: - ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id); + ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id); break; } @@ -7756,7 +8980,7 @@ ath11k_wmi_send_unit_test_cmd(struct ath11k *ar, } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI unit test : module %d vdev %d n_args %d token %d\n", + "cmd unit test module %d vdev %d n_args %d token %d\n", cmd->module_id, cmd->vdev_id, cmd->num_args, cmd->diag_token); @@ -7798,6 +9022,62 @@ int ath11k_wmi_simulate_radar(struct ath11k *ar) return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); } +int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap, + struct ath11k_fw_dbglog *dbglog) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_debug_log_config_cmd_fixed_param *cmd; + struct sk_buff *skb; + struct wmi_tlv *tlv; + int ret, len; + + len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->dbg_log_param = dbglog->param; + + tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd)); + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | + FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); + + switch (dbglog->param) { + case WMI_DEBUG_LOG_PARAM_LOG_LEVEL: + case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE: + case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE: + case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP: + cmd->value = dbglog->value; + break; + case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP: + case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP: + cmd->value = dbglog->value; + memcpy(tlv->value, module_id_bitmap, + MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); + /* clear current config to be used for next user config */ + memset(module_id_bitmap, 0, + MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); + break; + default: + dev_kfree_skb(skb); + return -EINVAL; + } + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID); + if (ret) { + ath11k_warn(ar->ab, + "failed to send WMI_DBGLOG_CFG_CMDID\n"); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd dbglog cfg"); + + return ret; +} + int ath11k_wmi_connect(struct ath11k_base *ab) { u32 i; @@ -7851,7 +9131,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; /* It's overwritten when service_ext_ready is handled */ - if (ab->hw_params.single_pdev_only) + if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; /* TODO: Init remaining wmi soc resources required */ @@ -7873,6 +9153,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab) ath11k_wmi_free_dbring_caps(ab); } +int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id, + u32 filter_bitmap, bool enable) +{ + struct wmi_hw_data_filter_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_hw_data_filter_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->enable = enable; + + /* Set all modes 
in case of disable */ + if (cmd->enable) + cmd->hw_filter_bitmap = filter_bitmap; + else + cmd->hw_filter_bitmap = ((u32)~0U); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "hw data filter enable %d filter_bitmap 0x%x\n", + enable, filter_bitmap); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); +} + int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar) { struct wmi_wow_host_wakeup_ind *cmd; @@ -7889,7 +9202,7 @@ int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar) WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow host wakeup ind\n"); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); } @@ -7911,7 +9224,7 @@ int ath11k_wmi_wow_enable(struct ath11k *ar) cmd->enable = 1; cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow enable\n"); + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow enable\n"); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); } @@ -7938,8 +9251,659 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->prob_req_oui = prob_req_oui; - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi scan prob req oui %d\n", + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "scan prob req oui %d\n", prob_req_oui); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID); } + +int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable) +{ + struct wmi_wow_add_del_event_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->is_add = enable; + cmd->event_bitmap = (1 << event); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add wakeup event %s enable %d vdev_id %d\n", + wow_wakeup_event(event), enable, vdev_id); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); +} + +int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset) +{ + struct wmi_wow_add_pattern_cmd *cmd; + struct wmi_wow_bitmap_pattern *bitmap; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *ptr; + size_t len; + + len = sizeof(*cmd) + + sizeof(*tlv) + /* array struct */ + sizeof(*bitmap) + /* bitmap */ + sizeof(*tlv) + /* empty ipv4 sync */ + sizeof(*tlv) + /* empty ipv6 sync */ + sizeof(*tlv) + /* empty magic */ + sizeof(*tlv) + /* empty info timeout */ + sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + /* cmd */ + ptr = (u8 *)skb->data; + cmd = (struct wmi_wow_add_pattern_cmd *)ptr; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_WOW_ADD_PATTERN_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->pattern_id = pattern_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + + ptr += sizeof(*cmd); + + /* bitmap */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap)); + + ptr += sizeof(*tlv); + + 
bitmap = (struct wmi_wow_bitmap_pattern *)ptr; + bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_WOW_BITMAP_PATTERN_T) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE); + + memcpy(bitmap->patternbuf, pattern, pattern_len); + ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4)); + memcpy(bitmap->bitmaskbuf, mask, pattern_len); + ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4)); + bitmap->pattern_offset = pattern_offset; + bitmap->pattern_len = pattern_len; + bitmap->bitmask_len = pattern_len; + bitmap->pattern_id = pattern_id; + + ptr += sizeof(*bitmap); + + /* ipv4 sync */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* ipv6 sync */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* magic */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* pattern info timeout */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_UINT32) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* ratelimit interval */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_UINT32) | + FIELD_PREP(WMI_TLV_LEN, sizeof(u32)); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n", + vdev_id, pattern_id, pattern_offset); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); +} + +int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id) +{ + struct wmi_wow_del_pattern_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_WOW_DEL_PATTERN_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->pattern_id = pattern_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow del pattern vdev_id %d pattern_id %d\n", + vdev_id, pattern_id); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); +} + +static struct sk_buff * +ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar, + u32 vdev_id, + struct wmi_pno_scan_req *pno) +{ + struct nlo_configured_parameters *nlo_list; + struct wmi_wow_nlo_config_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u32 *channel_list; + size_t len, nlo_list_len, channel_list_len; + u8 *ptr; + u32 i; + + len = sizeof(*cmd) + + sizeof(*tlv) + + /* TLV place holder for array of structures + * nlo_configured_parameters(nlo_list) + */ + sizeof(*tlv); + /* TLV place holder for array of uint32 channel_list */ + + channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; + len += channel_list_len; + + nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; + len += nlo_list_len; + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (u8 *)skb->data; + cmd = (struct wmi_wow_nlo_config_cmd *)ptr; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = pno->vdev_id; + cmd->flags = WMI_NLO_CONFIG_START | 
WMI_NLO_CONFIG_SSID_HIDE_EN; + + /* current FW does not support min-max range for dwell time */ + cmd->active_dwell_time = pno->active_max_time; + cmd->passive_dwell_time = pno->passive_max_time; + + if (pno->do_passive_scan) + cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE; + + cmd->fast_scan_period = pno->fast_scan_period; + cmd->slow_scan_period = pno->slow_scan_period; + cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles; + cmd->delay_start_time = pno->delay_start_time; + + if (pno->enable_pno_scan_randomization) { + cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ; + ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); + ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); + ath11k_ce_byte_swap(cmd->mac_addr.addr, 8); + ath11k_ce_byte_swap(cmd->mac_mask.addr, 8); + } + + ptr += sizeof(*cmd); + + /* nlo_configured_parameters(nlo_list) */ + cmd->no_of_ssids = pno->uc_networks_count; + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, nlo_list_len); + + ptr += sizeof(*tlv); + nlo_list = (struct nlo_configured_parameters *)ptr; + for (i = 0; i < cmd->no_of_ssids; i++) { + tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv)); + + nlo_list[i].ssid.valid = true; + nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len; + memcpy(nlo_list[i].ssid.ssid.ssid, + pno->a_networks[i].ssid.ssid, + nlo_list[i].ssid.ssid.ssid_len); + ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid, + roundup(nlo_list[i].ssid.ssid.ssid_len, 4)); + + if (pno->a_networks[i].rssi_threshold && + pno->a_networks[i].rssi_threshold > -300) { + nlo_list[i].rssi_cond.valid = true; + nlo_list[i].rssi_cond.rssi = + pno->a_networks[i].rssi_threshold; + } + + nlo_list[i].bcast_nw_type.valid = true; + nlo_list[i].bcast_nw_type.bcast_nw_type = + pno->a_networks[i].bcast_nw_type; + } + + ptr += nlo_list_len; + cmd->num_of_channels = pno->a_networks[0].channel_count; + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | + FIELD_PREP(WMI_TLV_LEN, channel_list_len); + ptr += sizeof(*tlv); + channel_list = (u32 *)ptr; + for (i = 0; i < cmd->num_of_channels; i++) + channel_list[i] = pno->a_networks[0].channels[i]; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv start pno config vdev_id %d\n", + vdev_id); + + return skb; +} + +static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar, + u32 vdev_id) +{ + struct wmi_wow_nlo_config_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->flags = WMI_NLO_CONFIG_STOP; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "tlv stop pno config vdev_id %d\n", vdev_id); + return skb; +} + +int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id, + struct wmi_pno_scan_req *pno_scan) +{ + struct sk_buff *skb; + + if (pno_scan->enable) + skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); + else + skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id); + + if (IS_ERR_OR_NULL(skb)) + return -ENOMEM; + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); +} + 
+static void ath11k_wmi_fill_ns_offload(struct ath11k *ar, + struct ath11k_arp_ns_offload *offload, + u8 **ptr, + bool enable, + bool ext) +{ + struct wmi_ns_offload_tuple *ns; + struct wmi_tlv *tlv; + u8 *buf_ptr = *ptr; + u32 ns_cnt, ns_ext_tuples; + int i, max_offloads; + + ns_cnt = offload->ipv6_count; + + tlv = (struct wmi_tlv *)buf_ptr; + + if (ext) { + ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns)); + i = WMI_MAX_NS_OFFLOADS; + max_offloads = offload->ipv6_count; + } else { + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns)); + i = 0; + max_offloads = WMI_MAX_NS_OFFLOADS; + } + + buf_ptr += sizeof(*tlv); + + for (; i < max_offloads; i++) { + ns = (struct wmi_ns_offload_tuple *)buf_ptr; + ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE); + + if (enable) { + if (i < ns_cnt) + ns->flags |= WMI_NSOL_FLAGS_VALID; + + memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); + memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); + ath11k_ce_byte_swap(ns->target_ipaddr[0], 16); + ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16); + + if (offload->ipv6_type[i]) + ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST; + + memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); + ath11k_ce_byte_swap(ns->target_mac.addr, 8); + + if (ns->target_mac.word0 != 0 || + ns->target_mac.word1 != 0) { + ns->flags |= WMI_NSOL_FLAGS_MAC_VALID; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "index %d ns_solicited %pI6 target %pI6", + i, ns->solicitation_ipaddr, + ns->target_ipaddr[0]); + } + + buf_ptr += sizeof(*ns); + } + + *ptr = buf_ptr; +} + +static void ath11k_wmi_fill_arp_offload(struct ath11k *ar, + struct ath11k_arp_ns_offload *offload, + u8 **ptr, + bool enable) +{ + struct wmi_arp_offload_tuple *arp; + struct wmi_tlv *tlv; + u8 *buf_ptr = *ptr; + int i; + + /* fill arp tuple */ + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); + buf_ptr += sizeof(*tlv); + + for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + arp = (struct wmi_arp_offload_tuple *)buf_ptr; + arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE); + + if (enable && i < offload->ipv4_count) { + /* Copy the target ip addr and flags */ + arp->flags = WMI_ARPOL_FLAGS_VALID; + memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); + ath11k_ce_byte_swap(arp->target_ipaddr, 4); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "arp offload address %pI4", + arp->target_ipaddr); + } + + buf_ptr += sizeof(*arp); + } + + *ptr = buf_ptr; +} + +int ath11k_wmi_arp_ns_offload(struct ath11k *ar, + struct ath11k_vif *arvif, bool enable) +{ + struct ath11k_arp_ns_offload *offload; + struct wmi_set_arp_ns_offload_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *buf_ptr; + size_t len; + u8 ns_cnt, ns_ext_tuples = 0; + + offload = &arvif->arp_ns_offload; + ns_cnt = offload->ipv6_count; + + len = sizeof(*cmd) + + sizeof(*tlv) + + WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) + + sizeof(*tlv) + + WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple); + + if (ns_cnt > WMI_MAX_NS_OFFLOADS) { + ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; + len += sizeof(*tlv) + + ns_ext_tuples * 
sizeof(struct wmi_ns_offload_tuple); + } + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + buf_ptr = skb->data; + cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->flags = 0; + cmd->vdev_id = arvif->vdev_id; + cmd->num_ns_ext_tuples = ns_ext_tuples; + + buf_ptr += sizeof(*cmd); + + ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); + ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); + + if (ns_ext_tuples) + ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); +} + +int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar, + struct ath11k_vif *arvif, bool enable) +{ + struct wmi_gtk_rekey_offload_cmd *cmd; + struct ath11k_rekey_data *rekey_data = &arvif->rekey_data; + int len; + struct sk_buff *skb; + __le64 replay_ctr; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = arvif->vdev_id; + + if (enable) { + cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE; + + /* the length in rekey_data and cmd is equal */ + memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); + ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES); + memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); + ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES); + + replay_ctr = cpu_to_le64(rekey_data->replay_ctr); + memcpy(cmd->replay_ctr, &replay_ctr, + sizeof(replay_ctr)); + ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES); + } else { + cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", + arvif->vdev_id, enable); + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar, + struct ath11k_vif *arvif) +{ + struct wmi_gtk_rekey_offload_cmd *cmd; + int len; + struct sk_buff *skb; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = arvif->vdev_id; + cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n", + arvif->vdev_id); + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val) +{ struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_set_sar_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *buf_ptr; + u32 len, sar_len_aligned, rsvd_len_aligned; + + sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32)); + rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32)); + len = sizeof(*cmd) + + TLV_HDR_SIZE + sar_len_aligned + + TLV_HDR_SIZE + rsvd_len_aligned; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + 
cmd->pdev_id = ar->pdev->pdev_id; + cmd->sar_len = BIOS_SAR_TABLE_LEN; + cmd->rsvd_len = BIOS_SAR_RSVD1_LEN; + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, sar_len_aligned); + buf_ptr += TLV_HDR_SIZE; + memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN); + + buf_ptr += sar_len_aligned; + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned); + + return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID); +} + +int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_set_geo_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *buf_ptr; + u32 len, rsvd_len_aligned; + + rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->pdev_id = ar->pdev->pdev_id; + cmd->rsvd_len = BIOS_SAR_RSVD2_LEN; + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned); + + return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); +} + +int ath11k_wmi_sta_keepalive(struct ath11k *ar, + const struct wmi_sta_keepalive_arg *arg) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_sta_keepalive_cmd *cmd; + struct wmi_sta_keepalive_arp_resp *arp; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd) + sizeof(*arp); + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_keepalive_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_STA_KEEPALIVE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = arg->vdev_id; + cmd->enabled = arg->enabled; + cmd->interval = arg->interval; + cmd->method = arg->method; + + arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1); + arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE); + + if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || + arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { + arp->src_ip4_addr = arg->src_ip4_addr; + arp->dest_ip4_addr = arg->dest_ip4_addr; + ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "sta keepalive vdev %d enabled %d method %d interval %d\n", + arg->vdev_id, arg->enabled, arg->method, arg->interval); + + return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); +} + +bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar) +{ + return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, + ar->ab->wmi_ab.svc_map) && ar->supports_6ghz; +} |
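
The reworked service-available parser above walks each 32-bit word of the firmware's service segment bitmap and maps set bits onto absolute WMI service IDs, starting the extension segment at WMI_MAX_SERVICE and the ext2 segment (carried in the WMI_TAG_ARRAY_UINT32 TLV) at WMI_MAX_EXT_SERVICE. The standalone userspace sketch below mirrors that loop so the indexing is easier to follow; the constants (4 words per segment, 32 bits per word, segment bases 128 and 256) are assumptions taken from the driver's wmi.h, and the sample bitmap values are hypothetical.

/*
 * Minimal sketch (not part of the patch): mirrors the loop shape of
 * ath11k_wmi_tlv_services_parser(), which maps a segment bitmap onto
 * absolute service IDs.  Constants and sample data are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define SEGMENT_BM_SIZE32	4	/* assumed WMI_SERVICE_SEGMENT_BM_SIZE32 */
#define BITS_PER_WORD		32	/* assumed WMI_AVAIL_SERVICE_BITS_IN_SIZE32 */

static void map_segment(const uint32_t *bitmap, unsigned int base,
			unsigned int limit)
{
	unsigned int i, j;

	/* Word i covers service IDs base + 32 * i .. base + 32 * i + 31,
	 * capped at the segment limit, exactly like the kernel loop.
	 */
	for (i = 0, j = base;
	     i < SEGMENT_BM_SIZE32 && j < limit;
	     i++) {
		do {
			if (bitmap[i] & (1U << (j % BITS_PER_WORD)))
				printf("service %u advertised\n", j);
		} while (++j % BITS_PER_WORD);
	}
}

int main(void)
{
	/* hypothetical ext segment bitmap: base 128, limit 256 */
	uint32_t ext_bitmap[SEGMENT_BM_SIZE32] = { 0x00000005, 0x80000000, 0, 0 };

	map_segment(ext_bitmap, 128, 256);	/* prints services 128, 130 and 191 */
	return 0;
}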
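
The new GTK offload status handler likewise rebuilds the 64-bit replay counter from the two 32-bit words reported by firmware (word1 holds the upper half) and passes a big-endian copy to ieee80211_gtk_rekey_notify(), because the supplicant expects the counter in network byte order. Below is a minimal userspace sketch of that conversion, assuming hypothetical word values and open-coding cpu_to_be64().

/*
 * Minimal sketch (not part of the patch): how the replay counter is
 * assembled and serialised big-endian before notifying mac80211.
 * Word values are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t word0 = 0x00000002;	/* lower 32 bits from firmware */
	uint32_t word1 = 0x00000001;	/* upper 32 bits from firmware */
	uint64_t replay_ctr;
	uint8_t replay_ctr_be[8];
	int i;

	/* same combination as the handler: (word1 << 32) | word0 */
	replay_ctr = ((uint64_t)word1 << 32) | word0;

	/* equivalent of cpu_to_be64(): most significant byte first */
	for (i = 0; i < 8; i++)
		replay_ctr_be[i] = (uint8_t)(replay_ctr >> (56 - 8 * i));

	printf("replay_ctr = 0x%016llx\nbig-endian bytes:",
	       (unsigned long long)replay_ctr);
	for (i = 0; i < 8; i++)
		printf(" %02x", replay_ctr_be[i]);
	printf("\n");

	return 0;
}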
