Diffstat (limited to 'drivers/net/wireless/ath/ath12k/wmi.c')
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 1804
1 file changed, 1662 insertions(+), 142 deletions(-)
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 11cc3005c0f9..abb510d235a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -19,6 +19,7 @@
#include "mac.h"
#include "hw.h"
#include "peer.h"
+#include "p2p.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
@@ -162,6 +163,14 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_enable_event) },
+ [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_disable_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
};
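
These policies give the TLV parser a minimum payload length per tag: an event TLV whose body is shorter than min_len is rejected before its pointer is published in the tag-indexed table. A minimal stand-alone sketch of that check (simplified types and hypothetical tag values, not the driver's actual parser):

	#include <stdint.h>
	#include <stddef.h>

	/* simplified stand-in for struct ath12k_wmi_tlv_policy */
	struct tlv_policy { size_t min_len; };

	/* hypothetical tag values, for illustration only */
	enum { TAG_TWT_ENABLE = 1, TAG_MAX };

	static const struct tlv_policy policies[TAG_MAX] = {
		[TAG_TWT_ENABLE] = { .min_len = 8 },
	};

	/* publish ptr in the tag-indexed table only if the payload is long enough */
	static int store_tlv(const void **tb, uint16_t tag, uint16_t len, const void *ptr)
	{
		if (tag >= TAG_MAX)
			return 0;	/* unknown tags are skipped, not fatal */
		if (policies[tag].min_len && len < policies[tag].min_len)
			return -1;	/* malformed event: payload too short */
		tb[tag] = ptr;
		return 0;
	}
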
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -179,18 +188,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
-
- if (ab->num_radios == 2) {
- config->num_peers = TARGET_NUM_PEERS(DBS);
- config->num_tids = TARGET_NUM_TIDS(DBS);
- } else if (ab->num_radios == 3) {
- config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
- } else {
- /* Control should not reach here */
- config->num_peers = TARGET_NUM_PEERS(SINGLE);
- config->num_tids = TARGET_NUM_TIDS(SINGLE);
- }
+ config->num_peers = ab->num_radios *
+ ath12k_core_get_max_peers_per_radio(ab);
+ config->num_tids = ath12k_core_get_max_num_tids(ab);
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
@@ -228,6 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+ config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -359,8 +365,8 @@ static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
}
static const void **
-ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -369,7 +375,7 @@ ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -493,13 +499,14 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
- pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
+ pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
- fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
@@ -727,6 +734,20 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk
return 0;
}
+static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath12k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params->single_pdev_only &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
struct sk_buff *skb;
@@ -752,6 +773,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
@@ -770,7 +792,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->desc_id = cpu_to_le32(buf_id);
- cmd->chanfreq = 0;
+ cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->frame_len = cpu_to_le32(frame->len);
@@ -799,6 +821,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+ bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
+ struct wmi_vdev_create_mlo_params *ml_params;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
@@ -808,7 +832,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
* both the bands.
*/
len = sizeof(*cmd) + TLV_HDR_SIZE +
- (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
+ (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -823,9 +848,14 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_subtype = cpu_to_le32(args->subtype);
cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
cmd->pdev_id = cpu_to_le32(args->pdev_id);
+ cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+ if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
+ cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
+
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
@@ -837,20 +867,35 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
len = sizeof(*txrx_streams);
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_2GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_2GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);
txrx_streams++;
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_5GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_5GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
+
+ ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ if (is_ml_vdev) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
+ sizeof(*ml_params));
+ ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
+ }
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
@@ -993,19 +1038,27 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
+ struct wmi_vdev_start_mlo_params *ml_params;
+ struct wmi_partner_link_info *partner_info;
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan;
struct wmi_tlv *tlv;
void *ptr;
- int ret, len;
+ int ret, len, i, ml_arg_size = 0;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+ if (!restart && arg->ml.enabled) {
+ ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ len += ml_arg_size;
+ }
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1024,6 +1077,8 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->regdomain = cpu_to_le32(arg->regdomain);
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
+ cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
if (!restart) {
if (arg->ssid) {
@@ -1051,11 +1106,66 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
/* Note: This is a nested TLV containing:
- * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
+ if (ml_arg_size) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
+ sizeof(*ml_params));
+
+ ml_params->flags = le32_encode_bits(arg->ml.enabled,
+ ATH12K_WMI_FLAG_MLO_ENABLED) |
+ le32_encode_bits(arg->ml.assoc_link,
+ ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
+ le32_encode_bits(arg->ml.mcast_link,
+ ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
+ le32_encode_bits(arg->ml.link_add,
+ ATH12K_WMI_FLAG_MLO_LINK_ADD);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
+ arg->vdev_id, ml_params->flags);
+
+ ptr += sizeof(*ml_params);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ ptr += TLV_HDR_SIZE;
+
+ partner_info = ptr;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ partner_info->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ sizeof(*partner_info));
+ partner_info->vdev_id =
+ cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ ether_addr_copy(partner_info->vdev_addr.addr,
+ arg->ml.partner_info[i].addr);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr %pM\n",
+ partner_info->vdev_id, partner_info->hw_link_id,
+ partner_info->vdev_addr.addr);
+
+ partner_info++;
+ }
+
+ ptr = partner_info;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->freq, arg->mode);
@@ -1075,7 +1185,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
return ret;
}
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
@@ -1090,14 +1200,20 @@ int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
sizeof(*cmd));
- cmd->vdev_id = cpu_to_le32(vdev_id);
- cmd->vdev_assoc_id = cpu_to_le32(aid);
+ cmd->vdev_id = cpu_to_le32(params->vdev_id);
+ cmd->vdev_assoc_id = cpu_to_le32(params->aid);
- ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
+
+ if (params->tx_bssid) {
+ ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
+ cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
+ cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
+ }
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
- vdev_id, aid, bssid);
+ params->vdev_id, params->aid, params->bssid);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
@@ -1114,9 +1230,14 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
- int ret;
+ int ret, len;
+ struct wmi_peer_create_mlo_params *ml_param;
+ void *ptr;
+ struct wmi_tlv *tlv;
- skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1128,9 +1249,23 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
cmd->peer_type = cpu_to_le32(arg->peer_type);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ptr = skb->data + sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_param));
+ ptr += TLV_HDR_SIZE;
+ ml_param = ptr;
+ ml_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
+ sizeof(*ml_param));
+ if (arg->ml_enabled)
+ ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ ptr += sizeof(*ml_param);
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
- "WMI peer create vdev_id %d peer_addr %pM\n",
- arg->vdev_id, arg->peer_addr);
+ "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
+ arg->vdev_id, arg->peer_addr, ml_param->flags);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
@@ -1503,6 +1638,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
sizeof(*cmd));
cmd->req_type = cpu_to_le32(type);
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
@@ -1710,15 +1846,59 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, sizeof(u32));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
+
+ ptr += sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
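
The length math above follows the usual WMI pattern for variable byte-array TLVs: the IE bytes are copied verbatim, but the TLV is sized up to a 4-byte boundary so the next field stays aligned, while cmd->ie_buf_len carries the unpadded length. A stand-alone sketch of that computation (assuming, for this sketch, that TLV_HDR_SIZE is one 32-bit word):

	#include <stdint.h>
	#include <stddef.h>

	#define TLV_HDR_SIZE 4	/* one 32-bit TLV header word (assumption) */

	/* bytes needed to carry a P2P IE (ie[1] holds the body length) as a TLV */
	static size_t p2p_ie_tlv_len(const uint8_t *ie)
	{
		size_t ie_len = (size_t)ie[1] + 2;		/* id + len + body */
		size_t aligned = (ie_len + 3) & ~(size_t)3;	/* roundup(ie_len, 4) */

		return TLV_HDR_SIZE + aligned;
	}
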
+
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn)
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ u32 ema_params = 0;
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
@@ -1737,6 +1917,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
cmd->buf_len = cpu_to_le32(bcn->len);
+ cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
+ if (ema_args) {
+ u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
+ u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
+ if (ema_args->bcn_index == 0)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
+ if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
+ cmd->ema_params = cpu_to_le32(ema_params);
+ }
ptr = skb->data + sizeof(*cmd);
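
The ema_params word packs the template count, the index of this template, and first/last markers into a single u32 via the WMI_EMA_BEACON_* fields. A hedged sketch of that packing with invented bit positions (the real masks are defined in the driver headers):

	#include <stdint.h>

	/* illustrative field positions only; the driver uses WMI_EMA_BEACON_* */
	#define EMA_CNT_SHIFT	0	/* total number of EMA templates */
	#define EMA_IDX_SHIFT	8	/* index of this template */
	#define EMA_FIRST_SHIFT	16	/* set on template 0 */
	#define EMA_LAST_SHIFT	24	/* set on template cnt - 1 */

	static uint32_t ema_params(uint32_t bcn_cnt, uint32_t bcn_index)
	{
		uint32_t v = (bcn_cnt << EMA_CNT_SHIFT) | (bcn_index << EMA_IDX_SHIFT);

		if (bcn_index == 0)
			v |= 1u << EMA_FIRST_SHIFT;
		if (bcn_index + 1 == bcn_cnt)
			v |= 1u << EMA_LAST_SHIFT;
		return v;
	}

For bcn_cnt = 3 this sets FIRST only on index 0 and LAST only on index 2, mirroring the u32p_replace_bits() calls above.
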
@@ -1836,7 +2026,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
if (arg->bw_160)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
if (arg->bw_320)
- cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
+ cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
/* Typically if STBC is enabled for VHT it should be enabled
* for HT as well
@@ -1911,12 +2101,15 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct ath12k_wmi_vht_rate_set_params *mcs;
struct ath12k_wmi_he_rate_set_params *he_mcs;
struct ath12k_wmi_eht_rate_set_params *eht_mcs;
+ struct wmi_peer_assoc_mlo_params *ml_params;
+ struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
+ __le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
sizeof(u32));
@@ -1928,8 +2121,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * arg->peer_he_mcs_count) +
- TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
- TLV_HDR_SIZE + TLV_HDR_SIZE;
+ TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
+
+ if (arg->ml.enabled)
+ len += TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+ else
+ len += (2 * TLV_HDR_SIZE);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -2053,12 +2251,38 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*he_mcs);
}
- /* MLO header tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? sizeof(*ml_params) : 0;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (!len)
+ goto skip_ml_params;
+
+ ml_params = ptr;
+ ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
+ len);
+ ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.assoc_link)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.primary_umac)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.logical_link_idx_valid)
+ ml_params->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
+
+ if (arg->ml.peer_id_valid)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
+
+ ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
+ ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
+ ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
+ ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+ ptr += sizeof(*ml_params);
+skip_ml_params:
/* Loop through the EHT rate set */
len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
tlv = ptr;
@@ -2075,12 +2299,45 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*eht_mcs);
}
- /* ML partner links tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
+ /* fill ML Partner links */
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (len == 0)
+ goto send;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
+
+ partner_info = ptr;
+ partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
+ sizeof(*partner_info));
+ partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.partner_info[i].assoc_link)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.partner_info[i].primary_umac)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.partner_info[i].logical_link_idx_valid) {
+ v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
+ partner_info->flags |= v;
+ }
+
+ partner_info->logical_link_idx =
+ cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
+ ptr += sizeof(*partner_info);
+ }
+
+send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
@@ -2130,7 +2387,7 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
arg->num_bssid = 1;
/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@@ -2659,6 +2916,149 @@ int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
+ const u8 *buf, size_t buf_len)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_interface_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ u32 len, len_aligned;
+ int ret;
+
+ len_aligned = roundup(buf_len, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->param_type_id = cpu_to_le32(param_id);
+ cmd->length = cpu_to_le32(buf_len);
+
+ ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
+ ptr += TLV_HDR_SIZE;
+ memcpy(ptr, buf, buf_len);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
+ param_id, ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
+ const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
+ const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
+
+ sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
+ sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
+ sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
+ TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+ cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_table_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_table_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_dbs_backoff_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_geo_len_aligned;
+ const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
+
+ sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
@@ -3260,13 +3660,19 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
- wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
+ wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
+ WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
+ WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+ wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
+ wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
+ wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
@@ -3479,6 +3885,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.res_cfg.is_reg_cc_ext_event_supported = true;
ab->hw_params->wmi_init(ab, &arg.res_cfg);
+ ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
arg.num_mem_chunks = wmi_ab->num_mem_chunks;
arg.hw_mode_id = wmi_ab->preferred_hw_mode;
@@ -3490,6 +3897,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.num_band_to_mac = ab->num_radios;
ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
+ ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
+
return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
@@ -3597,7 +4006,7 @@ int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
sizeof(*cmd));
- cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
cmd->module_id = cpu_to_le32(arg->module_id);
cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
@@ -3974,6 +4383,7 @@ static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
+ ab->num_db_cap = 0;
}
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
@@ -4214,7 +4624,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
- if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
+ if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
bands = fw_pdev->supported_bands;
break;
@@ -4252,6 +4662,9 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
caps->eht_cap_info_internal);
}
+ pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
+ pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
+
return 0;
}
@@ -4271,7 +4684,8 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
- if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
+ if (ab->pdevs[i].pdev_id ==
+ ath12k_wmi_caps_ext_get_pdev_id(caps))
break;
}
@@ -4374,7 +4788,7 @@ static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4437,6 +4851,22 @@ static struct ath12k_reg_rule
return reg_rule_ptr;
}
+static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
+ u32 num_reg_rules)
+{
+ u8 num_invalid_5ghz_rules = 0;
+ u32 count, start_freq;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
+
+ if (start_freq >= ATH12K_MIN_6G_FREQ)
+ num_invalid_5ghz_rules++;
+ }
+
+ return num_invalid_5ghz_rules;
+}
+
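
To make the trimming below concrete (invented frequencies; assuming ATH12K_MIN_6G_FREQ marks the start of the 6 GHz band): if firmware reports five 5 GHz rules and the last two actually start in the 6 GHz range, this helper returns 2, so the caller keeps three rules but still advances the rule pointer past all five entries. A stand-alone model:

	#include <stdint.h>
	#include <stdio.h>

	#define MIN_6G_FREQ 5925	/* illustrative band start; the driver uses ATH12K_MIN_6G_FREQ */

	static unsigned int count_invalid_5ghz(const uint32_t *start_freq, uint32_t n)
	{
		unsigned int invalid = 0;

		for (uint32_t i = 0; i < n; i++)
			if (start_freq[i] >= MIN_6G_FREQ)
				invalid++;	/* 6 GHz rule leaked into the 5 GHz list */
		return invalid;
	}

	int main(void)
	{
		uint32_t rules[] = { 5170, 5250, 5490, 5955, 6115 };
		uint32_t n = 5, invalid = count_invalid_5ghz(rules, n);

		/* keep n - invalid rules, but skip all n entries when advancing */
		printf("valid %u, advance by %u\n", n - invalid, n);
		return 0;
	}
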
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_reg_info *reg_info)
@@ -4447,12 +4877,13 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
u32 num_2g_reg_rules, num_5g_reg_rules;
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u8 num_invalid_5ghz_ext_rules;
u32 total_reg_rules = 0;
int ret, i, j;
ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4540,20 +4971,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
- /* FIXME: Currently FW includes 6G reg rule also in 5G rule
- * list for country US.
- * Having same 6G reg rule in 5G and 6G rules list causes
- * intersect check to be true, and same rules will be shown
- * multiple times in iw cmd. So added hack below to avoid
- * parsing 6G rule from 5G reg rule list, and this can be
- * removed later, after FW updates to remove 6G reg rule
- * from 5G rules list.
- */
- if (memcmp(reg_info->alpha2, "US", 2) == 0) {
- reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
- num_5g_reg_rules = reg_info->num_5g_reg_rules;
- }
-
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
reg_info->num_phy = le32_to_cpu(ev->num_phy);
@@ -4656,8 +5073,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
+ ext_wmi_reg_rule += num_2g_reg_rules;
+
+ /* Firmware might include a 6 GHz reg rule in the 5 GHz rule list
+ * for a few countries, along with a separate 6 GHz rule.
+ * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
+ * lists causes the intersect check to be true, and the same rules
+ * will be shown multiple times in the iw cmd.
+ * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
+ */
+ num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
+ num_5g_reg_rules);
+
+ if (num_invalid_5ghz_ext_rules) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
+ reg_info->alpha2, reg_info->num_5g_reg_rules,
+ num_invalid_5ghz_ext_rules);
+
+ num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
+ reg_info->num_5g_reg_rules = num_5g_reg_rules;
+ }
+
if (num_5g_reg_rules) {
- ext_wmi_reg_rule += num_2g_reg_rules;
reg_info->reg_rules_5g_ptr =
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
ext_wmi_reg_rule);
@@ -4669,7 +5107,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
- ext_wmi_reg_rule += num_5g_reg_rules;
+ /* We have adjusted the number of 5 GHz reg rules above, but the rule
+ * pointer still needs to be advanced past those trimmed entries in
+ * ext_wmi_reg_rule.
+ *
+ * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
+ */
+ ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6g_ap_ptr[i] =
@@ -4738,7 +5181,7 @@ static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4770,7 +5213,7 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4790,15 +5233,15 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
return 0;
}
-static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4826,7 +5269,7 @@ static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4948,7 +5391,10 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status_irqsafe(ar->hw, msdu);
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4970,7 +5416,7 @@ static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5006,6 +5452,10 @@ static void ath12k_wmi_event_scan_started(struct ath12k *ar)
break;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
+
complete(&ar->scan.started);
break;
}
@@ -5076,6 +5526,8 @@ static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -5087,7 +5539,11 @@ static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
- ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+
break;
}
}
@@ -5141,7 +5597,7 @@ static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5174,7 +5630,7 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5201,7 +5657,7 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5226,13 +5682,14 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
static int freq_to_idx(struct ath12k *ar, int freq)
{
struct ieee80211_supported_band *sband;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
int band, ch, idx = 0;
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
if (!ar->mac.sbands[band].channels)
continue;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
if (!sband)
continue;
@@ -5245,14 +5702,14 @@ exit:
return idx;
}
-static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5291,7 +5748,7 @@ ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5331,7 +5788,7 @@ ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5362,7 +5819,7 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5384,13 +5841,13 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
}
static int
-ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, const struct wmi_pdev_temperature_event *ev)
+ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ const struct wmi_pdev_temperature_event *ev)
{
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5469,7 +5926,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params->single_pdev_only &&
- pdev_idx < ab->hw_params->num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params->num_rxdma_per_pdev)
goto mem_free;
else
goto fallback;
@@ -5725,8 +6182,7 @@ static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *s
{
u32 vdev_id, tx_status;
- if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath12k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -5788,7 +6244,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
goto exit;
}
- if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
+ if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
(rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
WMI_RX_STATUS_ERR_CRC))) {
@@ -5799,8 +6255,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
@@ -5821,8 +6279,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
@@ -5849,13 +6309,11 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
}
}
- /* TODO: Pending handle beacon implementation
- *if (ieee80211_is_beacon(hdr->frame_control))
- * ath12k_mac_handle_beacon(ar, skb);
- */
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath12k_mac_handle_beacon(ar, skb);
ath12k_dbg(ab, ATH12K_DBG_MGMT,
- "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@@ -5864,7 +6322,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- ieee80211_rx_ni(ar->hw, skb);
+ ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
exit:
rcu_read_unlock();
@@ -5915,7 +6373,8 @@ static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == state &&
- ar->scan.vdev_id == vdev_id) {
+ ar->scan.arvif &&
+ ar->scan.arvif->vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
}
@@ -6037,7 +6496,7 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
- sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arg.mac_addr, NULL);
if (!sta) {
ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
@@ -6059,42 +6518,44 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
+ u32 vdev_id;
+ u8 roam_reason;
if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
ath12k_warn(ab, "failed to extract roam event");
return;
}
+ vdev_id = le32_to_cpu(roam_ev.vdev_id);
+ roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
+ WMI_ROAM_REASON_MASK);
+
ath12k_dbg(ab, ATH12K_DBG_WMI,
- "wmi roam event vdev %u reason 0x%08x rssi %d\n",
- roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+ "wmi roam event vdev %u reason %d rssi %d\n",
+ vdev_id, roam_reason, roam_ev.rssi);
rcu_read_lock();
- ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid vdev id in roam ev %d",
- roam_ev.vdev_id);
+ ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
rcu_read_unlock();
return;
}
- if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
+ if (roam_reason >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
- switch (le32_to_cpu(roam_ev.reason)) {
+ switch (roam_reason) {
case WMI_ROAM_REASON_BEACON_MISS:
- /* TODO: Pending beacon miss and connection_loss_work
- * implementation
- * ath12k_mac_handle_beacon_miss(ar, vdev_id);
- */
+ ath12k_mac_handle_beacon_miss(ar, vdev_id);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
case WMI_ROAM_REASON_HO_FAILED:
ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
break;
}
@@ -6110,7 +6571,7 @@ static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath12k_warn(ab, "failed to extract chan info event");
return;
}
@@ -6395,7 +6856,7 @@ static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6429,7 +6890,9 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const u32 *vdev_ids)
{
int i;
- struct ath12k_vif *arvif;
+ struct ieee80211_bss_conf *conf;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif *ahvif;
/* Finish CSA once the switch count becomes NULL */
if (ev->current_switch_count)
@@ -6444,9 +6907,23 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
vdev_ids[i]);
continue;
}
+ ahvif = arvif->ahvif;
+
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
+ arvif->link_id);
+ continue;
+ }
+
+ conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+ if (!conf) {
+ ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ continue;
+ }
- if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ if (arvif->is_up && conf->csa_active)
+ ieee80211_csa_finish(ahvif->vif, 0);
}
rcu_read_unlock();
}
@@ -6460,7 +6937,7 @@ ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6490,11 +6967,12 @@ static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
const void **tb;
+ struct ath12k_mac_get_any_chanctx_conf_arg arg;
const struct ath12k_wmi_pdev_radar_event *ev;
struct ath12k *ar;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6525,13 +7003,22 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
+ arg.ar = ar;
+ arg.chanctx_conf = NULL;
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
+ ath12k_mac_get_any_chanctx_conf_iter, &arg);
+ if (!arg.chanctx_conf) {
+ ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
+ goto exit;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
ev->pdev_id);
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
exit:
rcu_read_unlock();
@@ -6546,7 +7033,7 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct ath12k *ar;
struct wmi_pdev_temperature_event ev = {0};
- if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
ath12k_warn(ab, "failed to extract pdev temperature event");
return;
}
@@ -6573,7 +7060,7 @@ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6603,7 +7090,7 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6628,6 +7115,56 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
kfree(tb);
}
+static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+ struct ath12k *ar;
+ int ret, vdev_id;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+ ret = 0;
+
+unlock:
+ rcu_read_unlock();
+out:
+ kfree(tb);
+ return ret;
+}
+
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -6635,7 +7172,7 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6662,6 +7199,253 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
kfree(tb);
}
+static void
+ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ trace_ath12k_wmi_diag(ab, skb->data, skb->len);
+}
+
+static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_enable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_disable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_wow_ev_pg_fault_param *pf_param;
+ const struct wmi_wow_ev_param *param;
+ struct wmi_wow_ev_arg *arg = data;
+ int pf_len;
+
+ switch (tag) {
+ case WMI_TAG_WOW_EVENT_INFO:
+ param = ptr;
+ arg->wake_reason = le32_to_cpu(param->wake_reason);
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
+ arg->wake_reason, wow_reason(arg->wake_reason));
+ break;
+
+ case WMI_TAG_ARRAY_BYTE:
+ if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
+ pf_param = ptr;
+ pf_len = le32_to_cpu(pf_param->len);
+ if (pf_len > len - sizeof(pf_len) ||
+ pf_len < 0) {
+ ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
+ pf_len);
+ return -EINVAL;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
+ pf_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
+ "wow_reason_page_fault packet present",
+ "wow_pg_fault ",
+ pf_param->data,
+ pf_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
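
The parse callback above is driven by ath12k_wmi_tlv_iter() (used just below), which walks a stream of [header | payload | pad] records and hands each (tag, len, ptr) to the callback. A minimal stand-alone model of such a walk (the header here is simplified to a 16-bit tag plus 16-bit length, an assumption of this sketch; the real header is a packed 32-bit word):

	#include <stdint.h>
	#include <stddef.h>

	typedef int (*tlv_cb)(uint16_t tag, uint16_t len, const void *ptr, void *data);

	/* walk [u16 tag][u16 len][len bytes, padded to 4] records until buf is consumed */
	static int tlv_iter(const uint8_t *buf, size_t buflen, tlv_cb cb, void *data)
	{
		while (buflen >= 4) {
			uint16_t tag = buf[0] | (buf[1] << 8);
			uint16_t len = buf[2] | (buf[3] << 8);
			size_t rec = 4 + ((len + 3u) & ~3u);
			int ret;

			if (rec > buflen)
				return -1;	/* truncated TLV */
			ret = cb(tag, len, buf + 4, data);
			if (ret)
				return ret;	/* callback may abort, as on a bad WOW buffer */
			buf += rec;
			buflen -= rec;
		}
		return 0;
	}
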
+static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg arg = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_wow_wakeup_host_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ complete(&ab->wow.wakeup_completed);
+}
+
+static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath12k_link_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ rcu_read_lock();
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ le32_to_cpu(ev->vdev_id));
+ kfree(tb);
+ return;
+ }
+
+ replay_ctr = le64_to_cpu(ev->replay_ctr);
+ arvif->rekey_data.replay_ctr = replay_ctr;
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
+ le32_to_cpu(ev->refresh_cnt), replay_ctr);
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
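+/* Completion event for WMI_MLO_SETUP_CMDID: map the reported pdev_id back
+ * to its ath12k instance, record the status and wake up the waiter.
+ */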
+static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_setup_complete_event *ev;
+ struct ath12k *ar = NULL;
+ struct ath12k_pdev *pdev;
+ const void **tb;
+ int ret, i;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
+ goto skip_lookup;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+		if (pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
+ ar = pdev->ar;
+ break;
+ }
+ }
+
+skip_lookup:
+ if (!ar) {
+		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
+			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
+ goto out;
+ }
+
+ ar->mlo_setup_status = le32_to_cpu(ev->status);
+ complete(&ar->mlo_setup_done);
+
+out:
+ kfree(tb);
+}
+
+static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_mlo_teardown_complete_event *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch teardown complete event\n");
+ kfree(tb);
+ return;
+ }
+
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -6757,14 +7541,14 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_RFKILL_STATE_CHANGE_EVENTID:
ath12k_rfkill_state_change_event(ab, skb);
break;
- /* add Unsupported events here */
- case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
- case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
+ ath12k_wmi_twt_enable_event(ab, skb);
+ break;
case WMI_TWT_DISABLE_EVENTID:
- case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "ignoring unsupported event 0x%x\n", id);
+ ath12k_wmi_twt_disable_event(ab, skb);
+ break;
+ case WMI_P2P_NOA_EVENTID:
+ ath12k_wmi_p2p_noa_event(ab, skb);
break;
case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
@@ -6772,6 +7556,34 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_VDEV_DELETE_RESP_EVENTID:
ath12k_vdev_delete_resp_event(ab, skb);
break;
+ case WMI_DIAG_EVENTID:
+ ath12k_wmi_diag_event(ab, skb);
+ break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath12k_wmi_event_wow_wakeup_host(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath12k_wmi_gtk_offload_status_event(ab, skb);
+ break;
+ case WMI_MLO_SETUP_COMPLETE_EVENTID:
+ ath12k_wmi_event_mlo_setup_complete(ab, skb);
+ break;
+ case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
+ ath12k_wmi_event_teardown_complete(ab, skb);
+ break;
+ /* add Unsupported events (rare) here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ /* add Unsupported events (frequent) here */
+ case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
+ case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+	/* debug output might flood the log, hence silently ignore (no-op) */
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -6786,9 +7598,11 @@ static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
u32 pdev_idx)
{
int status;
- u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
- ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
- ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+ static const u32 svc_id[] = {
+ ATH12K_HTC_SVC_ID_WMI_CONTROL,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
+ };
struct ath12k_htc_svc_conn_req conn_req = {};
struct ath12k_htc_svc_conn_resp conn_resp = {};
@@ -6874,13 +7688,13 @@ ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
int ath12k_wmi_simulate_radar(struct ath12k *ar)
{
- struct ath12k_vif *arvif;
+ struct ath12k_link_vif *arvif;
u32 dfs_args[DFS_MAX_TEST_ARGS];
struct wmi_unit_test_cmd wmi_ut;
bool arvif_found = false;
list_for_each_entry(arvif, &ar->arvifs, list) {
- if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
arvif_found = true;
break;
}
@@ -6981,3 +7795,709 @@ void ath12k_wmi_detach(struct ath12k_base *ab)
ath12k_wmi_free_dbring_caps(ab);
}
+
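+/* Enable or disable the firmware's hardware data filter; on disable the
+ * full bitmap is written so every previously set filter mode is cleared.
+ */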
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
+
+	/* When disabling, set all modes so firmware clears every filter */
+ if (arg->enable)
+ cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
+ else
+ cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ arg->enable, arg->hw_filter_bitmap);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
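+/* Tell firmware the host is awake again; typically issued during resume
+ * before normal WMI traffic restarts.
+ */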
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
+{
+ struct wmi_wow_host_wakeup_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ sizeof(*cmd));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+}
+
+int ath12k_wmi_wow_enable(struct ath12k *ar)
+{
+ struct wmi_wow_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_enable_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->enable = cpu_to_le32(1);
+ cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+}
+
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->is_add = cpu_to_le32(enable);
+	cmd->event_bitmap = cpu_to_le32(1 << event);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
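+/* The firmware expects a fixed TLV sequence for
+ * WMI_WOW_ADD_WAKE_PATTERN_CMDID even when only the bitmap pattern is
+ * used; the unused arrays are emitted as empty TLVs:
+ *
+ *   WMI_TAG_WOW_ADD_PATTERN_CMD
+ *   WMI_TAG_ARRAY_STRUCT  - one wmi_wow_bitmap_pattern_params
+ *   WMI_TAG_ARRAY_STRUCT  - empty (ipv4 sync)
+ *   WMI_TAG_ARRAY_STRUCT  - empty (ipv6 sync)
+ *   WMI_TAG_ARRAY_STRUCT  - empty (magic)
+ *   WMI_TAG_ARRAY_UINT32  - empty (pattern info timeout)
+ *   WMI_TAG_ARRAY_UINT32  - one u32 (ratelimit interval)
+ */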
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern_params *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = ptr;
+ bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
+ sizeof(*bitmap));
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ bitmap->pattern_offset = cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = cpu_to_le32(pattern_len);
+ bitmap->pattern_id = cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
+ vdev_id, pattern_id, pattern_offset, pattern_len);
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
+ bitmap->patternbuf, pattern_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
+ bitmap->bitmaskbuf, pattern_len);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
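+/* Build the NLO (network list offload) start command: one
+ * nlo_configured_params entry per configured SSID followed by the scan
+ * channel list, with optional MAC randomization for probe requests.
+ */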
+static struct sk_buff *
+ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ struct nlo_configured_params *nlo_list;
+ size_t len, nlo_list_len, channel_list_len;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ __le32 *channel_list;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ u32 i;
+
+	len = sizeof(*cmd) +
+	      sizeof(*tlv) + /* TLV placeholder: nlo_configured_params array (nlo_list) */
+	      sizeof(*tlv);  /* TLV placeholder: uint32 channel_list array */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(pno->vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_params(nlo_list) */
+ cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ sizeof(*nlo_list));
+
+ nlo_list[i].ssid.valid = cpu_to_le32(1);
+ nlo_list[i].ssid.ssid.ssid_len =
+ cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
+ nlo_list[i].rssi_cond.rssi =
+ cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = ptr;
+
+ for (i = 0; i < pno->a_networks[0].channel_count; i++)
+ channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
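+/* The interface caps a single NS tuple array at WMI_MAX_NS_OFFLOADS
+ * entries; when more IPv6 addresses are configured the remainder goes
+ * into a second "extended" array, so this helper runs twice (ext=false
+ * for the first WMI_MAX_NS_OFFLOADS tuples, ext=true for the rest).
+ */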
+static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_params *ns;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = buf_ptr;
+
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = buf_ptr;
+ ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
+ sizeof(*ns));
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+
+ if (!is_zero_ether_addr(ns->target_mac.addr))
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
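+/* Emit the fixed-size ARP tuple array; only the first ipv4_count entries
+ * are marked valid when enabling, the rest stay zeroed.
+ */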
+static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_params *arp;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = buf_ptr;
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
+ sizeof(*arp));
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable)
+{
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
+ }
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = buf_ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ sizeof(*cmd));
+ cmd->flags = cpu_to_le32(0);
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
+
+ buf_ptr += sizeof(*cmd);
+
+	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
+ ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
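+/* Program (or clear) the GTK rekey material so firmware can handle group
+ * key rekeying while the host sleeps; KCK/KEK and the current replay
+ * counter come from the rekey data cached on the link vif.
+ */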
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_link_vif *arvif, bool enable)
+{
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+
+ if (enable) {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
+
+		/* the key lengths in rekey_data and cmd are equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ } else {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_link_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
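+/* Configure firmware-side station keepalive. Only the ARP-response
+ * methods need the source/destination IPv4 addresses and destination
+ * MAC; for other methods the ARP TLV is left zeroed.
+ */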
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_sta_keepalive_arp_resp_params *arp;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enabled = cpu_to_le32(arg->enabled);
+ cmd->interval = cpu_to_le32(arg->interval);
+ cmd->method = cpu_to_le32(arg->method);
+
+ arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ sizeof(*arp));
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
+ arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
+
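+/* Send WMI_MLO_SETUP_CMDID for this pdev with the MLD group id and the
+ * list of partner link ids; completion is reported asynchronously via
+ * WMI_MLO_SETUP_COMPLETE_EVENTID (see ath12k_wmi_event_mlo_setup_complete).
+ */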
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
+{
+ struct wmi_mlo_setup_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ u32 *partner_links, num_links;
+ int i, ret, buf_len, arg_len;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+
+ num_links = mlo_params->num_partner_links;
+ arg_len = num_links * sizeof(u32);
+ buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_setup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
+ sizeof(*cmd));
+ cmd->mld_group_id = mlo_params->group_id;
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+ ptr += TLV_HDR_SIZE;
+
+ partner_links = ptr;
+ for (i = 0; i < num_links; i++)
+ partner_links[i] = mlo_params->partner_link_id[i];
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_mlo_ready(struct ath12k *ar)
+{
+ struct wmi_mlo_ready_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_ready_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
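+/* Request MLO teardown for this pdev; the only reason code used here is
+ * the SSR (subsystem restart) one, and completion arrives via
+ * WMI_MLO_TEARDOWN_COMPLETE_EVENTID.
+ */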
+int ath12k_wmi_mlo_teardown(struct ath12k *ar)
+{
+ struct wmi_mlo_teardown_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}