Diffstat (limited to 'drivers/net/wireless/ath/ath10k/wmi.c')
-rw-r--r--    drivers/net/wireless/ath/ath10k/wmi.c    264
1 file changed, 178 insertions, 86 deletions
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index a81a1ab2de19..b4aad6604d6d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3,6 +3,8 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
  */
 
 #include <linux/skbuff.h>
@@ -740,6 +742,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
     .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
     .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
     .radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
+    .per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
 };
 
 static struct wmi_peer_param_map wmi_peer_param_map = {
@@ -1761,12 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
 
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
 {
-    unsigned long time_left;
+    unsigned long time_left, i;
 
     time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
                         WMI_SERVICE_READY_TIMEOUT_HZ);
-    if (!time_left)
-        return -ETIMEDOUT;
+    if (!time_left) {
+        /* Sometimes the PCI HIF doesn't receive interrupt
+         * for the service ready message even if the buffer
+         * was completed. PCIe sniffer shows that it's
+         * because the corresponding CE ring doesn't fires
+         * it. Workaround here by polling CE rings once.
+         */
+        ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+
+        for (i = 0; i < CE_COUNT; i++)
+            ath10k_hif_send_complete_check(ar, i, 1);
+
+        time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+                            WMI_SERVICE_READY_TIMEOUT_HZ);
+        if (!time_left) {
+            ath10k_warn(ar, "polling timed out\n");
+            return -ETIMEDOUT;
+        }
+
+        ath10k_warn(ar, "service ready completion received, continuing normally\n");
+    }
 
     return 0;
 }
@@ -1893,7 +1916,7 @@ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
 {
     ieee80211_iterate_active_interfaces_atomic(ar->hw,
-                           IEEE80211_IFACE_ITER_NORMAL,
+                           ATH10K_ITER_NORMAL_FLAGS,
                            ath10k_wmi_tx_beacons_iter,
                            NULL);
 }
@@ -1915,10 +1938,16 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
     if (cmd_id == WMI_CMD_UNSUPPORTED) {
         ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
                 cmd_id);
+        dev_kfree_skb_any(skb);
         return ret;
     }
 
     wait_event_timeout(ar->wmi.tx_credits_wq, ({
+        if (ar->state == ATH10K_STATE_WEDGED) {
+            ret = -ESHUTDOWN;
+            ath10k_dbg(ar, ATH10K_DBG_WMI,
+                   "drop wmi command %d, hardware is wedged\n", cmd_id);
+        }
         /* try to send pending beacons first. they take priority */
         ath10k_wmi_tx_beacons_nowait(ar);
@@ -1936,7 +1965,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
     if (ret == -EAGAIN) {
         ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
                 cmd_id);
-        queue_work(ar->workqueue, &ar->restart_work);
+        ath10k_core_start_recovery(ar);
     }
 
     return ret;
@@ -2007,7 +2036,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
     ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
     memcpy(cmd->buf, msdu->data, msdu->len);
 
-    ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
+    ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
            msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
            fc & IEEE80211_FCTL_STYPE);
 
     trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2419,6 +2448,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
     dma_unmap_single(ar->dev, pkt_addr->paddr, msdu->len, DMA_TO_DEVICE);
     info = IEEE80211_SKB_CB(msdu);
+    kfree(pkt_addr);
 
     if (param->status) {
         info->flags &= ~IEEE80211_TX_STAT_ACK;
@@ -2426,7 +2456,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
         info->flags |= IEEE80211_TX_STAT_ACK;
         info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
                       param->ack_rssi;
-        info->status.is_valid_ack_signal = true;
+        info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
     }
 
     ieee80211_tx_status_irqsafe(ar->hw, msdu);
@@ -2609,8 +2639,12 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
     if (ieee80211_is_beacon(hdr->frame_control))
         ath10k_mac_handle_beacon(ar, skb);
 
+    if (ieee80211_is_beacon(hdr->frame_control) ||
+        ieee80211_is_probe_resp(hdr->frame_control))
+        status->boottime_ns = ktime_get_boottime_ns();
+
     ath10k_dbg(ar, ATH10K_DBG_MGMT,
-           "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+           "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
            skb, skb->len, fc & IEEE80211_FCTL_FTYPE,
            fc & IEEE80211_FCTL_STYPE);
@@ -2794,7 +2828,7 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
     switch (ar->scan.state) {
     case ATH10K_SCAN_IDLE:
     case ATH10K_SCAN_STARTING:
-        ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+        ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
         goto exit;
     case ATH10K_SCAN_RUNNING:
     case ATH10K_SCAN_ABORTING:
@@ -2866,7 +2900,7 @@ void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
     dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
     dst->underrun = __le32_to_cpu(src->underrun);
     dst->tx_abort = __le32_to_cpu(src->tx_abort);
-    dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+    dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
     dst->tx_ko = __le32_to_cpu(src->tx_ko);
     dst->data_rc = __le32_to_cpu(src->data_rc);
     dst->self_triggers = __le32_to_cpu(src->self_triggers);
@@ -2894,7 +2928,7 @@ ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
     dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
     dst->underrun = __le32_to_cpu(src->underrun);
     dst->tx_abort = __le32_to_cpu(src->tx_abort);
-    dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+    dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
     dst->tx_ko = __le32_to_cpu(src->tx_ko);
     dst->data_rc = __le32_to_cpu(src->data_rc);
     dst->self_triggers = __le32_to_cpu(src->self_triggers);
@@ -3496,7 +3530,7 @@ void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
         return;
     }
 
-    ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+    ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
            arg.mac_addr);
 
     rcu_read_lock();
@@ -3550,7 +3584,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
     __le32 t;
     u32 v, tim_len;
 
-    /* When FW reports 0 in tim_len, ensure atleast first byte
+    /* When FW reports 0 in tim_len, ensure at least first byte
      * in tim_bitmap is considered for pvm calculation.
      */
     tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
@@ -3877,13 +3911,13 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
          * Once CSA counter is completed stop sending beacons until
          * actual channel switch is done
          */
-        if (arvif->vif->csa_active &&
-            ieee80211_csa_is_complete(arvif->vif)) {
-            ieee80211_csa_finish(arvif->vif);
+        if (arvif->vif->bss_conf.csa_active &&
+            ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
+            ieee80211_csa_finish(arvif->vif, 0);
             continue;
         }
 
-        bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+        bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
         if (!bcn) {
             ath10k_warn(ar, "could not get mac80211 beacon\n");
             continue;
         }
@@ -3964,7 +3998,7 @@ static void ath10k_radar_detected(struct ath10k *ar)
     if (ar->dfs_block_radar_events)
         ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
     else
-        ieee80211_radar_detected(ar->hw);
+        ieee80211_radar_detected(ar->hw, NULL);
 }
 
 static void ath10k_radar_confirmation_work(struct work_struct *work)
@@ -5750,8 +5784,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
                 ret);
     }
 
-    ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
-                   __le32_to_cpu(arg.service_map_ext_len));
+    /*
+     * Initialization of "arg.service_map_ext_valid" to ZERO is necessary
+     * for the below logic to work.
+     */
+    if (arg.service_map_ext_valid)
+        ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
+                       __le32_to_cpu(arg.service_map_ext_len));
 }
 
 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
@@ -6551,7 +6590,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
     struct wmi_init_cmd *cmd;
     struct sk_buff *buf;
     struct wmi_resource_config config = {};
-    u32 len, val;
+    u32 val;
 
     config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
     config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
@@ -6603,10 +6642,8 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
     config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
     config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
 
-    len = sizeof(*cmd) +
-          (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
-    buf = ath10k_wmi_alloc_skb(ar, len);
+    buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+                           ar->wmi.num_mem_chunks));
     if (!buf)
         return ERR_PTR(-ENOMEM);
@@ -6624,7 +6661,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
     struct wmi_init_cmd_10x *cmd;
     struct sk_buff *buf;
     struct wmi_resource_config_10x config = {};
-    u32 len, val;
+    u32 val;
 
     config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
     config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -6668,10 +6705,8 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
     config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
     config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
 
-    len = sizeof(*cmd) +
-          (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
-    buf = ath10k_wmi_alloc_skb(ar, len);
+    buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+                           ar->wmi.num_mem_chunks));
     if (!buf)
         return ERR_PTR(-ENOMEM);
@@ -6689,7 +6724,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
     struct wmi_init_cmd_10_2 *cmd;
     struct sk_buff *buf;
     struct wmi_resource_config_10x config = {};
-    u32 len, val, features;
+    u32 val, features;
 
     config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
     config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
@@ -6741,10 +6776,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
     config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
     config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
 
-    len = sizeof(*cmd) +
-          (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
-    buf = ath10k_wmi_alloc_skb(ar, len);
+    buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+                           ar->wmi.num_mem_chunks));
     if (!buf)
         return ERR_PTR(-ENOMEM);
@@ -6776,7 +6809,6 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
     struct wmi_init_cmd_10_4 *cmd;
     struct sk_buff *buf;
     struct wmi_resource_config_10_4 config = {};
-    u32 len;
 
     config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
     config.num_peers = __cpu_to_le32(ar->max_num_peers);
@@ -6838,10 +6870,8 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
     config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
     config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
 
-    len = sizeof(*cmd) +
-          (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
-    buf = ath10k_wmi_alloc_skb(ar, len);
+    buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+                           ar->wmi.num_mem_chunks));
     if (!buf)
         return ERR_PTR(-ENOMEM);
@@ -6925,14 +6955,14 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
 }
 
 static void
-ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
                    const struct wmi_start_scan_arg *arg)
 {
     struct wmi_ie_data *ie;
     struct wmi_chan_list *channels;
     struct wmi_ssid_list *ssids;
     struct wmi_bssid_list *bssids;
-    void *ptr = tlvs->tlvs;
+    void *ptr = tlvs;
     int i;
 
     if (arg->n_channels) {
@@ -7010,7 +7040,7 @@ ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
     cmd = (struct wmi_start_scan_cmd *)skb->data;
 
     ath10k_wmi_put_start_scan_common(&cmd->common, arg);
-    ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+    ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
 
     cmd->burst_duration_ms = __cpu_to_le32(0);
 
@@ -7039,7 +7069,7 @@ ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
     cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
 
     ath10k_wmi_put_start_scan_common(&cmd->common, arg);
-    ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+    ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
 
     ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
 
     return skb;
@@ -7471,6 +7501,49 @@ ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
     return skb;
 }
 
+static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
+                             u32 gpio_num, u32 input,
+                             u32 pull_type, u32 intr_mode)
+{
+    struct wmi_gpio_config_cmd *cmd;
+    struct sk_buff *skb;
+
+    skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+    if (!skb)
+        return ERR_PTR(-ENOMEM);
+
+    cmd = (struct wmi_gpio_config_cmd *)skb->data;
+    cmd->pull_type = __cpu_to_le32(pull_type);
+    cmd->gpio_num = __cpu_to_le32(gpio_num);
+    cmd->input = __cpu_to_le32(input);
+    cmd->intr_mode = __cpu_to_le32(intr_mode);
+
+    ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
+           gpio_num, input, pull_type, intr_mode);
+
+    return skb;
+}
+
+static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
+                             u32 gpio_num, u32 set)
+{
+    struct wmi_gpio_output_cmd *cmd;
+    struct sk_buff *skb;
+
+    skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+    if (!skb)
+        return ERR_PTR(-ENOMEM);
+
+    cmd = (struct wmi_gpio_output_cmd *)skb->data;
+    cmd->gpio_num = __cpu_to_le32(gpio_num);
+    cmd->set = __cpu_to_le32(set);
+
+    ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
+           gpio_num, set);
+
+    return skb;
+}
+
 static struct sk_buff *
 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
                  enum wmi_sta_ps_mode psmode)
@@ -7509,7 +7582,7 @@ ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
     cmd->param_id = __cpu_to_le32(param_id);
     cmd->param_value = __cpu_to_le32(value);
 
-    ath10k_dbg(ar, ATH10K_DBG_WMI,
+    ath10k_dbg(ar, ATH10K_DBG_STA,
            "wmi sta ps param vdev_id 0x%x param %d value %d\n",
            vdev_id, param_id, value);
     return skb;
@@ -7549,12 +7622,9 @@ ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
     struct sk_buff *skb;
     struct wmi_channel_arg *ch;
     struct wmi_channel *ci;
-    int len;
     int i;
 
-    len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
-
-    skb = ath10k_wmi_alloc_skb(ar, len);
+    skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
     if (!skb)
         return ERR_PTR(-EINVAL);
 
@@ -8166,28 +8236,6 @@ ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
     return skb;
 }
 
-size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
-{
-    struct ath10k_fw_stats_peer *i;
-    size_t num = 0;
-
-    list_for_each_entry(i, head, list)
-        ++num;
-
-    return num;
-}
-
-size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
-{
-    struct ath10k_fw_stats_vdev *i;
-    size_t num = 0;
-
-    list_for_each_entry(i, head, list)
-        ++num;
-
-    return num;
-}
-
 static void
 ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
                    char *buf, u32 *length)
@@ -8276,7 +8324,7 @@ ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
             "PPDUs cleaned", pdev->tx_abort);
     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-            "MPDUs requed", pdev->mpdus_requed);
+            "MPDUs requeued", pdev->mpdus_requeued);
     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
             "Excessive retries", pdev->tx_ko);
     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
@@ -8464,8 +8512,8 @@ void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
         goto unlock;
     }
 
-    num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
-    num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+    num_peers = list_count_nodes(&fw_stats->peers);
+    num_vdevs = list_count_nodes(&fw_stats->vdevs);
 
     ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
     ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
@@ -8522,8 +8570,8 @@ void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
         goto unlock;
     }
 
-    num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
-    num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+    num_peers = list_count_nodes(&fw_stats->peers);
+    num_vdevs = list_count_nodes(&fw_stats->vdevs);
 
     ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
     ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
@@ -8670,8 +8718,8 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
         goto unlock;
     }
 
-    num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
-    num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+    num_peers = list_count_nodes(&fw_stats->peers);
+    num_vdevs = list_count_nodes(&fw_stats->vdevs);
 
     ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
     ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
@@ -8756,9 +8804,9 @@ int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
         return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
     case WMI_VDEV_SUBTYPE_MESH_11S:
     case WMI_VDEV_SUBTYPE_MESH_NON_11S:
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
     }
-    return -ENOTSUPP;
+    return -EOPNOTSUPP;
 }
 
 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8778,9 +8826,9 @@ static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
     case WMI_VDEV_SUBTYPE_MESH_11S:
         return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
     case WMI_VDEV_SUBTYPE_MESH_NON_11S:
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
     }
-    return -ENOTSUPP;
+    return -EOPNOTSUPP;
 }
 
 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8802,7 +8850,7 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
     case WMI_VDEV_SUBTYPE_MESH_NON_11S:
         return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
     }
-    return -ENOTSUPP;
+    return -EOPNOTSUPP;
 }
 
 static struct sk_buff *
@@ -8941,8 +8989,6 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
     if (!skb)
         return ERR_PTR(-ENOMEM);
 
-    memset(skb->data, 0, sizeof(*cmd));
-
     cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
     cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
     ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
@@ -9005,6 +9051,39 @@ ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
 }
 
 static struct sk_buff *
+ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
+                     const struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+    struct wmi_peer_per_tid_cfg_cmd *cmd;
+    struct sk_buff *skb;
+
+    skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+    if (!skb)
+        return ERR_PTR(-ENOMEM);
+
+    memset(skb->data, 0, sizeof(*cmd));
+
+    cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
+    cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+    ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
+    cmd->tid = cpu_to_le32(arg->tid);
+    cmd->ack_policy = cpu_to_le32(arg->ack_policy);
+    cmd->aggr_control = cpu_to_le32(arg->aggr_control);
+    cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
+    cmd->retry_count = cpu_to_le32(arg->retry_count);
+    cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
+    cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
+    cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
+
+    ath10k_dbg(ar, ATH10K_DBG_WMI,
+           "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
+           arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
+           arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
+           arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
+    return skb;
+}
+
+static struct sk_buff *
 ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
 {
     struct wmi_echo_cmd *cmd;
@@ -9129,6 +9208,9 @@ static const struct wmi_ops wmi_ops = {
     .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
     .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
     .gen_echo = ath10k_wmi_op_gen_echo,
+    .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+    .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+
     /* .gen_bcn_tmpl not implemented */
     /* .gen_prb_tmpl not implemented */
     /* .gen_p2p_go_bcn_ie not implemented */
@@ -9199,6 +9281,8 @@ static const struct wmi_ops wmi_10_1_ops = {
     .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
     .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
     .gen_echo = ath10k_wmi_op_gen_echo,
+    .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+    .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
     /* .gen_bcn_tmpl not implemented */
     /* .gen_prb_tmpl not implemented */
     /* .gen_p2p_go_bcn_ie not implemented */
@@ -9271,6 +9355,8 @@ static const struct wmi_ops wmi_10_2_ops = {
     .gen_delba_send = ath10k_wmi_op_gen_delba_send,
     .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
     .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+    .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+    .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
     /* .gen_pdev_enable_adaptive_cca not implemented */
 };
 
@@ -9342,6 +9428,8 @@ static const struct wmi_ops wmi_10_2_4_ops = {
         ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
     .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
     .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
+    .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+    .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
     /* .gen_bcn_tmpl not implemented */
     /* .gen_prb_tmpl not implemented */
     /* .gen_p2p_go_bcn_ie not implemented */
@@ -9413,6 +9501,7 @@ static const struct wmi_ops wmi_10_4_ops = {
     .gen_pdev_get_tpc_table_cmdid =
             ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
     .gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
+    .gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
 
     /* shared with 10.2 */
     .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
@@ -9422,6 +9511,8 @@ static const struct wmi_ops wmi_10_4_ops = {
     .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
     .gen_echo = ath10k_wmi_op_gen_echo,
     .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+    .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+    .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
 };
 
 int ath10k_wmi_attach(struct ath10k *ar)
@@ -9523,12 +9614,13 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
     struct sk_buff *msdu;
 
     ath10k_dbg(ar, ATH10K_DBG_WMI,
-           "force cleanup mgmt msdu_id %hu\n", msdu_id);
+           "force cleanup mgmt msdu_id %u\n", msdu_id);
 
     msdu = pkt_addr->vaddr;
     dma_unmap_single(ar->dev, pkt_addr->paddr,
             msdu->len, DMA_TO_DEVICE);
     ieee80211_free_txskb(ar->hw, msdu);
+    kfree(pkt_addr);
 
     return 0;
 }
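
Several hunks above replace open-coded allocation-length math of the form sizeof(*cmd) + n * sizeof(struct host_memory_chunk) with the kernel's struct_size() helper, which sizes a structure ending in a flexible array member and guards against arithmetic overflow. The following is a minimal, self-contained userspace sketch of that pattern, not ath10k code; the struct names (init_cmd, chunk) and the sketch_struct_size() helper are illustrative stand-ins for the real WMI definitions and the kernel macro.

/* Sketch of the flexible-array sizing pattern the diff converts to. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
    uint32_t paddr;
    uint32_t len;
};

struct init_cmd {
    uint32_t num_chunks;
    struct chunk items[];   /* flexible array member, sized at allocation time */
};

/* Simplified stand-in for struct_size(): header plus n trailing items,
 * returning SIZE_MAX on overflow so the allocation fails instead of wrapping.
 */
static size_t sketch_struct_size(size_t hdr, size_t item, size_t n)
{
    if (item && n > (SIZE_MAX - hdr) / item)
        return SIZE_MAX;
    return hdr + n * item;
}

int main(void)
{
    size_t n = 4;
    size_t len = sketch_struct_size(sizeof(struct init_cmd),
                                    sizeof(struct chunk), n);
    struct init_cmd *cmd;

    if (len == SIZE_MAX || !(cmd = calloc(1, len)))
        return 1;

    cmd->num_chunks = (uint32_t)n;
    for (size_t i = 0; i < n; i++)
        cmd->items[i].len = 64; /* fill the trailing array in place */

    printf("allocated %zu bytes for %zu chunks\n", len, n);
    free(cmd);
    return 0;
}

The point of the conversion in the diff is the same as in this sketch: the overflow check lives in one audited helper rather than being repeated, or forgotten, at each ath10k_wmi_alloc_skb() call site.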
