Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt76.h')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 1030
1 file changed, 870 insertions, 160 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 404c3d1a70d6..d05e83ea1cac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  */
@@ -13,23 +13,55 @@
 #include <linux/leds.h>
 #include <linux/usb.h>
 #include <linux/average.h>
+#include <linux/soc/airoha/airoha_offload.h>
+#include <linux/soc/mediatek/mtk_wed.h>
 #include <net/mac80211.h>
+#include <net/page_pool/helpers.h>
 #include "util.h"
 #include "testmode.h"

 #define MT_MCU_RING_SIZE	32
 #define MT_RX_BUF_SIZE		2048
-#define MT_SKB_HEAD_LEN		128
+#define MT_SKB_HEAD_LEN		256

 #define MT_MAX_NON_AQL_PKT	16
 #define MT_TXQ_FREE_THR		32

 #define MT76_TOKEN_FREE_THR	64

+#define MT_QFLAG_WED_RING	GENMASK(1, 0)
+#define MT_QFLAG_WED_TYPE	GENMASK(4, 2)
+#define MT_QFLAG_WED		BIT(5)
+#define MT_QFLAG_WED_RRO	BIT(6)
+#define MT_QFLAG_WED_RRO_EN	BIT(7)
+#define MT_QFLAG_EMI_EN		BIT(8)
+#define MT_QFLAG_NPU		BIT(9)
+
+#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
+				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define __MT_WED_RRO_Q(_type, _n)	(MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))
+
+#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
+#define MT_WED_Q_RX(_n)		__MT_WED_Q(MT76_WED_Q_RX, _n)
+#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+#define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
+#define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
+#define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
+#define MT_WED_RRO_Q_RXDMAD_C	__MT_WED_RRO_Q(MT76_WED_RRO_Q_RXDMAD_C, 0)
+
+#define __MT_NPU_Q(_type, _n)	(MT_QFLAG_NPU | \
+				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define MT_NPU_Q_TX(_n)		__MT_NPU_Q(MT76_WED_Q_TX, _n)
+#define MT_NPU_Q_RX(_n)		__MT_NPU_Q(MT76_WED_Q_RX, _n)
+
 struct mt76_dev;
 struct mt76_phy;
 struct mt76_wcid;
 struct mt76s_intr;
+struct mt76_chanctx;
+struct mt76_vif_link;

 struct mt76_reg_pair {
 	u32 reg;
@@ -42,6 +74,22 @@ enum mt76_bus_type {
 	MT76_BUS_SDIO,
 };

+enum mt76_wed_type {
+	MT76_WED_Q_TX,
+	MT76_WED_Q_TXFREE,
+	MT76_WED_Q_RX,
+	MT76_WED_RRO_Q_DATA,
+	MT76_WED_RRO_Q_MSDU_PG,
+	MT76_WED_RRO_Q_IND,
+	MT76_WED_RRO_Q_RXDMAD_C,
+};
+
+enum mt76_hwrro_mode {
+	MT76_HWRRO_OFF,
+	MT76_HWRRO_V3,
+	MT76_HWRRO_V3_1,
+};
+
 struct mt76_bus_ops {
 	u32 (*rr)(struct mt76_dev *dev, u32 offset);
 	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -83,11 +131,34 @@ enum mt76_rxq_id {
 	MT_RXQ_MAIN,
 	MT_RXQ_MCU,
 	MT_RXQ_MCU_WA,
-	MT_RXQ_EXT,
-	MT_RXQ_EXT_WA,
+	MT_RXQ_BAND1,
+	MT_RXQ_BAND1_WA,
+	MT_RXQ_MAIN_WA,
+	MT_RXQ_BAND2,
+	MT_RXQ_BAND2_WA,
+	MT_RXQ_RRO_BAND0,
+	MT_RXQ_RRO_BAND1,
+	MT_RXQ_RRO_BAND2,
+	MT_RXQ_MSDU_PAGE_BAND0,
+	MT_RXQ_MSDU_PAGE_BAND1,
+	MT_RXQ_MSDU_PAGE_BAND2,
+	MT_RXQ_TXFREE_BAND0,
+	MT_RXQ_TXFREE_BAND1,
+	MT_RXQ_TXFREE_BAND2,
+	MT_RXQ_RRO_IND,
+	MT_RXQ_RRO_RXDMAD_C,
+	MT_RXQ_NPU0,
+	MT_RXQ_NPU1,
 	__MT_RXQ_MAX
 };

+enum mt76_band_id {
+	MT_BAND0,
+	MT_BAND1,
+	MT_BAND2,
+	__MT_MAX_BAND
+};
+
 enum mt76_cipher_type {
 	MT_CIPHER_NONE,
 	MT_CIPHER_WEP40,
@@ -104,10 +175,27 @@
 	MT_CIPHER_GCMP_256,
 };

+enum mt76_dfs_state {
+	MT_DFS_STATE_UNKNOWN,
+	MT_DFS_STATE_DISABLED,
+	MT_DFS_STATE_CAC,
+	MT_DFS_STATE_ACTIVE,
+};
+
+#define MT76_RNR_SCAN_MAX_BSSIDS	16
+struct mt76_scan_rnr_param {
+	u8 bssid[MT76_RNR_SCAN_MAX_BSSIDS][ETH_ALEN];
+	u8 channel[MT76_RNR_SCAN_MAX_BSSIDS];
+	u8 random_mac[ETH_ALEN];
+	u8 seq_num;
+	u8 bssid_num;
+	u32 sreq_flag;
+};
+
 struct mt76_queue_buf {
 	dma_addr_t addr;
-	u16 len;
-	bool skip_unmap;
+	u16 len:15,
+	    skip_unmap:1;
 };

 struct mt76_tx_info {
@@ -127,7 +215,7 @@ struct mt76_queue_entry {
 		struct urb *urb;
 		int buf_sz;
 	};
-	u32 dma_addr[2];
+	dma_addr_t dma_addr[2];
 	u16 dma_len[2];
 	u16 wcid;
 	bool skip_buf0:1;
@@ -148,11 +236,14 @@ struct mt76_queue {
 	spinlock_t lock;
 	spinlock_t cleanup_lock;
 	struct mt76_queue_entry *entry;
+	struct mt76_rro_desc *rro_desc;
 	struct mt76_desc *desc;

 	u16 first;
 	u16 head;
 	u16 tail;
+	u8 hw_idx;
+	u8 ep;
 	int ndesc;
 	int queued;
 	int buf_size;
@@ -160,20 +251,29 @@
 	bool blocked;

 	u8 buf_offset;
-	u8 hw_idx;
-	u8 qid;
+	u16 flags;
+	u8 magic_cnt;
+
+	__le16 *emi_cpu_idx;
+
+	struct mtk_wed_device *wed;
+	struct mt76_dev *dev;
+	u32 wed_regs;

 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
-	struct page_frag_cache rx_page;
+	struct page_pool *page_pool;
 };

 struct mt76_mcu_ops {
+	unsigned int max_retry;
 	u32 headroom;
 	u32 tailroom;

 	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
 			    int len, bool wait_resp);
+	int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+				   int cmd, int *seq);
 	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
 				int cmd, int *seq);
 	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
@@ -195,9 +295,9 @@ struct mt76_queue_ops {
 		     int idx, int n_desc, int bufsize,
 		     u32 ring_base);

-	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
-			    struct sk_buff *skb, struct mt76_wcid *wcid,
-			    struct ieee80211_sta *sta);
+	int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
+			    enum mt76_txq_id qid, struct sk_buff *skb,
+			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);

 	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
 				struct sk_buff *skb, u32 tx_info);
@@ -210,11 +310,48 @@
 	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
 			   bool flush);

+	void (*rx_queue_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
+			      int (*poll)(struct napi_struct *napi, int budget));
+
 	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

 	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

-	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
+			bool reset_idx);
+};
+
+enum mt76_phy_type {
+	MT_PHY_TYPE_CCK,
+	MT_PHY_TYPE_OFDM,
+	MT_PHY_TYPE_HT,
+	MT_PHY_TYPE_HT_GF,
+	MT_PHY_TYPE_VHT,
+	MT_PHY_TYPE_HE_SU = 8,
+	MT_PHY_TYPE_HE_EXT_SU,
+	MT_PHY_TYPE_HE_TB,
+	MT_PHY_TYPE_HE_MU,
+	MT_PHY_TYPE_EHT_SU = 13,
+	MT_PHY_TYPE_EHT_TRIG,
+	MT_PHY_TYPE_EHT_MU,
+	__MT_PHY_TYPE_MAX,
+};
+
+struct mt76_sta_stats {
+	u64 tx_mode[__MT_PHY_TYPE_MAX];
+	u64 tx_bw[5];		/* 20, 40, 80, 160, 320 */
+	u64 tx_nss[4];		/* 1, 2, 3, 4 */
+	u64 tx_mcs[16];		/* mcs idx */
+	u64 tx_bytes;
+	/* WED TX */
+	u32 tx_packets;		/* unit: MSDU */
+	u32 tx_retries;
+	u32 tx_failed;
+	/* WED RX */
+	u64 rx_bytes;
+	u32 rx_packets;
+	u32 rx_errors;
+	u32 rx_drops;
 };

 enum mt76_wcid_flags {
@@ -224,10 +361,10 @@
 	MT_WCID_FLAG_HDR_TRANS,
 };

-#define MT76_N_WCIDS 288
+#define MT76_N_WCIDS 1088

 /* stored in ieee80211_tx_info::hw_queue */
-#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)
+#define MT_TX_HW_QUEUE_PHY		GENMASK(3, 2)

 DECLARE_EWMA(signal, 10, 8);

@@ -246,14 +383,19 @@ struct mt76_wcid {
 	int inactive_count;

 	struct rate_info rate;
+	unsigned long ampdu_state;

 	u16 idx;
 	u8 hw_key_idx;
 	u8 hw_key_idx2;

+	u8 offchannel:1;
 	u8 sta:1;
-	u8 ext_phy:1;
+	u8 sta_disabled:1;
 	u8 amsdu:1;
+	u8 phy_idx:2;
+	u8 link_id:4;
+	bool link_valid;

 	u8 rx_check_pn;
 	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
@@ -262,23 +404,50 @@ struct mt76_wcid {
 	u32 tx_info;
 	bool sw_iv;

+	struct list_head tx_list;
+	struct sk_buff_head tx_pending;
+	struct sk_buff_head tx_offchannel;
+
 	struct list_head list;
 	struct idr pktid;
+
+	struct mt76_sta_stats stats;
+
+	struct list_head poll_list;
+
+	struct mt76_wcid *def_wcid;
 };

 struct mt76_txq {
-	struct mt76_wcid *wcid;
+	u16 wcid;

 	u16 agg_ssn;
 	bool send_bar;
 	bool aggr;
 };

+/* data0 */
+#define RRO_IND_DATA0_IND_REASON_MASK	GENMASK(31, 28)
+#define RRO_IND_DATA0_START_SEQ_MASK	GENMASK(27, 16)
+#define RRO_IND_DATA0_SEQ_ID_MASK	GENMASK(11, 0)
+/* data1 */
+#define RRO_IND_DATA1_MAGIC_CNT_MASK	GENMASK(31, 29)
+#define RRO_IND_DATA1_IND_COUNT_MASK	GENMASK(12, 0)
+struct mt76_wed_rro_ind {
+	__le32 data0;
+	__le32 data1;
+};
+
 struct mt76_txwi_cache {
 	struct list_head list;
 	dma_addr_t dma_addr;

-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		void *ptr;
+	};
+
+	u8 qid;
 };

 struct mt76_rx_tid {
@@ -289,6 +458,7 @@ struct mt76_rx_tid {
 	spinlock_t lock;
 	struct delayed_work reorder_work;

+	u16 id;
 	u16 head;
 	u16 size;
 	u16 nframes;
@@ -297,7 +467,7 @@
 	u8 started:1, stopped:1, timer_pending:1;

-	struct sk_buff *reorder_buf[];
+	struct sk_buff *reorder_buf[] __counted_by(size);
 };

 #define MT_TX_CB_DMA_DONE		BIT(0)
@@ -307,7 +477,8 @@ struct mt76_rx_tid {
 #define MT_PACKET_ID_MASK		GENMASK(6, 0)
 #define MT_PACKET_ID_NO_ACK		0
 #define MT_PACKET_ID_NO_SKB		1
-#define MT_PACKET_ID_FIRST		2
+#define MT_PACKET_ID_WED		2
+#define MT_PACKET_ID_FIRST		3
 #define MT_PACKET_ID_HAS_RATE		BIT(7)
 /* This is timer for when to give up when waiting for TXS callback,
  * with starting time being the time at which the DMA_DONE callback
@@ -326,6 +497,7 @@

 enum {
 	MT76_STATE_INITIALIZED,
+	MT76_STATE_REGISTERED,
 	MT76_STATE_RUNNING,
 	MT76_STATE_MCU_RUNNING,
 	MT76_SCANNING,
@@ -340,6 +512,13 @@
 	MT76_STATE_SUSPEND,
 	MT76_STATE_ROC,
 	MT76_STATE_PM,
+	MT76_STATE_WED_RESET,
+};
+
+enum mt76_sta_event {
+	MT76_STA_EVENT_ASSOC,
+	MT76_STA_EVENT_AUTHORIZE,
+	MT76_STA_EVENT_DISASSOC,
 };

 struct mt76_hw_cap {
@@ -353,6 +532,8 @@
 #define MT_DRV_SW_RX_AIRTIME		BIT(2)
 #define MT_DRV_RX_DMA_HDR		BIT(3)
 #define MT_DRV_HW_MGMT_TXQ		BIT(4)
+#define MT_DRV_AMSDU_OFFLOAD		BIT(5)
+#define MT_DRV_IGNORE_TXS_FAILED	BIT(6)

 struct mt76_driver_ops {
 	u32 drv_flags;
@@ -361,7 +542,10 @@
 	u16 token_size;
 	u8 mcs_rates;

+	unsigned int link_data_size;
+
 	void (*update_survey)(struct mt76_phy *phy);
+	int (*set_channel)(struct mt76_phy *phy);

 	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
 			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -376,21 +560,34 @@
 	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

 	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
-		       struct sk_buff *skb);
+		       struct sk_buff *skb, u32 *info);

 	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

+	void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
+	int (*rx_rro_add_msdu_page)(struct mt76_dev *dev, struct mt76_queue *q,
+				    dma_addr_t p, void *data);
+
 	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
 		       bool ps);

 	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta);

-	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
-			  struct ieee80211_sta *sta);
+	int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta, enum mt76_sta_event ev);

 	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
 			   struct ieee80211_sta *sta);
+
+	int (*vif_link_add)(struct mt76_phy *phy, struct ieee80211_vif *vif,
+			    struct ieee80211_bss_conf *link_conf,
+			    struct mt76_vif_link *mlink);
+
+	void (*vif_link_remove)(struct mt76_phy *phy,
+				struct ieee80211_vif *vif,
+				struct ieee80211_bss_conf *link_conf,
+				struct mt76_vif_link *mlink);
 };

 struct mt76_channel_state {
@@ -408,19 +605,6 @@ struct mt76_sband {
 	struct mt76_channel_state *chan;
 };

-struct mt76_rate_power {
-	union {
-		struct {
-			s8 cck[4];
-			s8 ofdm[8];
-			s8 stbc[10];
-			s8 ht[16];
-			s8 vht[10];
-		};
-		s8 all[48];
-	};
-};
-
 /* addr req mask */
 #define MT_VEND_TYPE_EEPROM	BIT(31)
 #define MT_VEND_TYPE_CFG	BIT(30)
@@ -492,27 +676,27 @@ struct mt76_usb {
 		struct mt76_reg_pair *rp;
 		int rp_len;
 		u32 base;
-		bool burst;
 	} mcu;
 };

-#define MT76S_XMIT_BUF_SZ	(16 * PAGE_SIZE)
+#define MT76S_XMIT_BUF_SZ	0x3fe00
 #define MT76S_NUM_TX_ENTRIES	256
 #define MT76S_NUM_RX_ENTRIES	512
 struct mt76_sdio {
 	struct mt76_worker txrx_worker;
 	struct mt76_worker status_worker;
 	struct mt76_worker net_worker;
+	struct mt76_worker stat_worker;

-	struct work_struct stat_work;
-
-	u8 *xmit_buf[IEEE80211_NUM_ACS + 2];
+	u8 *xmit_buf;
+	u32 xmit_buf_sz;

 	struct sdio_func *func;
 	void *intr_data;
 	u8 hw_ver;
 	wait_queue_head_t wait;

+	int pse_mcu_quota_max;
 	struct {
 		int pse_data_quota;
 		int ple_data_quota;
@@ -528,6 +712,16 @@ struct mt76_mmio {
 	void __iomem *regs;
 	spinlock_t irq_lock;
 	u32 irqmask;
+
+	struct mtk_wed_device wed;
+	struct mtk_wed_device wed_hif2;
+	struct completion wed_reset;
+	struct completion wed_reset_complete;
+
+	struct airoha_ppe_dev __rcu *ppe_dev;
+	struct airoha_npu __rcu *npu;
+	phys_addr_t phy_addr;
+	int npu_type;
 };

 struct mt76_rx_status {
@@ -543,7 +737,7 @@

 	u8 iv[6];

-	u8 ext_phy:1;
+	u8 phy_idx:2;
 	u8 aggr:1;
 	u8 qos_ctl;
 	u16 seqno;
@@ -551,12 +745,22 @@
 	u16 freq;
 	u32 flag;
 	u8 enc_flags;
-	u8 encoding:2, bw:3, he_ru:3;
-	u8 he_gi:2, he_dcm:1;
+	u8 encoding:3, bw:4;
+	union {
+		struct {
+			u8 he_ru:3;
+			u8 he_gi:2;
+			u8 he_dcm:1;
+		};
+		struct {
+			u8 ru:4;
+			u8 gi:2;
+		} eht;
+	};
 	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
 	u8 rate_idx;
-	u8 nss;
-	u8 band;
+	u8 nss:5, band:3;
 	s8 signal;
 	u8 chains;
 	s8 chain_signal[IEEE80211_MAX_CHAINS];
@@ -615,12 +819,31 @@ struct mt76_testmode_data {
 	} rx_stats;
 };

-struct mt76_vif {
+struct mt76_vif_link {
 	u8 idx;
+	u8 link_idx;
 	u8 omac_idx;
 	u8 band_idx;
 	u8 wmm_idx;
 	u8 scan_seq_num;
+	u8 cipher;
+	u8 basic_rates_idx;
+	u8 mcast_rates_idx;
+	u8 beacon_rates_idx;
+	bool offchannel;
+	struct ieee80211_chanctx_conf *ctx;
+	struct mt76_wcid *wcid;
+	struct mt76_vif_data *mvif;
+	struct rcu_head rcu_head;
+};
+
+struct mt76_vif_data {
+	struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+	struct mt76_vif_link __rcu *offchannel_link;
+
+	struct mt76_phy *roc_phy;
+	u16 valid_links;
+	u8 deflink_id;
 };

 struct mt76_phy {
@@ -629,15 +852,30 @@ struct mt76_phy {
 	void *priv;

 	unsigned long state;
+	unsigned int num_sta;
+	u8 band_idx;
+
+	spinlock_t tx_lock;
+	struct list_head tx_list;
 	struct mt76_queue *q_tx[__MT_TXQ_MAX];

 	struct cfg80211_chan_def chandef;
-	struct ieee80211_channel *main_chan;
+	struct cfg80211_chan_def main_chandef;
+	bool offchannel;
+	bool radar_enabled;
+
+	struct delayed_work roc_work;
+	struct ieee80211_vif *roc_vif;
+	struct mt76_vif_link *roc_link;
+
+	struct mt76_chanctx *chanctx;

 	struct mt76_channel_state *chan_state;
+	enum mt76_dfs_state dfs_state;
 	ktime_t survey_time;

+	u32 aggr_stats[32];
+
 	struct mt76_hw_cap cap;
 	struct mt76_sband sband_2g;
 	struct mt76_sband sband_5g;
@@ -663,15 +901,23 @@ struct mt76_phy {
 	} rx_amsdu[__MT_RXQ_MAX];

 	struct mt76_freq_range_power *frp;
+
+	struct {
+		struct led_classdev cdev;
+		char name[32];
+		bool al;
+		u8 pin;
+	} leds;
 };

 struct mt76_dev {
 	struct mt76_phy phy; /* must be first */
-
-	struct mt76_phy *phy2;
+	struct mt76_phy *phys[__MT_MAX_BAND];
+	struct mt76_phy *band_phys[NUM_NL80211_BANDS];

 	struct ieee80211_hw *hw;

+	spinlock_t wed_lock;
 	spinlock_t lock;
 	spinlock_t cc_lock;
@@ -687,44 +933,55 @@ struct mt76_dev {
 	const struct mt76_driver_ops *drv;
 	const struct mt76_mcu_ops *mcu_ops;
 	struct device *dev;
+	struct device *dma_dev;

 	struct mt76_mcu mcu;

-	struct net_device napi_dev;
-	struct net_device tx_napi_dev;
+	struct net_device *napi_dev;
+	struct net_device *tx_napi_dev;
 	spinlock_t rx_lock;
 	struct napi_struct napi[__MT_RXQ_MAX];
 	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+	struct tasklet_struct irq_tasklet;

 	struct list_head txwi_cache;
+	struct list_head rxwi_cache;
 	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
 	struct mt76_queue q_rx[__MT_RXQ_MAX];
 	const struct mt76_queue_ops *queue_ops;
 	int tx_dma_idx[4];
+	enum mt76_hwrro_mode hwrro_mode;

 	struct mt76_worker tx_worker;
 	struct napi_struct tx_napi;

 	spinlock_t token_lock;
 	struct idr token;
-	int token_count;
+	u16 wed_token_count;
+	u16 token_count;
+	u16 token_start;
+	u16 token_size;
+
+	spinlock_t rx_token_lock;
+	struct idr rx_token;
+	u16 rx_token_size;

 	wait_queue_head_t tx_wait;
 	/* spinclock used to protect wcid pktid linked list */
 	spinlock_t status_lock;

 	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
-	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

-	u32 vif_mask;
+	u64 vif_mask;

 	struct mt76_wcid global_wcid;
 	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
 	struct list_head wcid_list;

-	u32 rev;
+	struct list_head sta_poll_list;
+	spinlock_t sta_poll_lock;

-	u32 aggr_stats[32];
+	u32 rev;

 	struct tasklet_struct pre_tbtt_tasklet;
 	int beacon_int;
@@ -733,22 +990,27 @@ struct mt76_dev {
 	struct debugfs_blob_wrapper eeprom;
 	struct debugfs_blob_wrapper otp;

-	struct mt76_rate_power rate_power;
-
 	char alpha2[3];
 	enum nl80211_dfs_regions region;
+	struct mt76_scan_rnr_param rnr;

 	u32 debugfs_reg;

-	struct led_classdev led_cdev;
-	char led_name[32];
-	bool led_al;
-	u8 led_pin;
-
 	u8 csa_complete;

 	u32 rxfilter;

+	struct delayed_work scan_work;
+	struct {
+		struct cfg80211_scan_request *req;
+		struct ieee80211_channel *chan;
+		struct ieee80211_vif *vif;
+		struct mt76_vif_link *mlink;
+		struct mt76_phy *phy;
+		int chan_idx;
+	} scan;
+
 #ifdef CONFIG_NL80211_TESTMODE
 	const struct mt76_testmode_ops *test_ops;
 	struct {
@@ -763,6 +1025,103 @@ struct mt76_dev {
 		struct mt76_usb usb;
 		struct mt76_sdio sdio;
 	};
+
+	atomic_t bus_hung;
+};
+
+/* per-phy stats. */
+struct mt76_mib_stats {
+	u32 ack_fail_cnt;
+	u32 fcs_err_cnt;
+	u32 rts_cnt;
+	u32 rts_retries_cnt;
+	u32 ba_miss_cnt;
+	u32 tx_bf_cnt;
+	u32 tx_mu_bf_cnt;
+	u32 tx_mu_mpdu_cnt;
+	u32 tx_mu_acked_mpdu_cnt;
+	u32 tx_su_acked_mpdu_cnt;
+	u32 tx_bf_ibf_ppdu_cnt;
+	u32 tx_bf_ebf_ppdu_cnt;
+
+	u32 tx_bf_rx_fb_all_cnt;
+	u32 tx_bf_rx_fb_eht_cnt;
+	u32 tx_bf_rx_fb_he_cnt;
+	u32 tx_bf_rx_fb_vht_cnt;
+	u32 tx_bf_rx_fb_ht_cnt;
+
+	u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
+	u32 tx_bf_rx_fb_nc_cnt;
+	u32 tx_bf_rx_fb_nr_cnt;
+	u32 tx_bf_fb_cpl_cnt;
+	u32 tx_bf_fb_trig_cnt;
+
+	u32 tx_ampdu_cnt;
+	u32 tx_stop_q_empty_cnt;
+	u32 tx_mpdu_attempts_cnt;
+	u32 tx_mpdu_success_cnt;
+	u32 tx_pkt_ebf_cnt;
+	u32 tx_pkt_ibf_cnt;
+
+	u32 tx_rwp_fail_cnt;
+	u32 tx_rwp_need_cnt;
+
+	/* rx stats */
+	u32 rx_fifo_full_cnt;
+	u32 channel_idle_cnt;
+	u32 primary_cca_busy_time;
+	u32 secondary_cca_busy_time;
+	u32 primary_energy_detect_time;
+	u32 cck_mdrdy_time;
+	u32 ofdm_mdrdy_time;
+	u32 green_mdrdy_time;
+	u32 rx_vector_mismatch_cnt;
+	u32 rx_delimiter_fail_cnt;
+	u32 rx_mrdy_cnt;
+	u32 rx_len_mismatch_cnt;
+	u32 rx_mpdu_cnt;
+	u32 rx_ampdu_cnt;
+	u32 rx_ampdu_bytes_cnt;
+	u32 rx_ampdu_valid_subframe_cnt;
+	u32 rx_ampdu_valid_subframe_bytes_cnt;
+	u32 rx_pfdrop_cnt;
+	u32 rx_vec_queue_overflow_drop_cnt;
+	u32 rx_ba_cnt;
+
+	u32 tx_amsdu[8];
+	u32 tx_amsdu_cnt;
+
+	/* mcu_muru_stats */
+	u32 dl_cck_cnt;
+	u32 dl_ofdm_cnt;
+	u32 dl_htmix_cnt;
+	u32 dl_htgf_cnt;
+	u32 dl_vht_su_cnt;
+	u32 dl_vht_2mu_cnt;
+	u32 dl_vht_3mu_cnt;
+	u32 dl_vht_4mu_cnt;
+	u32 dl_he_su_cnt;
+	u32 dl_he_ext_su_cnt;
+	u32 dl_he_2ru_cnt;
+	u32 dl_he_2mu_cnt;
+	u32 dl_he_3ru_cnt;
+	u32 dl_he_3mu_cnt;
+	u32 dl_he_4ru_cnt;
+	u32 dl_he_4mu_cnt;
+	u32 dl_he_5to8ru_cnt;
+	u32 dl_he_9to16ru_cnt;
+	u32 dl_he_gtr16ru_cnt;
+
+	u32 ul_hetrig_su_cnt;
+	u32 ul_hetrig_2ru_cnt;
+	u32 ul_hetrig_3ru_cnt;
+	u32 ul_hetrig_4ru_cnt;
+	u32 ul_hetrig_5to8ru_cnt;
+	u32 ul_hetrig_9to16ru_cnt;
+	u32 ul_hetrig_gtr16ru_cnt;
+	u32 ul_hetrig_2mu_cnt;
+	u32 ul_hetrig_3mu_cnt;
+	u32 ul_hetrig_4mu_cnt;
 };

 struct mt76_power_limits {
@@ -770,26 +1129,15 @@ struct mt76_power_limits {
 	s8 ofdm[8];
 	s8 mcs[4][10];
 	s8 ru[7][12];
-};
-
-enum mt76_phy_type {
-	MT_PHY_TYPE_CCK,
-	MT_PHY_TYPE_OFDM,
-	MT_PHY_TYPE_HT,
-	MT_PHY_TYPE_HT_GF,
-	MT_PHY_TYPE_VHT,
-	MT_PHY_TYPE_HE_SU = 8,
-	MT_PHY_TYPE_HE_EXT_SU,
-	MT_PHY_TYPE_HE_TB,
-	MT_PHY_TYPE_HE_MU,
-	__MT_PHY_TYPE_HE_MAX,
-};
+	s8 eht[16][16];

-struct mt76_sta_stats {
-	u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
-	u64 tx_bw[4];		/* 20, 40, 80, 160 */
-	u64 tx_nss[4];		/* 1, 2, 3, 4 */
-	u64 tx_mcs[16];		/* mcs idx */
+	struct {
+		s8 cck[4];
+		s8 ofdm[4];
+		s8 ofdm_bf[4];
+		s8 ru[7][10];
+		s8 ru_bf[7][10];
+	} path;
 };

 struct mt76_ethtool_worker_info {
@@ -800,6 +1148,10 @@ struct mt76_ethtool_worker_info {
 	int sta_count;
 };

+struct mt76_chanctx {
+	struct mt76_phy *phy;
+};
+
 #define CCK_RATE(_idx, _rate) {					\
 	.bitrate = _rate,					\
 	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
@@ -834,7 +1186,6 @@ extern struct ieee80211_rate mt76_rates[12];

 #define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
-#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

 #define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
 #define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
@@ -850,28 +1201,20 @@

 #define mt76_hw(dev) (dev)->mphy.hw

-static inline struct ieee80211_hw *
-mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
-{
-	if (wcid <= MT76_N_WCIDS &&
-	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
-		return dev->phy2->hw;
-
-	return dev->phy.hw;
-}
-
 bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 		 int timeout);

 #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
-		      int timeout);
-
-#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+			int timeout, int kick);
+#define __mt76_poll_msec(...)         ____mt76_poll_msec(__VA_ARGS__, 10)
+#define mt76_poll_msec(dev, ...)      ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
+#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
 void mt76_pci_disable_aspm(struct pci_dev *pdev);
+bool mt76_pci_aspm_supported(struct pci_dev *pdev);

 static inline u16 mt76_chip(struct mt76_dev *dev)
 {
@@ -883,22 +1226,75 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
 	return dev->rev & 0xffff;
 }

+void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
+void mt76_wed_offload_disable(struct mtk_wed_device *wed);
+void mt76_wed_reset_complete(struct mtk_wed_device *wed);
+void mt76_wed_dma_reset(struct mt76_dev *dev);
+int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			  struct net_device *netdev, enum tc_setup_type type,
+			  void *type_data);
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
+int mt76_wed_offload_enable(struct mtk_wed_device *wed);
+int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
+#else
+static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+	return 0;
+}
+
+static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
+{
+	return 0;
+}
+
+static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
+				     bool reset)
+{
+	return 0;
+}
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

 #define mt76_init_queues(dev, ...)		(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_alloc(dev, ...)		(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
 #define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
-#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
 #define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_init(dev, ...)	(dev)->mt76.queue_ops->rx_queue_init(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

 #define mt76_for_each_q_rx(dev, i)	\
-	for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
-		    (dev)->q_rx[i].ndesc; i++)
+	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
+		if ((dev)->q_rx[i].ndesc)
+
+
+#define mt76_dereference(p, dev) \
+	rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))
+
+static inline struct mt76_dev *mt76_wed_to_dev(struct mtk_wed_device *wed)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	if (wed->wlan.hif2)
+		return container_of(wed, struct mt76_dev, mmio.wed_hif2);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+	return container_of(wed, struct mt76_dev, mmio.wed);
+}
+
+static inline struct mt76_wcid *
+__mt76_wcid_ptr(struct mt76_dev *dev, u16 idx)
+{
+	if (idx >= ARRAY_SIZE(dev->wcid))
+		return NULL;
+	return rcu_dereference(dev->wcid[idx]);
+}
+
+#define mt76_wcid_ptr(dev, idx) __mt76_wcid_ptr(&(dev)->mt76, idx)

 struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
 				   const struct ieee80211_ops *ops,
@@ -907,12 +1303,18 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
 			 struct ieee80211_rate *rates, int n_rates);
 void mt76_unregister_device(struct mt76_dev *dev);
 void mt76_free_device(struct mt76_dev *dev);
+void mt76_reset_device(struct mt76_dev *dev);
 void mt76_unregister_phy(struct mt76_phy *phy);

+struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
+				      u8 band_idx);
 struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
-				const struct ieee80211_ops *ops);
+				const struct ieee80211_ops *ops,
+				u8 band_idx);
 int mt76_register_phy(struct mt76_phy *phy, bool vht,
 		      struct ieee80211_rate *rates, int n_rates);
+struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif);

 struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
 					  const struct file_operations *ops);
@@ -926,23 +1328,24 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
 			 s8 *val, int len);

 int mt76_eeprom_init(struct mt76_dev *dev, int len);
-void mt76_eeprom_override(struct mt76_phy *phy);
-int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
+int mt76_eeprom_override(struct mt76_phy *phy);
+int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
+int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
+				const char *cell_name, int len);

 struct mt76_queue *
 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
-		int ring_base);
-u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
+		int ring_base, void *wed, u32 flags);

 static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
-				     int n_desc, int ring_base)
+				     int n_desc, int ring_base, void *wed,
+				     u32 flags)
 {
 	struct mt76_queue *q;

-	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
+	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
 	if (IS_ERR(q))
 		return PTR_ERR(q);

-	q->qid = qid;
 	phy->q_tx[qid] = q;

 	return 0;
@@ -953,28 +1356,29 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
 {
 	struct mt76_queue *q;

-	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
+	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
 	if (IS_ERR(q))
 		return PTR_ERR(q);

-	q->qid = __MT_TXQ_MAX + qid;
 	dev->q_mcu[qid] = q;

 	return 0;
 }

 static inline struct mt76_phy *
-mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
+mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
 {
-	if (phy_ext && dev->phy2)
-		return dev->phy2;
+	if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
+	    (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
+		return dev->phys[phy_idx];
+
 	return &dev->phy;
 }

 static inline struct ieee80211_hw *
-mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
+mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
 {
-	return mt76_dev_phy(dev, phy_ext)->hw;
+	return mt76_dev_phy(dev, phy_idx)->hw;
 }

 static inline u8 *
@@ -1013,6 +1417,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
 	if (!wcid || !wcid->sta)
 		return NULL;

+	if (wcid->def_wcid)
+		ptr = wcid->def_wcid;
+
 	return container_of(ptr, struct ieee80211_sta, drv_priv);
 }

@@ -1064,11 +1471,12 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
 	return pktid >= MT_PACKET_ID_FIRST;
 }

-static inline u8 mt76_tx_power_nss_delta(u8 nss)
+static inline u8 mt76_tx_power_path_delta(u8 path)
 {
-	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
+	static const u8 path_delta[5] = { 0, 6, 9, 12, 14 };
+	u8 idx = path - 1;

-	return nss_delta[nss - 1];
+	return (idx < ARRAY_SIZE(path_delta)) ? path_delta[idx] : 0;
 }

 static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
@@ -1085,13 +1493,17 @@ static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
 					struct ieee80211_hw **hw)
 {
 #ifdef CONFIG_NL80211_TESTMODE
-	if (skb == dev->phy.test.tx_skb)
-		*hw = dev->phy.hw;
-	else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
-		*hw = dev->phy2->hw;
-	else
-		return false;
-	return true;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
+		struct mt76_phy *phy = dev->phys[i];
+
+		if (phy && skb == phy->test.tx_skb) {
+			*hw = dev->phys[i]->hw;
+			return true;
+		}
+	}
+	return false;
 #else
 	return false;
 #endif
@@ -1114,11 +1526,12 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
 				  enum ieee80211_frame_release_type reason,
 				  bool more_data);
 bool mt76_has_tx_pending(struct mt76_phy *phy);
-void mt76_set_channel(struct mt76_phy *phy);
+int mt76_update_channel(struct mt76_phy *phy);
 void mt76_update_survey(struct mt76_phy *phy);
 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
 		    struct survey_info *survey);
+int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
@@ -1153,15 +1566,17 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		   struct ieee80211_sta *sta,
 		   enum ieee80211_sta_state old_state,
 		   enum ieee80211_sta_state new_state);
-void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta);
 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			     struct ieee80211_sta *sta);

-int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);
+
+s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower);

 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		     int *dbm);
+		     unsigned int link_id, int *dbm);
 int mt76_init_sar_power(struct ieee80211_hw *hw,
 			const struct cfg80211_sar_specs *sar);
 int mt76_get_sar_power(struct mt76_phy *phy,
@@ -1171,16 +1586,45 @@ int mt76_get_sar_power(struct mt76_phy *phy,

 void mt76_csa_check(struct mt76_dev *dev);
 void mt76_csa_finish(struct mt76_dev *dev);

-int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+		     u32 *rx_ant);
 int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
 int mt76_get_rate(struct mt76_dev *dev,
 		  struct ieee80211_supported_band *sband,
 		  int idx, bool cck);
+int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		 struct ieee80211_scan_request *hw_req);
+void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  const u8 *mac);
 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
+int mt76_add_chanctx(struct ieee80211_hw *hw,
+		     struct ieee80211_chanctx_conf *conf);
+void mt76_remove_chanctx(struct ieee80211_hw *hw,
+			 struct ieee80211_chanctx_conf *conf);
+void mt76_change_chanctx(struct ieee80211_hw *hw,
+			 struct ieee80211_chanctx_conf *conf,
+			 u32 changed);
+int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_bss_conf *link_conf,
+			    struct ieee80211_chanctx_conf *conf);
+void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_bss_conf *link_conf,
+			       struct ieee80211_chanctx_conf *conf);
+int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
+			    struct ieee80211_vif_chanctx_switch *vifs,
+			    int n_vifs,
+			    enum ieee80211_chanctx_switch_mode mode);
+int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_channel *chan, int duration,
+			   enum ieee80211_roc_type type);
+int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif);
 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		      void *data, int len);
 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1188,6 +1632,109 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
 int mt76_testmode_set_state(struct mt76_phy *phy,
 			    enum mt76_testmode_state state);
 int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
+#ifdef CONFIG_MT76_NPU
+void mt76_npu_check_ppe(struct mt76_dev *dev, struct sk_buff *skb,
+			u32 info);
+int mt76_npu_dma_add_buf(struct mt76_phy *phy, struct mt76_queue *q,
+			 struct sk_buff *skb, struct mt76_queue_buf *buf,
+			 void *txwi_ptr);
+int mt76_npu_rx_queue_init(struct mt76_dev *dev, struct mt76_queue *q);
+int mt76_npu_fill_rx_queue(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_queue_cleanup(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_disable_irqs(struct mt76_dev *dev);
+int mt76_npu_init(struct mt76_dev *dev, phys_addr_t phy_addr, int type);
+void mt76_npu_deinit(struct mt76_dev *dev);
+void mt76_npu_queue_setup(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_txdesc_cleanup(struct mt76_queue *q, int index);
+int mt76_npu_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			  struct net_device *dev, enum tc_setup_type type,
+			  void *type_data);
+#else
+static inline void mt76_npu_check_ppe(struct mt76_dev *dev,
+				      struct sk_buff *skb, u32 info)
+{
+}
+
+static inline int mt76_npu_dma_add_buf(struct mt76_phy *phy,
+				       struct mt76_queue *q,
+				       struct sk_buff *skb,
+				       struct mt76_queue_buf *buf,
+				       void *txwi_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int mt76_npu_fill_rx_queue(struct mt76_dev *dev,
+					 struct mt76_queue *q)
+{
+	return 0;
+}
+
+static inline void mt76_npu_queue_cleanup(struct mt76_dev *dev,
+					  struct mt76_queue *q)
+{
+}
+
+static inline void mt76_npu_disable_irqs(struct mt76_dev *dev)
+{
+}
+
+static inline int mt76_npu_init(struct mt76_dev *dev, phys_addr_t phy_addr,
+				int type)
+{
+	return 0;
+}
+
+static inline void mt76_npu_deinit(struct mt76_dev *dev)
+{
+}
+
+static inline void mt76_npu_queue_setup(struct mt76_dev *dev,
+					struct mt76_queue *q)
+{
+}
+
+static inline void mt76_npu_txdesc_cleanup(struct mt76_queue *q,
+					   int index)
+{
+}
+
+static inline int mt76_npu_net_setup_tc(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct net_device *dev,
+					enum tc_setup_type type,
+					void *type_data)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MT76_NPU */
+
+static inline bool mt76_npu_device_active(struct mt76_dev *dev)
+{
+	return !!rcu_access_pointer(dev->mmio.npu);
+}
+
+static inline bool mt76_ppe_device_active(struct mt76_dev *dev)
+{
+	return !!rcu_access_pointer(dev->mmio.ppe_dev);
+}
+
+static inline int mt76_npu_send_msg(struct airoha_npu *npu, int ifindex,
+				    enum airoha_npu_wlan_set_cmd cmd,
+				    u32 val, gfp_t gfp)
+{
+	return airoha_npu_wlan_send_msg(npu, ifindex, cmd, &val, sizeof(val),
+					gfp);
+}
+
+static inline int mt76_npu_get_msg(struct airoha_npu *npu, int ifindex,
+				   enum airoha_npu_wlan_get_cmd cmd,
+				   u32 *val, gfp_t gfp)
+{
+	return airoha_npu_wlan_get_msg(npu, ifindex, cmd, val, sizeof(*val),
+				       gfp);
+}
+
 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
 {
 #ifdef CONFIG_NL80211_TESTMODE
@@ -1206,17 +1753,18 @@ static inline struct ieee80211_hw *
 mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_hw *hw = dev->phy.hw;
-
-	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
-		hw = dev->phy2->hw;
+	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+	struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);

-	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
+	info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;

 	return hw;
 }

 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_free_pending_rxwi(struct mt76_dev *dev);
 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 		      struct napi_struct *napi);
 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
@@ -1225,6 +1773,19 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
 void mt76_testmode_tx_pending(struct mt76_phy *phy);
 void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
 			    struct mt76_queue_entry *e);
+int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+		       bool offchannel);
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+		     bool offchannel);
+void mt76_scan_work(struct work_struct *work);
+void mt76_abort_scan(struct mt76_dev *dev);
+void mt76_roc_complete_work(struct work_struct *work);
+void mt76_roc_complete(struct mt76_phy *phy);
+void mt76_abort_roc(struct mt76_phy *phy);
+struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
+					    struct ieee80211_vif *vif);
+void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
+			   struct mt76_vif_link *mlink);

 /* usb */
 static inline bool mt76u_urb_error(struct urb *urb)
@@ -1235,13 +1796,6 @@ static inline bool mt76u_urb_error(struct urb *urb)
 	       urb->status != -ENOENT;
 }

-/* Map hardware queues to usb endpoints */
-static inline u8 q2ep(u8 qid)
-{
-	/* TODO: take management packets to queue 5 */
-	return qid + 1;
-}
-
 static inline int
 mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 	       int timeout, int ep)
@@ -1259,16 +1813,25 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }

+void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
 void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
-			 struct mt76_sta_stats *stats);
+			 struct mt76_sta_stats *stats, bool eht);
 int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+			   u16 val, u16 offset, void *buf, size_t len);
 int mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
 			 u16 val, u16 offset, void *buf, size_t len);
 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
 		     const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
-	       bool ext);
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+		     void *data, int len);
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+		 u32 addr, u32 val);
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+		 struct mt76_bus_ops *ops);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
 int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
 int mt76u_alloc_queues(struct mt76_dev *dev);
 void mt76u_stop_tx(struct mt76_dev *dev);
@@ -1301,8 +1864,15 @@ int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
 		struct mt76_reg_pair *data, int len);

 struct sk_buff *
+__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+		     int len, int data_len, gfp_t gfp);
+static inline struct sk_buff *
 mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
-		   int data_len);
+		   int data_len)
+{
+	return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
+}
+
 void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
 struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
 				      unsigned long expires);
@@ -1337,15 +1907,120 @@ mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,

 void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

+struct device_node *
+mt76_find_power_limits_node(struct mt76_dev *dev);
+struct device_node *
+mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan);
+
 s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
 			      struct ieee80211_channel *chan,
 			      struct mt76_power_limits *dest,
 			      s8 target_power);

+static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+		if (q == &dev->q_rx[i])
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
+{
+	return (q->flags & MT_QFLAG_WED) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
+}
+
+static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
+{
+	return q->flags & MT_QFLAG_WED_RRO;
+}
+
+static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
+}
+
+static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
+}
+
+static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA;
+}
+
+static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) ==
+	       MT76_WED_RRO_Q_MSDU_PG;
+}
+
+static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+{
+	return (q->flags & MT_QFLAG_WED) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
+
+static inline bool mt76_queue_is_emi(struct mt76_queue *q)
+{
+	return q->flags & MT_QFLAG_EMI_EN;
+}
+
+static inline bool mt76_queue_is_npu(struct mt76_queue *q)
+{
+	return q->flags & MT_QFLAG_NPU;
+}
+
+static inline bool mt76_queue_is_npu_tx(struct mt76_queue *q)
+{
+	return mt76_queue_is_npu(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TX;
+}
+
+static inline bool mt76_queue_is_npu_rx(struct mt76_queue *q)
+{
+	return mt76_queue_is_npu(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
+
 struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
+struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+			  struct mt76_txwi_cache *r, dma_addr_t phys);
+int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
+static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
+{
+	struct page *page = virt_to_head_page(buf);
+
+	page_pool_put_full_page(pp_page_to_nmdesc(page)->pp, page,
+				allow_direct);
+}
+
+static inline void *
+mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
+{
+	struct page *page;
+
+	page = page_pool_alloc_frag(q->page_pool, offset, size,
+				    GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32);
+	if (!page)
+		return NULL;
+
+	return page_address(page) + *offset;
+}

 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
 {
@@ -1360,8 +2035,7 @@ mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 	int token;

 	spin_lock_bh(&dev->token_lock);
-	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
-			  GFP_ATOMIC);
+	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
 	spin_unlock_bh(&dev->token_lock);

 	return token;
@@ -1379,22 +2053,58 @@ mt76_token_put(struct mt76_dev *dev, int token)
 	return txwi;
 }

-static inline void mt76_packet_id_init(struct mt76_wcid *wcid)
+void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx);
+void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
+void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid);
+
+static inline void
+mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
+{
+	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+
+	mlink->mvif = mvif;
+	rcu_assign_pointer(mvif->link[0], mlink);
+}
+
+void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links);
+
+static inline struct mt76_vif_link *
+mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
 {
-	INIT_LIST_HEAD(&wcid->list);
-	idr_init(&wcid->pktid);
+	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+	struct mt76_vif_data *mvif = mlink->mvif;
+
+	if (!link_id)
+		return mlink;
+
+	return mt76_dereference(mvif->link[link_id], dev);
 }

-static inline void
-mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
+static inline struct mt76_vif_link *
+mt76_vif_conf_link(struct mt76_dev *dev, struct ieee80211_vif *vif,
+		   struct ieee80211_bss_conf *link_conf)
 {
-	struct sk_buff_head list;
+	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+	struct mt76_vif_data *mvif = mlink->mvif;
+
+	if (link_conf == &vif->bss_conf || !link_conf->link_id)
+		return mlink;
+
+	return mt76_dereference(mvif->link[link_conf->link_id], dev);
+}
+
+static inline struct mt76_phy *
+mt76_vif_link_phy(struct mt76_vif_link *mlink)
+{
+	struct mt76_chanctx *ctx;
+
+	if (!mlink->ctx)
+		return NULL;

-	mt76_tx_status_lock(dev, &list);
-	mt76_tx_status_skb_get(dev, wcid, -1, &list);
-	mt76_tx_status_unlock(dev, &list);
+	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;

-	idr_destroy(&wcid->pktid);
+	return ctx->phy;
 }

 #endif
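Notes on the new interfaces (illustrative sketches, not part of the commit):

The new queue flags pack the ring index into bits 1:0 (MT_QFLAG_WED_RING), the queue type into bits 4:2 (MT_QFLAG_WED_TYPE) and WED/RRO/EMI/NPU ownership into the single bits above them, so q->flags alone identifies both the offload backend and the ring. The standalone sketch below mirrors that packing with simplified userspace stand-ins for GENMASK/FIELD_PREP/FIELD_GET (the kernel versions come from linux/bits.h and linux/bitfield.h); it compiles and runs on its own and is not driver code.

/* Userspace sketch of the MT_QFLAG_* packing; the bit helpers below are
 * simplified stand-ins for the kernel macros of the same name.
 */
#include <assert.h>
#include <stdio.h>

#define BIT(n)              (1u << (n))
#define GENMASK(h, l)       ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v) (((v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, r)  (((r) & (mask)) >> __builtin_ctz(mask))

#define MT_QFLAG_WED_RING GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE GENMASK(4, 2)
#define MT_QFLAG_WED      BIT(5)

enum mt76_wed_type { MT76_WED_Q_TX, MT76_WED_Q_TXFREE, MT76_WED_Q_RX };

#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
			       FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
			       FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n)

int main(void)
{
	unsigned int flags = MT_WED_Q_RX(1); /* WED rx ring 1 */

	/* the same test mt76_queue_is_wed_rx() applies to q->flags */
	assert((flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, flags) == MT76_WED_Q_RX);
	assert(FIELD_GET(MT_QFLAG_WED_RING, flags) == 1);
	printf("flags=0x%x type=%u ring=%u\n", flags,
	       FIELD_GET(MT_QFLAG_WED_TYPE, flags),
	       FIELD_GET(MT_QFLAG_WED_RING, flags));
	return 0;
}

The mt76_queue_is_wed_rx()/mt76_queue_is_npu_*() helpers near the end of the header perform exactly this flag-plus-type check.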
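Similarly, the per-packet phy index stored in ieee80211_tx_info::hw_queue grows from the single MT_TX_HW_QUEUE_EXT_PHY bit into the two-bit MT_TX_HW_QUEUE_PHY field (bits 3:2), enough to address MT_BAND0..MT_BAND2, and mt76_tx_status_get_hw() recovers it with a mask and shift. A minimal sketch of that round trip (encode_hw_queue() and decode_phy_idx() are hypothetical helper names, not symbols from the header):

#include <assert.h>

#define MT_TX_HW_QUEUE_PHY 0x0c /* GENMASK(3, 2) */

/* what the tx path stores in info->hw_queue */
static unsigned char encode_hw_queue(unsigned char qid, unsigned char phy_idx)
{
	return qid | (phy_idx << 2);
}

/* what mt76_tx_status_get_hw() computes before clearing the field */
static unsigned char decode_phy_idx(unsigned char hw_queue)
{
	return (hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
}

int main(void)
{
	unsigned char phy_idx;

	for (phy_idx = 0; phy_idx < 3; phy_idx++) /* MT_BAND0..MT_BAND2 */
		assert(decode_phy_idx(encode_hw_queue(1, phy_idx)) == phy_idx);
	return 0;
}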
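struct mt76_wed_rro_ind is a pair of little-endian words whose layout is given by the RRO_IND_DATA* masks: the indication reason in data0 bits 31:28, the start sequence number in bits 27:16 and the session id in bits 11:0, plus a magic counter in data1 bits 31:29 and an indication count in bits 12:0. A sketch decoder with a hand-rolled FIELD_GET and made-up sample values, assuming a little-endian host so le32_to_cpu() reduces to a no-op:

#include <assert.h>
#include <stdint.h>

#define RRO_IND_DATA0_IND_REASON_MASK 0xf0000000u /* GENMASK(31, 28) */
#define RRO_IND_DATA0_START_SEQ_MASK  0x0fff0000u /* GENMASK(27, 16) */
#define RRO_IND_DATA0_SEQ_ID_MASK     0x00000fffu /* GENMASK(11, 0) */
#define RRO_IND_DATA1_MAGIC_CNT_MASK  0xe0000000u /* GENMASK(31, 29) */
#define RRO_IND_DATA1_IND_COUNT_MASK  0x00001fffu /* GENMASK(12, 0) */

/* works for any contiguous mask: divide by the mask's lowest set bit */
#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

struct mt76_wed_rro_ind {
	uint32_t data0;
	uint32_t data1;
};

int main(void)
{
	/* reason 1, start_seq 0x234, seq_id 0x56, magic_cnt 3, count 42 */
	struct mt76_wed_rro_ind ind = {
		.data0 = (1u << 28) | (0x234u << 16) | 0x56u,
		.data1 = (3u << 29) | 42u,
	};

	assert(FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, ind.data0) == 1);
	assert(FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, ind.data0) == 0x234);
	assert(FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, ind.data0) == 0x56);
	assert(FIELD_GET(RRO_IND_DATA1_MAGIC_CNT_MASK, ind.data1) == 3);
	assert(FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, ind.data1) == 42);
	return 0;
}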
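Finally, mt76_tx_power_nss_delta() becomes mt76_tx_power_path_delta() with a fifth entry and a bounds check. The table is consistent with 10*log10(n_paths) expressed in half-dB txpower steps (6 = 3 dB for two paths, up to 14 = 7 dB for five), though that unit reading is an interpretation, not something the header states; out-of-range path counts now return 0 instead of indexing past the array. A userspace copy of the helper demonstrating the new edge-case behavior:

#include <assert.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static unsigned char mt76_tx_power_path_delta(unsigned char path)
{
	static const unsigned char path_delta[5] = { 0, 6, 9, 12, 14 };
	unsigned char idx = path - 1;

	return (idx < ARRAY_SIZE(path_delta)) ? path_delta[idx] : 0;
}

int main(void)
{
	assert(mt76_tx_power_path_delta(2) == 6);  /* two paths: -3 dB */
	assert(mt76_tx_power_path_delta(5) == 14); /* five paths: -7 dB */
	assert(mt76_tx_power_path_delta(0) == 0);  /* 0 - 1 wraps to 255, caught */
	assert(mt76_tx_power_path_delta(9) == 0);  /* beyond the table */
	return 0;
}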
