Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt76.h')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 436
1 file changed, 403 insertions, 33 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 0b75a45ad2e8..d05e83ea1cac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ISC */
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
  */
@@ -13,6 +13,7 @@
 #include <linux/leds.h>
 #include <linux/usb.h>
 #include <linux/average.h>
+#include <linux/soc/airoha/airoha_offload.h>
 #include <linux/soc/mediatek/mtk_wed.h>
 #include <net/mac80211.h>
 #include <net/page_pool/helpers.h>
@@ -33,6 +34,8 @@
 #define MT_QFLAG_WED		BIT(5)
 #define MT_QFLAG_WED_RRO	BIT(6)
 #define MT_QFLAG_WED_RRO_EN	BIT(7)
+#define MT_QFLAG_EMI_EN		BIT(8)
+#define MT_QFLAG_NPU		BIT(9)
 
 #define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
@@ -45,11 +48,20 @@
 #define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
 #define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
 #define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
+#define MT_WED_RRO_Q_RXDMAD_C	__MT_WED_RRO_Q(MT76_WED_RRO_Q_RXDMAD_C, 0)
+
+#define __MT_NPU_Q(_type, _n)	(MT_QFLAG_NPU | \
+				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define MT_NPU_Q_TX(_n)		__MT_NPU_Q(MT76_WED_Q_TX, _n)
+#define MT_NPU_Q_RX(_n)		__MT_NPU_Q(MT76_WED_Q_RX, _n)
 
 struct mt76_dev;
 struct mt76_phy;
 struct mt76_wcid;
 struct mt76s_intr;
+struct mt76_chanctx;
+struct mt76_vif_link;
 
 struct mt76_reg_pair {
	u32 reg;
@@ -69,6 +81,13 @@ enum mt76_wed_type {
	MT76_WED_RRO_Q_DATA,
	MT76_WED_RRO_Q_MSDU_PG,
	MT76_WED_RRO_Q_IND,
+	MT76_WED_RRO_Q_RXDMAD_C,
+};
+
+enum mt76_hwrro_mode {
+	MT76_HWRRO_OFF,
+	MT76_HWRRO_V3,
+	MT76_HWRRO_V3_1,
 };
 
 struct mt76_bus_ops {
@@ -127,6 +146,9 @@ enum mt76_rxq_id {
	MT_RXQ_TXFREE_BAND1,
	MT_RXQ_TXFREE_BAND2,
	MT_RXQ_RRO_IND,
+	MT_RXQ_RRO_RXDMAD_C,
+	MT_RXQ_NPU0,
+	MT_RXQ_NPU1,
	__MT_RXQ_MAX
 };
 
@@ -160,6 +182,16 @@ enum mt76_dfs_state {
	MT_DFS_STATE_ACTIVE,
 };
 
+#define MT76_RNR_SCAN_MAX_BSSIDS	16
+struct mt76_scan_rnr_param {
+	u8 bssid[MT76_RNR_SCAN_MAX_BSSIDS][ETH_ALEN];
+	u8 channel[MT76_RNR_SCAN_MAX_BSSIDS];
+	u8 random_mac[ETH_ALEN];
+	u8 seq_num;
+	u8 bssid_num;
+	u32 sreq_flag;
+};
+
 struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len:15,
@@ -220,8 +252,12 @@ struct mt76_queue {
	u8 buf_offset;
	u16 flags;
 
+	u8 magic_cnt;
+
+	__le16 *emi_cpu_idx;
	struct mtk_wed_device *wed;
+	struct mt76_dev *dev;
	u32 wed_regs;
 
	dma_addr_t desc_dma;
@@ -274,11 +310,15 @@ struct mt76_queue_ops {
	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);
 
+	void (*rx_queue_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
+			      int (*poll)(struct napi_struct *napi, int budget));
+
	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
 
	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
 
-	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
+			bool reset_idx);
 };
 
 enum mt76_phy_type {
@@ -349,6 +389,7 @@ struct mt76_wcid {
	u8 hw_key_idx;
	u8 hw_key_idx2;
 
+	u8 offchannel:1;
	u8 sta:1;
	u8 sta_disabled:1;
	u8 amsdu:1;
@@ -385,15 +426,16 @@ struct mt76_txq {
	bool aggr;
 };
 
+/* data0 */
+#define RRO_IND_DATA0_IND_REASON_MASK	GENMASK(31, 28)
+#define RRO_IND_DATA0_START_SEQ_MASK	GENMASK(27, 16)
+#define RRO_IND_DATA0_SEQ_ID_MASK	GENMASK(11, 0)
+/* data1 */
+#define RRO_IND_DATA1_MAGIC_CNT_MASK	GENMASK(31, 29)
+#define RRO_IND_DATA1_IND_COUNT_MASK	GENMASK(12, 0)
 struct mt76_wed_rro_ind {
-	u32 se_id	: 12;
-	u32 rsv		: 4;
-	u32 start_sn	: 12;
-	u32 ind_reason	: 4;
-	u32 ind_cnt	: 13;
-	u32 win_sz	: 3;
-	u32 rsv2	: 13;
-	u32 magic_cnt	: 3;
+	__le32 data0;
+	__le32 data1;
 };
 
 struct mt76_txwi_cache {
@@ -404,6 +446,8 @@ struct mt76_txwi_cache {
		struct sk_buff *skb;
		void *ptr;
	};
+
+	u8 qid;
 };
 
 struct mt76_rx_tid {
@@ -489,6 +533,7 @@ struct mt76_hw_cap {
 #define MT_DRV_RX_DMA_HDR		BIT(3)
 #define MT_DRV_HW_MGMT_TXQ		BIT(4)
 #define MT_DRV_AMSDU_OFFLOAD		BIT(5)
+#define MT_DRV_IGNORE_TXS_FAILED	BIT(6)
 
 struct mt76_driver_ops {
	u32 drv_flags;
@@ -497,6 +542,8 @@ struct mt76_driver_ops {
	u16 token_size;
	u8 mcs_rates;
 
+	unsigned int link_data_size;
+
	void (*update_survey)(struct mt76_phy *phy);
	int (*set_channel)(struct mt76_phy *phy);
 
@@ -517,6 +564,10 @@ struct mt76_driver_ops {
	void (*rx_poll_complete)(struct mt76_dev *dev,
				 enum mt76_rxq_id q);
 
+	void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
+	int (*rx_rro_add_msdu_page)(struct mt76_dev *dev, struct mt76_queue *q,
+				    dma_addr_t p, void *data);
+
	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
 
@@ -528,6 +579,15 @@ struct mt76_driver_ops {
 
	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
+
+	int (*vif_link_add)(struct mt76_phy *phy, struct ieee80211_vif *vif,
+			    struct ieee80211_bss_conf *link_conf,
+			    struct mt76_vif_link *mlink);
+
+	void (*vif_link_remove)(struct mt76_phy *phy,
+				struct ieee80211_vif *vif,
+				struct ieee80211_bss_conf *link_conf,
+				struct mt76_vif_link *mlink);
 };
 
 struct mt76_channel_state {
@@ -636,6 +696,7 @@ struct mt76_sdio {
	u8 hw_ver;
	wait_queue_head_t wait;
 
+	int pse_mcu_quota_max;
	struct {
		int pse_data_quota;
		int ple_data_quota;
@@ -656,6 +717,11 @@ struct mt76_mmio {
	struct mtk_wed_device wed_hif2;
	struct completion wed_reset;
	struct completion wed_reset_complete;
+
+	struct airoha_ppe_dev __rcu *ppe_dev;
+	struct airoha_npu __rcu *npu;
+	phys_addr_t phy_addr;
+	int npu_type;
 };
 
 struct mt76_rx_status {
@@ -753,8 +819,9 @@ struct mt76_testmode_data {
	} rx_stats;
 };
 
-struct mt76_vif {
+struct mt76_vif_link {
	u8 idx;
+	u8 link_idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
@@ -763,7 +830,20 @@ struct mt76_vif {
	u8 basic_rates_idx;
	u8 mcast_rates_idx;
	u8 beacon_rates_idx;
+	bool offchannel;
	struct ieee80211_chanctx_conf *ctx;
+	struct mt76_wcid *wcid;
+	struct mt76_vif_data *mvif;
+	struct rcu_head rcu_head;
+};
+
+struct mt76_vif_data {
+	struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+	struct mt76_vif_link __rcu *offchannel_link;
+
+	struct mt76_phy *roc_phy;
+	u16 valid_links;
+	u8 deflink_id;
 };
 
 struct mt76_phy {
@@ -772,6 +852,7 @@ struct mt76_phy {
	void *priv;
 
	unsigned long state;
+	unsigned int num_sta;
	u8 band_idx;
 
	spinlock_t tx_lock;
@@ -779,8 +860,15 @@ struct mt76_phy {
	struct mt76_queue *q_tx[__MT_TXQ_MAX];
 
	struct cfg80211_chan_def chandef;
-	struct ieee80211_channel *main_chan;
+	struct cfg80211_chan_def main_chandef;
	bool offchannel;
+	bool radar_enabled;
+
+	struct delayed_work roc_work;
+	struct ieee80211_vif *roc_vif;
+	struct mt76_vif_link *roc_link;
+
+	struct mt76_chanctx *chanctx;
 
	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
@@ -825,6 +913,7 @@ struct mt76_phy {
 struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phys[__MT_MAX_BAND];
+	struct mt76_phy *band_phys[NUM_NL80211_BANDS];
 
	struct ieee80211_hw *hw;
@@ -861,6 +950,7 @@ struct mt76_dev {
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];
+	enum mt76_hwrro_mode hwrro_mode;
 
	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;
@@ -869,6 +959,7 @@ struct mt76_dev {
	struct idr token;
	u16 wed_token_count;
	u16 token_count;
+	u16 token_start;
	u16 token_size;
 
	spinlock_t rx_token_lock;
@@ -880,7 +971,6 @@ struct mt76_dev {
	spinlock_t status_lock;
 
	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
-	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
 
	u64 vif_mask;
 
@@ -903,12 +993,24 @@ struct mt76_dev {
	char alpha2[3];
	enum nl80211_dfs_regions region;
 
+	struct mt76_scan_rnr_param rnr;
+
	u32 debugfs_reg;
 
	u8 csa_complete;
 
	u32 rxfilter;
 
+	struct delayed_work scan_work;
+	struct {
+		struct cfg80211_scan_request *req;
+		struct ieee80211_channel *chan;
+		struct ieee80211_vif *vif;
+		struct mt76_vif_link *mlink;
+		struct mt76_phy *phy;
+		int chan_idx;
+	} scan;
+
 #ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
@@ -923,6 +1025,8 @@ struct mt76_dev {
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
+
+	atomic_t bus_hung;
 };
 
 /* per-phy stats.  */
@@ -1026,6 +1130,14 @@ struct mt76_power_limits {
	s8 mcs[4][10];
	s8 ru[7][12];
	s8 eht[16][16];
+
+	struct {
+		s8 cck[4];
+		s8 ofdm[4];
+		s8 ofdm_bf[4];
+		s8 ru[7][10];
+		s8 ru_bf[7][10];
+	} path;
 };
 
 struct mt76_ethtool_worker_info {
@@ -1036,6 +1148,10 @@ struct mt76_ethtool_worker_info {
	int sta_count;
 };
 
+struct mt76_chanctx {
+	struct mt76_phy *phy;
+};
+
 #define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
@@ -1148,6 +1264,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
 #define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
 #define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_init(dev, ...)	(dev)->mt76.queue_ops->rx_queue_init(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
 #define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
@@ -1156,6 +1273,29 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)
 
+
+#define mt76_dereference(p, dev) \
+	rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))
+
+static inline struct mt76_dev *mt76_wed_to_dev(struct mtk_wed_device *wed)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	if (wed->wlan.hif2)
+		return container_of(wed, struct mt76_dev, mmio.wed_hif2);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+	return container_of(wed, struct mt76_dev, mmio.wed);
+}
+
+static inline struct mt76_wcid *
+__mt76_wcid_ptr(struct mt76_dev *dev, u16 idx)
+{
+	if (idx >= ARRAY_SIZE(dev->wcid))
+		return NULL;
+	return rcu_dereference(dev->wcid[idx]);
+}
+
+#define mt76_wcid_ptr(dev, idx) __mt76_wcid_ptr(&(dev)->mt76, idx)
+
 struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
@@ -1163,13 +1303,18 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
 void mt76_unregister_device(struct mt76_dev *dev);
 void mt76_free_device(struct mt76_dev *dev);
+void mt76_reset_device(struct mt76_dev *dev);
 void mt76_unregister_phy(struct mt76_phy *phy);
 
+struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
+				      u8 band_idx);
 struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
 int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);
+struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif);
 
 struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
@@ -1183,7 +1328,7 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);
 
 int mt76_eeprom_init(struct mt76_dev *dev, int len);
-void mt76_eeprom_override(struct mt76_phy *phy);
+int mt76_eeprom_override(struct mt76_phy *phy);
 int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
 int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
				const char *cell_name, int len);
@@ -1191,8 +1336,6 @@ int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
 
 struct mt76_queue *
 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags);
-u16 mt76_calculate_default_rate(struct mt76_phy *phy,
-				struct ieee80211_vif *vif, int rateidx);
 static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, void *wed, u32 flags)
@@ -1328,12 +1471,12 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
	return pktid >= MT_PACKET_ID_FIRST;
 }
 
-static inline u8 mt76_tx_power_nss_delta(u8 nss)
+static inline u8 mt76_tx_power_path_delta(u8 path)
 {
-	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
-	u8 idx = nss - 1;
+	static const u8 path_delta[5] = { 0, 6, 9, 12, 14 };
+	u8 idx = path - 1;
 
-	return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
+	return (idx < ARRAY_SIZE(path_delta)) ? path_delta[idx] : 0;
 }
 
 static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
@@ -1423,15 +1566,17 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
-void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);
 
-int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);
+
+s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower);
 
 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-		     int *dbm);
+		     unsigned int link_id, int *dbm);
 int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
 int mt76_get_sar_power(struct mt76_phy *phy,
@@ -1441,17 +1586,45 @@ int mt76_get_sar_power(struct mt76_phy *phy,
 void mt76_csa_check(struct mt76_dev *dev);
 void mt76_csa_finish(struct mt76_dev *dev);
 
-int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+		     u32 *rx_ant);
 int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
 int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
+int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		 struct ieee80211_scan_request *hw_req);
+void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
 void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
+int mt76_add_chanctx(struct ieee80211_hw *hw,
+		     struct ieee80211_chanctx_conf *conf);
+void mt76_remove_chanctx(struct ieee80211_hw *hw,
+			 struct ieee80211_chanctx_conf *conf);
+void mt76_change_chanctx(struct ieee80211_hw *hw,
+			 struct ieee80211_chanctx_conf *conf,
+			 u32 changed);
+int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_bss_conf *link_conf,
+			    struct ieee80211_chanctx_conf *conf);
+void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_bss_conf *link_conf,
+			       struct ieee80211_chanctx_conf *conf);
+int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
+			    struct ieee80211_vif_chanctx_switch *vifs,
+			    int n_vifs,
+			    enum ieee80211_chanctx_switch_mode mode);
+int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_channel *chan, int duration,
+			   enum ieee80211_roc_type type);
+int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif);
 int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1459,6 +1632,109 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
 int mt76_testmode_set_state(struct mt76_phy *phy,
			    enum mt76_testmode_state state);
 int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
+#ifdef CONFIG_MT76_NPU
+void mt76_npu_check_ppe(struct mt76_dev *dev, struct sk_buff *skb,
+			u32 info);
+int mt76_npu_dma_add_buf(struct mt76_phy *phy, struct mt76_queue *q,
+			 struct sk_buff *skb, struct mt76_queue_buf *buf,
+			 void *txwi_ptr);
+int mt76_npu_rx_queue_init(struct mt76_dev *dev, struct mt76_queue *q);
+int mt76_npu_fill_rx_queue(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_queue_cleanup(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_disable_irqs(struct mt76_dev *dev);
+int mt76_npu_init(struct mt76_dev *dev, phys_addr_t phy_addr, int type);
+void mt76_npu_deinit(struct mt76_dev *dev);
+void mt76_npu_queue_setup(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_npu_txdesc_cleanup(struct mt76_queue *q, int index);
+int mt76_npu_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			  struct net_device *dev, enum tc_setup_type type,
+			  void *type_data);
+#else
+static inline void mt76_npu_check_ppe(struct mt76_dev *dev,
				      struct sk_buff *skb, u32 info)
+{
+}
+
+static inline int mt76_npu_dma_add_buf(struct mt76_phy *phy,
+				       struct mt76_queue *q,
+				       struct sk_buff *skb,
+				       struct mt76_queue_buf *buf,
+				       void *txwi_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int mt76_npu_fill_rx_queue(struct mt76_dev *dev,
+					 struct mt76_queue *q)
+{
+	return 0;
+}
+
+static inline void mt76_npu_queue_cleanup(struct mt76_dev *dev,
+					  struct mt76_queue *q)
+{
+}
+
+static inline void mt76_npu_disable_irqs(struct mt76_dev *dev)
+{
+}
+
+static inline int mt76_npu_init(struct mt76_dev *dev, phys_addr_t phy_addr,
+				int type)
+{
+	return 0;
+}
+
+static inline void mt76_npu_deinit(struct mt76_dev *dev)
+{
+}
+
+static inline void mt76_npu_queue_setup(struct mt76_dev *dev,
+					struct mt76_queue *q)
+{
+}
+
+static inline void mt76_npu_txdesc_cleanup(struct mt76_queue *q,
+					   int index)
+{
+}
+
+static inline int mt76_npu_net_setup_tc(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct net_device *dev,
+					enum tc_setup_type type,
+					void *type_data)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MT76_NPU */
+
+static inline bool mt76_npu_device_active(struct mt76_dev *dev)
+{
+	return !!rcu_access_pointer(dev->mmio.npu);
+}
+
+static inline bool mt76_ppe_device_active(struct mt76_dev *dev)
+{
+	return !!rcu_access_pointer(dev->mmio.ppe_dev);
+}
+
+static inline int mt76_npu_send_msg(struct airoha_npu *npu, int ifindex,
+				    enum airoha_npu_wlan_set_cmd cmd,
+				    u32 val, gfp_t gfp)
+{
+	return airoha_npu_wlan_send_msg(npu, ifindex, cmd, &val, sizeof(val),
+					gfp);
+}
+
+static inline int mt76_npu_get_msg(struct airoha_npu *npu, int ifindex,
+				   enum airoha_npu_wlan_get_cmd cmd,
+				   u32 *val, gfp_t gfp)
+{
+	return airoha_npu_wlan_get_msg(npu, ifindex, cmd, val, sizeof(*val),
+				       gfp);
+}
+
 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
 {
 #ifdef CONFIG_NL80211_TESTMODE
@@ -1497,8 +1773,19 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
 void mt76_testmode_tx_pending(struct mt76_phy *phy);
 void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);
+int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+		       bool offchannel);
 int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel);
+void mt76_scan_work(struct work_struct *work);
+void mt76_abort_scan(struct mt76_dev *dev);
+void mt76_roc_complete_work(struct work_struct *work);
+void mt76_roc_complete(struct mt76_phy *phy);
+void mt76_abort_roc(struct mt76_phy *phy);
+struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
+					    struct ieee80211_vif *vif);
+void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
+			   struct mt76_vif_link *mlink);
 
 /* usb */
 static inline bool mt76u_urb_error(struct urb *urb)
@@ -1659,21 +1946,51 @@ static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
 }
 
+static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
+}
+
 static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
 {
	return mt76_queue_is_wed_rro(q) &&
-	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
-		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA;
+}
+
+static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) ==
+	       MT76_WED_RRO_Q_MSDU_PG;
 }
 
 static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
 {
-	if (!(q->flags & MT_QFLAG_WED))
-		return false;
+	return (q->flags & MT_QFLAG_WED) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
 
-	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
-	       mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
+static inline bool mt76_queue_is_emi(struct mt76_queue *q)
+{
+	return q->flags & MT_QFLAG_EMI_EN;
+}
+
+static inline bool mt76_queue_is_npu(struct mt76_queue *q)
+{
+	return q->flags & MT_QFLAG_NPU;
+}
+static inline bool mt76_queue_is_npu_tx(struct mt76_queue *q)
+{
+	return mt76_queue_is_npu(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TX;
+}
+
+static inline bool mt76_queue_is_npu_rx(struct mt76_queue *q)
+{
+	return mt76_queue_is_npu(q) &&
+	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
 }
 
 struct mt76_txwi_cache *
@@ -1688,7 +2005,8 @@ static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
 {
	struct page *page = virt_to_head_page(buf);
 
-	page_pool_put_full_page(page->pp, page, allow_direct);
+	page_pool_put_full_page(pp_page_to_nmdesc(page)->pp, page,
+				allow_direct);
 }
 
 static inline void *
@@ -1696,7 +2014,8 @@ mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
 {
	struct page *page;
 
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
+	page = page_pool_alloc_frag(q->page_pool, offset, size,
+				    GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32);
	if (!page)
		return NULL;
 
@@ -1734,7 +2053,58 @@ mt76_token_put(struct mt76_dev *dev, int token)
	return txwi;
 }
 
-void mt76_wcid_init(struct mt76_wcid *wcid);
+void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx);
 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
+void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid);
+
+static inline void
+mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
+{
+	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+
+	mlink->mvif = mvif;
+	rcu_assign_pointer(mvif->link[0], mlink);
+}
+
+void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links);
+
+static inline struct mt76_vif_link *
+mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
+{
+	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+	struct mt76_vif_data *mvif = mlink->mvif;
+
+	if (!link_id)
+		return mlink;
+
+	return mt76_dereference(mvif->link[link_id], dev);
+}
+
+static inline struct mt76_vif_link *
+mt76_vif_conf_link(struct mt76_dev *dev, struct ieee80211_vif *vif,
+		   struct ieee80211_bss_conf *link_conf)
+{
+	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+	struct mt76_vif_data *mvif = mlink->mvif;
+
+	if (link_conf == &vif->bss_conf || !link_conf->link_id)
+		return mlink;
+
+	return mt76_dereference(mvif->link[link_conf->link_id], dev);
+}
+
+static inline struct mt76_phy *
+mt76_vif_link_phy(struct mt76_vif_link *mlink)
+{
+	struct mt76_chanctx *ctx;
+
+	if (!mlink->ctx)
+		return NULL;
+
+	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
+
+	return ctx->phy;
+}
 
 #endif
