Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice.h')
-rw-r--r--   drivers/net/ethernet/intel/ice/ice.h | 358
1 files changed, 265 insertions, 93 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index aa32111afd6e..147aaee192a7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -32,6 +32,7 @@
 #include <linux/pkt_sched.h>
 #include <linux/if_bridge.h>
 #include <linux/ctype.h>
+#include <linux/linkmode.h>
 #include <linux/bpf.h>
 #include <linux/btf.h>
 #include <linux/auxiliary_bus.h>
@@ -66,6 +67,7 @@
 #include "ice_sriov.h"
 #include "ice_vf_mbx.h"
 #include "ice_ptp.h"
+#include "ice_tspll.h"
 #include "ice_fdir.h"
 #include "ice_xsk.h"
 #include "ice_arfs.h"
@@ -74,11 +76,19 @@
 #include "ice_lag.h"
 #include "ice_vsi_vlan_ops.h"
 #include "ice_gnss.h"
+#include "ice_irq.h"
+#include "ice_dpll.h"
+#include "ice_adapter.h"
+#include "devlink/health.h"
 
 #define ICE_BAR0		0
 #define ICE_REQ_DESC_MULTIPLE	32
 #define ICE_MIN_NUM_DESC	64
-#define ICE_MAX_NUM_DESC	8160
+#define ICE_MAX_NUM_DESC_E810	8160
+#define ICE_MAX_NUM_DESC_E830	8096
+#define ICE_MAX_NUM_DESC_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+				     ICE_MAX_NUM_DESC_E830 : \
+				     ICE_MAX_NUM_DESC_E810)
 #define ICE_DFLT_MIN_RX_DESC	512
 #define ICE_DFLT_NUM_TX_DESC	256
 #define ICE_DFLT_NUM_RX_DESC	2048
@@ -92,9 +102,6 @@
 #define ICE_MIN_LAN_OICR_MSIX	1
 #define ICE_MIN_MSIX		(ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
 #define ICE_FDIR_MSIX		2
-#define ICE_RDMA_NUM_AEQ_MSIX	4
-#define ICE_MIN_RDMA_MSIX	2
-#define ICE_ESWITCH_MSIX	1
 #define ICE_NO_VSI		0xffff
 #define ICE_VSI_MAP_CONTIG	0
 #define ICE_VSI_MAP_SCATTER	1
@@ -103,11 +110,6 @@
 #define ICE_Q_WAIT_RETRY_LIMIT	10
 #define ICE_Q_WAIT_MAX_RETRY	(5 * ICE_Q_WAIT_RETRY_LIMIT)
 #define ICE_MAX_LG_RSS_QS	256
-#define ICE_RES_VALID_BIT	0x8000
-#define ICE_RES_MISC_VEC_ID	(ICE_RES_VALID_BIT - 1)
-#define ICE_RES_RDMA_VEC_ID	(ICE_RES_MISC_VEC_ID - 1)
-/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
-#define ICE_RES_VF_CTRL_VEC_ID	(ICE_RES_RDMA_VEC_ID - 1)
 #define ICE_INVAL_Q_INDEX	0xffff
 
 #define ICE_MAX_RXQS_PER_TC	256	/* Used when setting VSI context per TC Rx queues */
@@ -182,11 +184,9 @@
 #define ice_for_each_chnl_tc(i)	\
 	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
 
-#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)
+#define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX
 
-#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
-				     ICE_PROMISC_UCAST_RX | \
-				     ICE_PROMISC_VLAN_TX | \
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \
 				     ICE_PROMISC_VLAN_RX)
 
 #define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
@@ -200,9 +200,16 @@ enum ice_feature {
 	ICE_F_DSCP,
-	ICE_F_PTP_EXTTS,
+	ICE_F_PHY_RCLK,
 	ICE_F_SMA_CTRL,
+	ICE_F_CGU,
 	ICE_F_GNSS,
+	ICE_F_TXTIME,
+	ICE_F_GCS,
+	ICE_F_ROCE_LAG,
+	ICE_F_SRIOV_LAG,
+	ICE_F_SRIOV_AA_LAG,
+	ICE_F_MBX_LIMIT,
 	ICE_F_MAX
 };
 
@@ -245,12 +252,6 @@ struct ice_tc_cfg {
 	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
 };
 
-struct ice_res_tracker {
-	u16 num_entries;
-	u16 end;
-	u16 list[];
-};
-
 struct ice_qs_cfg {
 	struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
 	unsigned long *pf_map;
@@ -320,6 +321,7 @@ enum ice_vsi_state {
 	ICE_VSI_UMAC_FLTR_CHANGED,
 	ICE_VSI_MMAC_FLTR_CHANGED,
 	ICE_VSI_PROMISC_CHANGED,
+	ICE_VSI_REBUILD_PENDING,
 	ICE_VSI_STATE_NBITS		/* must be last */
 };
 
@@ -333,7 +335,6 @@ struct ice_vsi {
 	struct net_device *netdev;
 	struct ice_sw *vsw;		 /* switch this VSI is on */
 	struct ice_pf *back;		 /* back pointer to PF */
-	struct ice_port_info *port_info; /* back pointer to port_info */
 	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
 	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
 	struct ice_q_vector **q_vectors; /* q_vector array */
@@ -348,19 +349,20 @@ struct ice_vsi {
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
 	u16 num_q_vectors;
-	u16 base_vector;		/* IRQ base for OS reserved vectors */
-	enum ice_vsi_type type;
+	/* tell if only dynamic irq allocation is allowed */
+	bool irq_dyn_alloc;
+	bool hsplit:1;
+
 	u16 vsi_num;			/* HW (absolute) index of this VSI */
 	u16 idx;			/* software index in pf->vsi[] */
 
-	struct ice_vf *vf;		/* VF associated with this VSI */
-
 	u16 num_gfltr;
 	u16 num_bfltr;
 
 	/* RSS config */
 	u16 rss_table_size;	/* HW RSS table size */
 	u16 rss_size;		/* Allocated RSS queues */
+	u8 rss_hfunc;		/* User configured hash type */
 	u8 *rss_hkey_user;	/* User configured hash keys */
 	u8 *rss_lut_user;	/* User configured lookup table entries */
 	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */
@@ -374,9 +376,9 @@ struct ice_vsi {
 	atomic_t *arfs_last_fltr_id;
 
 	u16 max_frame;
-	u16 rx_buf_len;
 
 	struct ice_aqc_vsi_props info;	/* VSI properties */
+	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */
 
 	/* VSI stats */
 	struct rtnl_link_stats64 net_stats;
@@ -407,13 +409,12 @@ struct ice_vsi {
 	u16 req_rxq;			 /* User requested Rx queues */
 	u16 num_rx_desc;
 	u16 num_tx_desc;
-	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
 	struct ice_tc_cfg tc_cfg;
 	struct bpf_prog *xdp_prog;
 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
-	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
 	u16 num_xdp_txq;		 /* Used XDP queues */
 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+	struct mutex xdp_state_lock;
 
 	struct net_device **target_netdevs;
@@ -444,12 +445,23 @@ struct ice_vsi {
 	u8 old_numtc;
 	u16 old_ena_tc;
 
-	struct ice_channel *ch;
-
 	/* setup back reference, to which aggregator node this VSI
 	 * corresponds to
 	 */
 	struct ice_agg_node *agg_node;
+
+	struct_group_tagged(ice_vsi_cfg_params, params,
+		struct ice_port_info *port_info; /* back pointer to port_info */
+		struct ice_channel *ch; /* VSI's channel structure, may be NULL */
+		union {
+			/* VF associated with this VSI, may be NULL */
+			struct ice_vf *vf;
+			/* SF associated with this VSI, may be NULL */
+			struct ice_dynamic_port *sf;
+		};
+		u32 flags; /* VSI flags used for rebuild and configuration */
+		enum ice_vsi_type type; /* the type of the VSI */
+	);
 } ____cacheline_internodealigned_in_smp;
 
 /* struct that defines an interrupt vector */
@@ -457,7 +469,7 @@ struct ice_q_vector {
 	struct ice_vsi *vsi;
 
 	u16 v_idx;			/* index in the vsi->q_vector array. */
-	u16 reg_idx;
+	u16 reg_idx;			/* PF relative register index */
 	u8 num_ring_rx;			/* total number of Rx rings in vector */
 	u8 num_ring_tx;			/* total number of Tx rings in vector */
 	u8 wb_on_itr:1;			/* if true, WB on ITR is enabled */
@@ -471,14 +483,13 @@ struct ice_q_vector {
 	struct ice_ring_container rx;
 	struct ice_ring_container tx;
 
-	cpumask_t affinity_mask;
-	struct irq_affinity_notify affinity_notify;
-
 	struct ice_channel *ch;
 
 	char name[ICE_INT_NAME_STR_LEN];
 
 	u16 total_events;	/* net_dim(): number of interrupts processed */
+	u16 vf_reg_idx;		/* VF relative register index */
+	struct msi_map irq;
 } ____cacheline_internodealigned_in_smp;
 
 enum ice_pf_flags {
@@ -491,7 +502,6 @@ enum ice_pf_flags {
 	ICE_FLAG_DCB_ENA,
 	ICE_FLAG_FD_ENA,
 	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
-	ICE_FLAG_PTP,			/* PTP is enabled by software */
 	ICE_FLAG_ADV_FEATURES,
 	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
 	ICE_FLAG_CLS_FLOWER,
@@ -502,21 +512,29 @@ enum ice_pf_flags {
 	ICE_FLAG_MOD_POWER_UNSUPPORTED,
 	ICE_FLAG_PHY_FW_LOAD_FAILED,
 	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
-	ICE_FLAG_LEGACY_RX,
 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
 	ICE_FLAG_MDD_AUTO_RESET_VF,
 	ICE_FLAG_VF_VLAN_PRUNING,
 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
 	ICE_FLAG_PLUG_AUX_DEV,
 	ICE_FLAG_UNPLUG_AUX_DEV,
+	ICE_FLAG_AUX_DEV_CREATED,
 	ICE_FLAG_MTU_CHANGED,
 	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
+	ICE_FLAG_DPLL,			/* SyncE/PTP dplls initialized */
+	ICE_FLAG_LLDP_AQ_FLTR,
 	ICE_PF_FLAGS_NBITS		/* must be last */
 };
 
-struct ice_switchdev_info {
-	struct ice_vsi *control_vsi;
+enum ice_misc_thread_tasks {
+	ICE_MISC_THREAD_TX_TSTAMP,
+	ICE_MISC_THREAD_NBITS		/* must be last */
+};
+
+struct ice_eswitch {
 	struct ice_vsi *uplink_vsi;
+	struct ice_esw_br_offloads *br_offloads;
+	struct xarray reprs;
 	bool is_running;
 };
 
@@ -527,8 +545,17 @@ struct ice_agg_node {
 	u8 valid;
 };
 
+struct ice_pf_msix {
+	u32 cur;
+	u32 min;
+	u32 max;
+	u32 total;
+	u32 rest;
+};
+
 struct ice_pf {
 	struct pci_dev *pdev;
+	struct ice_adapter *adapter;
 
 	struct devlink_region *nvm_region;
 	struct devlink_region *sram_region;
@@ -538,13 +565,8 @@ struct ice_pf {
 	struct devlink_port devlink_port;
 
 	/* OS reserved IRQ details */
-	struct msix_entry *msix_entries;
-	struct ice_res_tracker *irq_tracker;
-	/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
-	 * number of MSIX vectors needed for all SR-IOV VFs from the number of
-	 * MSIX vectors allowed on this PF.
-	 */
-	u16 sriov_base_vector;
+	struct ice_irq_tracker irq_tracker;
+	struct ice_virt_irq_tracker virt_irq_tracker;
 
 	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */
 
@@ -552,12 +574,15 @@ struct ice_pf {
 	struct ice_vsi_stats **vsi_stats;
 	struct ice_sw *first_sw;	/* first switch created by firmware */
 	u16 eswitch_mode;		/* current mode of eswitch */
+	struct dentry *ice_debugfs_pf;
 	struct ice_vfs vfs;
 	DECLARE_BITMAP(features, ICE_F_MAX);
 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+	DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
 	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
 	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
+	unsigned long *txtime_txqs;	/* bitmap to track PF Tx Time queue */
 	unsigned long serv_tmr_period;
 	unsigned long serv_tmr_prev;
 	struct timer_list serv_tmr;
@@ -566,12 +591,12 @@ struct ice_pf {
 	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
 	struct mutex tc_mutex;		/* lock to protect TC changes */
 	struct mutex adev_mutex;	/* lock to protect aux device access */
+	struct mutex lag_mutex;		/* protect ice_lag struct in PF */
 	u32 msg_enable;
 	struct ice_ptp ptp;
 	struct gnss_serial *gnss_serial;
 	struct gnss_device *gnss_dev;
 	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */
-	u16 rdma_base_vector;
 
 	/* spinlock to protect the AdminQ wait list */
 	spinlock_t aq_wait_lock;
@@ -582,12 +607,13 @@ struct ice_pf {
 	wait_queue_head_t reset_wait_queue;
 
 	u32 hw_csum_rx_error;
+	u32 hw_rx_eipe_error;
 	u32 oicr_err_reg;
-	u16 oicr_idx;		/* Other interrupt cause MSIX vector index */
-	u16 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
+	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
+	struct msi_map ll_ts_irq;	/* LL_TS interrupt MSIX vector */
 	u16 max_pf_txqs;	/* Total Tx queues PF wide */
 	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
-	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
+	struct ice_pf_msix msix;
 	u16 num_lan_tx;		/* num LAN Tx queues setup */
 	u16 num_lan_rx;		/* num LAN Rx queues setup */
 	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
@@ -596,6 +622,7 @@ struct ice_pf {
 	u16 globr_count;	/* Global reset count */
 	u16 empr_count;		/* EMP reset count */
 	u16 pfr_count;		/* PF reset count */
+	u32 link_down_events;
 
 	u8 wol_ena : 1;		/* software state of WoL */
 	u32 wakeup_reason;	/* last wakeup reason */
@@ -603,13 +630,12 @@ struct ice_pf {
 	struct ice_hw_port_stats stats_prev;
 	struct ice_hw hw;
 	u8 stat_prev_loaded:1;	/* has previous stats been loaded */
-	u8 rdma_mode;
 	u16 dcbx_cap;
 	u32 tx_timeout_count;
 	unsigned long tx_timeout_last_recovery;
 	u32 tx_timeout_recovery_level;
 	char int_name[ICE_INT_NAME_STR_LEN];
-	struct auxiliary_device *adev;
+	char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
 	int aux_idx;
 	u32 sw_int_count;
 	/* count of tc_flower filters specific to channel (aka where filter
@@ -625,7 +651,11 @@ struct ice_pf {
 	struct ice_link_default_override_tlv link_dflt_override;
 	struct ice_lag *lag; /* Link Aggregation information */
 
-	struct ice_switchdev_info switchdev;
+	struct ice_eswitch eswitch;
+	struct ice_esw_br_port *br_port;
+
+	struct xarray dyn_ports;
+	struct xarray sf_nums;
 
 #define ICE_INVALID_AGG_NODE_ID		0
 #define ICE_PF_AGG_NODE_ID_START	1
@@ -634,8 +664,16 @@ struct ice_pf {
 #define ICE_VF_AGG_NODE_ID_START	65
 #define ICE_MAX_VF_AGG_NODES		32
 	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
+	struct ice_dplls dplls;
+	struct device *hwmon_dev;
+	struct ice_health health_reporters;
+	struct iidc_rdma_core_dev_info *cdev_info;
+
+	u8 num_quanta_prof_used;
 };
 
+extern struct workqueue_struct *ice_lag_wq;
+
 struct ice_netdev_priv {
 	struct ice_vsi *vsi;
 	struct ice_repr *repr;
@@ -660,6 +698,18 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
 }
 
 /**
+ * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
+ * @pf: Board private structure
+ *
+ * Return true if this PF should respond to the Tx timestamp interrupt
+ * indication in the miscellaneous OICR interrupt handler.
+ */
+static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
+{
+	return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
+}
+
+/**
  * ice_irq_dynamic_ena - Enable default interrupt generation settings
  * @hw: pointer to HW struct
  * @vsi: pointer to VSI struct, can be NULL
@@ -670,7 +720,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
 		    struct ice_q_vector *q_vector)
 {
 	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
-				((struct ice_pf *)hw->back)->oicr_idx;
+				((struct ice_pf *)hw->back)->oicr_irq.index;
 	int itr = ICE_ITR_NONE;
 	u32 val;
@@ -707,21 +757,61 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
 }
 
 /**
- * ice_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Rx ring to use
+ * ice_is_txtime_ena - check if Tx Time is enabled on the Tx ring
+ * @ring: pointer to Tx ring
  *
- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
- * present, NULL otherwise.
+ * Return: true if the Tx ring has Tx Time enabled, false otherwise.
  */
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring)
 {
 	struct ice_vsi *vsi = ring->vsi;
-	u16 qid = ring->q_index;
+	struct ice_pf *pf = vsi->back;
+
+	return test_bit(ring->q_index, pf->txtime_txqs);
+}
+
+/**
+ * ice_is_txtime_cfg - check if Tx Time is configured on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring is configured for Tx ring, false otherwise.
+ */
+static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring)
+{
+	return !!(ring->flags & ICE_TX_FLAGS_TXTIME);
+}
+
+/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+							u16 qid)
+{
+	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+	if (!ice_is_xdp_ena_vsi(vsi))
 		return NULL;
 
-	return xsk_get_pool_from_qid(vsi->netdev, qid);
+	return (pool && pool->dev) ? pool : NULL;
+}
+
+/**
+ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
+ * @ring: Rx ring to use
+ *
+ * Sets XSK buff pool pointer on Rx ring.
+ */
+static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
+{
+	struct ice_vsi *vsi = ring->vsi;
+	u16 qid = ring->q_index;
+
+	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
 }
 
 /**
@@ -746,12 +836,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 	if (!ring)
 		return;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
-		ring->xsk_pool = NULL;
-		return;
-	}
-
-	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
 }
 
 /**
@@ -818,26 +903,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
  */
 static inline bool ice_is_switchdev_running(struct ice_pf *pf)
 {
-	return pf->switchdev.is_running;
-}
-
-/**
- * ice_set_sriov_cap - enable SRIOV in PF flags
- * @pf: PF struct
- */
-static inline void ice_set_sriov_cap(struct ice_pf *pf)
-{
-	if (pf->hw.func_caps.common_cap.sr_iov_1_1)
-		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
-}
-
-/**
- * ice_clear_sriov_cap - disable SRIOV in PF flags
- * @pf: PF struct
- */
-static inline void ice_clear_sriov_cap(struct ice_pf *pf)
-{
-	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+	return pf->eswitch.is_running;
 }
 
 #define ICE_FD_STAT_CTR_BLOCK_COUNT	256
@@ -872,7 +938,12 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
 	return false;
 }
 
-bool netif_is_ice(struct net_device *dev);
+int ice_debugfs_pf_init(struct ice_pf *pf);
+void ice_debugfs_pf_deinit(struct ice_pf *pf);
+void ice_debugfs_init(void);
+void ice_debugfs_exit(void);
+
+bool netif_is_ice(const struct net_device *dev);
 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
 int ice_vsi_open_ctrl(struct ice_vsi *vsi);
@@ -880,6 +951,7 @@ int ice_vsi_open(struct ice_vsi *vsi);
 void ice_set_ethtool_ops(struct net_device *netdev);
 void ice_set_ethtool_repr_ops(struct net_device *netdev);
 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+void ice_set_ethtool_sf_ops(struct net_device *netdev);
 u16 ice_get_avail_txq_count(struct ice_pf *pf);
 u16 ice_get_avail_rxq_count(struct ice_pf *pf);
 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
@@ -893,9 +965,17 @@ int ice_down(struct ice_vsi *vsi);
 int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg_lan(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+	ICE_XDP_CFG_FULL,	/* Fully apply new config in .ndo_bpf() */
+	ICE_XDP_CFG_PART,	/* Save/use part of config in VSI rebuild */
+};
+
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+			  enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		 u32 flags);
@@ -903,6 +983,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
@@ -910,7 +991,6 @@ int ice_plug_aux_dev(struct ice_pf *pf);
 void ice_unplug_aux_dev(struct ice_pf *pf);
 int ice_init_rdma(struct ice_pf *pf);
 void ice_deinit_rdma(struct ice_pf *pf);
-const char *ice_aq_str(enum ice_aq_err aq_err);
 bool ice_is_wol_supported(struct ice_hw *hw);
 void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
 int
@@ -928,14 +1008,46 @@ void ice_fdir_release_flows(struct ice_hw *hw);
 void ice_fdir_replay_flows(struct ice_hw *hw);
 void ice_fdir_replay_fltrs(struct ice_pf *pf);
 int ice_fdir_create_dflt_rules(struct ice_pf *pf);
-int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
-			  struct ice_rq_event_info *event);
+
+enum ice_aq_task_state {
+	ICE_AQ_TASK_NOT_PREPARED,
+	ICE_AQ_TASK_WAITING,
+	ICE_AQ_TASK_COMPLETE,
+	ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+	struct hlist_node entry;
+	struct ice_rq_event_info event;
+	enum ice_aq_task_state state;
+	u16 opcode;
+};
+
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+			   u16 opcode);
+int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+			  unsigned long timeout);
 int ice_open(struct net_device *netdev);
 int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
 void ice_service_task_schedule(struct ice_pf *pf);
+void ice_start_service_task(struct ice_pf *pf);
 int ice_load(struct ice_pf *pf);
 void ice_unload(struct ice_pf *pf);
+void ice_adv_lnk_speed_maps_init(void);
+void ice_init_dev_hw(struct ice_pf *pf);
+int ice_init_dev(struct ice_pf *pf);
+void ice_deinit_dev(struct ice_pf *pf);
+int ice_init_pf(struct ice_pf *pf);
+void ice_deinit_pf(struct ice_pf *pf);
+int ice_change_mtu(struct net_device *netdev, int new_mtu);
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+void ice_set_netdev_features(struct net_device *netdev);
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void ice_get_stats64(struct net_device *netdev,
+		     struct rtnl_link_stats64 *stats);
 
 /**
  * ice_set_rdma_cap - enable RDMA support
@@ -962,4 +1074,64 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
 }
+
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
+
+/**
+ * ice_is_dual - Check if given config is multi-NAC
+ * @hw: pointer to HW structure
+ *
+ * Return: true if the device is running in mutli-NAC (Network
+ * Acceleration Complex) configuration variant, false otherwise
+ * (always false for non-E825 devices).
+ */
+static inline bool ice_is_dual(struct ice_hw *hw)
+{
+	return hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+	       (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+}
+
+/**
+ * ice_is_primary - Check if given device belongs to the primary complex
+ * @hw: pointer to HW structure
+ *
+ * Check if given PF/HW is running on primary complex in multi-NAC
+ * configuration.
+ *
+ * Return: true if the device is dual, false otherwise (always true
+ * for non-E825 devices).
+ */
+static inline bool ice_is_primary(struct ice_hw *hw)
+{
+	return hw->mac_type != ICE_MAC_GENERIC_3K_E825 ||
+	       !ice_is_dual(hw) ||
+	       (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
+}
+
+/**
+ * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF
+ * @pf: pointer to PF structure
+ *
+ * Return: true if PF owns primary timer, false otherwise.
+ */
+static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf)
+{
+	return pf->hw.func_caps.ts_func_info.src_tmr_owned &&
+	       ice_is_primary(&pf->hw);
+}
+
+/**
+ * ice_get_primary_hw - Get pointer to primary ice_hw structure
+ * @pf: pointer to PF structure
+ *
+ * Return: A pointer to ice_hw structure with access to timesync
+ * register space.
+ */
+static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
+{
+	if (!pf->adapter->ctrl_pf)
+		return &pf->hw;
+	else
+		return &pf->adapter->ctrl_pf->hw;
+}
 #endif /* _ICE_H_ */
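For context on the reworked AdminQ event-wait interface declared above (ice_aq_prep_for_event() plus the task-based ice_aq_wait_for_event()), the sketch below shows how a caller might use it. This is illustrative only and not part of the patch: the function name, the opcode argument, the timeout value, and the command-send step are assumptions based solely on the prototypes and struct ice_aq_task shown in the diff.

/* Hypothetical usage sketch -- not part of the diff above. */
#include "ice.h"

static int ice_example_wait_for_aq_event(struct ice_pf *pf, u16 opcode)
{
	struct ice_aq_task task;
	int err;

	/* Register interest in the completion event before issuing the
	 * AdminQ command, so the firmware response cannot be missed.
	 */
	ice_aq_prep_for_event(pf, &task, opcode);

	/* ... send the AdminQ command associated with 'opcode' here ... */

	/* Wait for the matching event (timeout assumed to be in jiffies). */
	err = ice_aq_wait_for_event(pf, &task, HZ);
	if (err)
		return err;

	/* On success, task.event holds the received descriptor and buffer. */
	return 0;
}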
