Diffstat (limited to 'drivers/net/ethernet/sfc/net_driver.h')
| -rw-r--r-- | drivers/net/ethernet/sfc/net_driver.h | 487 |
1 file changed, 291 insertions, 196 deletions
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 1afb58feb9ab..b98c259f672d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -15,7 +15,7 @@ #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/timer.h> -#include <linux/mdio.h> +#include <linux/mii.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/device.h> @@ -27,6 +27,7 @@ #include <linux/mtd/mtd.h> #include <net/busy_poll.h> #include <net/xdp.h> +#include <net/netevent.h> #include "enum.h" #include "bitfield.h" @@ -38,8 +39,6 @@ * **************************************************************************/ -#define EFX_DRIVER_VERSION "4.1" - #ifdef DEBUG #define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x) #define EFX_WARN_ON_PARANOID(x) WARN_ON(x) @@ -58,17 +57,19 @@ #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS #define EFX_EXTRA_CHANNEL_IOV 0 #define EFX_EXTRA_CHANNEL_PTP 1 -#define EFX_MAX_EXTRA_CHANNELS 2U +#define EFX_EXTRA_CHANNEL_TC 2 +#define EFX_MAX_EXTRA_CHANNELS 3U /* Checksum generation is a per-queue option in hardware, so each * queue visible to the networking core is backed by two hardware TX * queues. */ #define EFX_MAX_TX_TC 2 #define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) -#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */ -#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */ +#define EFX_TXQ_TYPE_OUTER_CSUM 1 /* Outer checksum offload */ +#define EFX_TXQ_TYPE_INNER_CSUM 2 /* Inner checksum offload */ #define EFX_TXQ_TYPES 4 -#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) +#define EFX_MAX_TXQ_PER_CHANNEL 4 +#define EFX_MAX_TX_QUEUES (EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS) /* Maximum possible MTU the driver supports */ #define EFX_MAX_MTU (9 * 1024) @@ -76,6 +77,9 @@ /* Minimum MTU, from RFC791 (IP) */ #define EFX_MIN_MTU 68 +/* Maximum total header length for TSOv2 */ +#define EFX_TSO2_MAX_HDRLEN 208 + /* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, * and should be a multiple of the cache line size. */ @@ -119,26 +123,6 @@ struct efx_buffer { }; /** - * struct efx_special_buffer - DMA buffer entered into buffer table - * @buf: Standard &struct efx_buffer - * @index: Buffer index within controller;s buffer table - * @entries: Number of buffer table entries - * - * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE. - * Event and descriptor rings are addressed via one or more buffer - * table entries (and so can be physically non-contiguous, although we - * currently do not take advantage of that). On Falcon and Siena we - * have to take care of allocating and initialising the entries - * ourselves. On later hardware this is managed by the firmware and - * @index and @entries are left as 0. 
- */ -struct efx_special_buffer { - struct efx_buffer buf; - unsigned int index; - unsigned int entries; -}; - -/** * struct efx_tx_buffer - buffer state for a TX descriptor * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be * freed when descriptor completes @@ -173,6 +157,8 @@ struct efx_tx_buffer { #define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ #define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */ #define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */ +#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */ +#define EFX_TX_BUF_EFV 0x100 /* buffer was sent from representor */ /** * struct efx_tx_queue - An Efx TX queue @@ -189,7 +175,11 @@ struct efx_tx_buffer { * * @efx: The associated Efx NIC * @queue: DMA queue number + * @label: Label for TX completion events. + * Is our index within @channel->tx_queue array. + * @type: configuration type of this TX queue. A bitmask of %EFX_TXQ_TYPE_* flags. * @tso_version: Version of TSO in use for this queue. + * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series. * @channel: The associated channel * @core_txq: The networking core TX queue structure * @buffer: The software buffer ring @@ -203,8 +193,12 @@ struct efx_tx_buffer { * @initialised: Has hardware queue been initialised? * @timestamping: Is timestamping enabled for this channel? * @xdp_tx: Is this an XDP tx queue? - * @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and - * may also map tx data, depending on the nature of the TSO implementation. + * @old_complete_packets: Value of @complete_packets as of last + * efx_init_tx_queue() + * @old_complete_bytes: Value of @complete_bytes as of last + * efx_init_tx_queue() + * @old_tso_bursts: Value of @tso_bursts as of last efx_init_tx_queue() + * @old_tso_packets: Value of @tso_packets as of last efx_init_tx_queue() * @read_count: Current read pointer. * This is the number of buffers that have been removed from both rings. * @old_write_count: The value of @write_count when last checked. @@ -214,6 +208,20 @@ struct efx_tx_buffer { * avoid cache-line ping-pong between the xmit path and the * completion path. * @merge_events: Number of TX merged completion events + * @bytes_compl: Number of bytes completed during this NAPI poll + * (efx_process_channel()). For BQL. + * @pkts_compl: Number of packets completed during this NAPI poll. + * @complete_packets: Number of packets completed since this struct was + * created. Only counts SKB packets, not XDP TX (it accumulates + * the same values that are reported to BQL). + * @complete_bytes: Number of bytes completed since this struct was + * created. For TSO, counts the superframe size, not the sizes of + * generated frames on the wire (i.e. the headers are only counted + * once) + * @complete_xdp_packets: Number of XDP TX packets completed since this + * struct was created. + * @complete_xdp_bytes: Number of XDP TX bytes completed since this + * struct was created. * @completed_timestamp_major: Top part of the most recent tx timestamp. * @completed_timestamp_minor: Low part of the most recent tx timestamp. * @insert_count: Current insert pointer @@ -227,7 +235,7 @@ struct efx_tx_buffer { * Normally this will equal @write_count, but as option descriptors * don't produce completion events, they won't update this. * Filled in iff @efx->type->option_descriptors; only used for PIO. - * Thus, this is written and used on EF10, and neither on farch. 
+ * Thus, this is only written and used on EF10. * @old_read_count: The value of read_count when last checked. * This is here for performance reasons. The xmit path will * only get the up-to-date value of read_count if this @@ -241,8 +249,10 @@ struct efx_tx_buffer { * @tso_fallbacks: Number of times TSO fallback used * @pushes: Number of times the TX push feature has been used * @pio_packets: Number of times the TX PIO feature has been used - * @xmit_more_available: Are any packets waiting to be pushed to the NIC + * @xmit_pending: Are any packets waiting to be pushed to the NIC * @cb_packets: Number of times the TX copybreak feature has been used + * @notify_count: Count of notified descriptors to the NIC + * @tx_packets: Number of packets sent since this struct was created * @empty_read_count: If the completion path has seen the queue as empty * and the transmission path has not yet checked this, the value of * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. @@ -250,22 +260,26 @@ struct efx_tx_buffer { struct efx_tx_queue { /* Members which don't change on the fast path */ struct efx_nic *efx ____cacheline_aligned_in_smp; - unsigned queue; + unsigned int queue; + unsigned int label; + unsigned int type; unsigned int tso_version; + bool tso_encap; struct efx_channel *channel; struct netdev_queue *core_txq; struct efx_tx_buffer *buffer; struct efx_buffer *cb_page; - struct efx_special_buffer txd; + struct efx_buffer txd; unsigned int ptr_mask; void __iomem *piobuf; unsigned int piobuf_offset; bool initialised; bool timestamping; bool xdp_tx; - - /* Function pointers used in the fast path. */ - int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *); + unsigned long old_complete_packets; + unsigned long old_complete_bytes; + unsigned int old_tso_bursts; + unsigned int old_tso_packets; /* Members used mainly on the completion path */ unsigned int read_count ____cacheline_aligned_in_smp; @@ -273,6 +287,10 @@ struct efx_tx_queue { unsigned int merge_events; unsigned int bytes_compl; unsigned int pkts_compl; + unsigned long complete_packets; + unsigned long complete_bytes; + unsigned long complete_xdp_packets; + unsigned long complete_xdp_bytes; u32 completed_timestamp_major; u32 completed_timestamp_minor; @@ -287,8 +305,9 @@ struct efx_tx_queue { unsigned int tso_fallbacks; unsigned int pushes; unsigned int pio_packets; - bool xmit_more_available; + bool xmit_pending; unsigned int cb_packets; + unsigned int notify_count; /* Statistics to supplement MAC stats */ unsigned long tx_packets; @@ -353,8 +372,12 @@ struct efx_rx_page_state { * @refill_enabled: Enable refill whenever fill level is low * @flush_pending: Set when a RX flush is pending. Has the same lifetime as * @rxq_flush_pending. + * @grant_credits: Posted RX descriptors need to be granted to the MAE with + * %MC_CMD_MAE_COUNTERS_STREAM_GIVE_CREDITS. For %EFX_EXTRA_CHANNEL_TC, + * and only supported on EF100. * @added_count: Number of buffers added to the receive queue. * @notified_count: Number of buffers given to NIC (<= @added_count). + * @granted_count: Number of buffers granted to the MAE (<= @notified_count). * @removed_count: Number of buffers removed from the receive queue. * @scatter_n: Used by NIC specific receive code. * @scatter_len: Used by NIC specific receive code. @@ -375,20 +398,26 @@ struct efx_rx_page_state { * refill was triggered. * @recycle_count: RX buffer recycle counter. * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). 
+ * @grant_work: workitem used to grant credits to the MAE if @grant_credits + * @rx_packets: Number of packets received since this struct was created + * @rx_bytes: Number of bytes received since this struct was created + * @old_rx_packets: Value of @rx_packets as of last efx_init_rx_queue() + * @old_rx_bytes: Value of @rx_bytes as of last efx_init_rx_queue() * @xdp_rxq_info: XDP specific RX queue information. - * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?. */ struct efx_rx_queue { struct efx_nic *efx; int core_index; struct efx_rx_buffer *buffer; - struct efx_special_buffer rxd; + struct efx_buffer rxd; unsigned int ptr_mask; bool refill_enabled; bool flush_pending; + bool grant_credits; unsigned int added_count; unsigned int notified_count; + unsigned int granted_count; unsigned int removed_count; unsigned int scatter_n; unsigned int scatter_len; @@ -406,10 +435,13 @@ struct efx_rx_queue { unsigned int recycle_count; struct timer_list slow_fill; unsigned int slow_fill_count; + struct work_struct grant_work; /* Statistics to supplement MAC stats */ unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long old_rx_packets; + unsigned long old_rx_bytes; struct xdp_rxq_info xdp_rxq_info; - bool xdp_rxq_info_valid; }; enum efx_sync_events_state { @@ -449,14 +481,12 @@ enum efx_sync_events_state { * were checked for expiry * @rfs_expire_index: next accelerated RFS filter ID to check for expiry * @n_rfs_succeeded: number of successful accelerated RFS filter insertions - * @n_rfs_failed; number of failed accelerated RFS filter insertions + * @n_rfs_failed: number of failed accelerated RFS filter insertions * @filter_work: Work item for efx_filter_rfs_expire() * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * indexed by filter ID - * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors - * @n_rx_mcast_mismatch: Count of unmatched multicast frames * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors * @n_rx_overlength: Count of RX_OVERLENGTH errors * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun @@ -468,6 +498,12 @@ enum efx_sync_events_state { * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP + * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was + * not recognised + * @old_n_rx_hw_drops: Count of all RX packets dropped for any reason as of last + * efx_start_channels() + * @old_n_rx_hw_drop_overruns: Value of @n_rx_nodesc_trunc as of last + * efx_start_channels() * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by * __efx_rx_packet(), or zero if there is none * @rx_pkt_index: Ring index of first buffer for next packet to be delivered @@ -475,6 +511,7 @@ enum efx_sync_events_state { * @rx_list: list of SKBs from current RX, awaiting processing * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel + * @tx_queue_by_type: pointers into @tx_queue, or %NULL, indexed by txq type * @sync_events_state: Current state of sync events on this channel * @sync_timestamp_major: Major part of the last ptp sync event * @sync_timestamp_minor: Minor part of the last ptp sync event @@ -492,7 +529,7 @@ struct efx_channel { #ifdef CONFIG_NET_RX_BUSY_POLL unsigned long busy_poll_state; #endif - struct efx_special_buffer 
eventq; + struct efx_buffer eventq; unsigned int eventq_mask; unsigned int eventq_read_ptr; int event_test_cpu; @@ -510,7 +547,6 @@ struct efx_channel { u32 *rps_flow_id; #endif - unsigned int n_rx_tobe_disc; unsigned int n_rx_ip_hdr_chksum_err; unsigned int n_rx_tcp_udp_chksum_err; unsigned int n_rx_outer_ip_hdr_chksum_err; @@ -518,7 +554,6 @@ struct efx_channel { unsigned int n_rx_inner_ip_hdr_chksum_err; unsigned int n_rx_inner_tcp_udp_chksum_err; unsigned int n_rx_eth_crc_err; - unsigned int n_rx_mcast_mismatch; unsigned int n_rx_frm_trunc; unsigned int n_rx_overlength; unsigned int n_skbuff_leaks; @@ -529,6 +564,10 @@ struct efx_channel { unsigned int n_rx_xdp_bad_drops; unsigned int n_rx_xdp_tx; unsigned int n_rx_xdp_redirect; + unsigned int n_rx_mport_bad; + + unsigned int old_n_rx_hw_drops; + unsigned int old_n_rx_hw_drop_overruns; unsigned int rx_pkt_n_frags; unsigned int rx_pkt_index; @@ -536,7 +575,8 @@ struct efx_channel { struct list_head *rx_list; struct efx_rx_queue rx_queue; - struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; + struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL]; + struct efx_tx_queue *tx_queue_by_type[EFX_TXQ_TYPES]; enum efx_sync_events_state sync_events_state; u32 sync_timestamp_major; @@ -562,12 +602,15 @@ struct efx_msi_context { * struct efx_channel_type - distinguishes traffic and extra channels * @handle_no_channel: Handle failure to allocate an extra channel * @pre_probe: Set up extra state prior to initialisation + * @start: called early in efx_start_channels() + * @stop: called early in efx_stop_channels() * @post_remove: Tear down extra state after finalisation, if allocated. * May be called on channels that have not been probed. * @get_name: Generate the channel's name (used for its IRQ handler) * @copy: Copy the channel state prior to reallocation. May be %NULL if * reallocation is not supported. * @receive_skb: Handle an skb ready to be passed to netif_receive_skb() + * @receive_raw: Handle an RX buffer ready to be passed to __efx_rx_packet() * @want_txqs: Determine whether this channel should have TX queues * created. If %NULL, TX queues are not created. 
* @keep_eventq: Flag for whether event queue should be kept initialised @@ -578,10 +621,13 @@ struct efx_msi_context { struct efx_channel_type { void (*handle_no_channel)(struct efx_nic *); int (*pre_probe)(struct efx_channel *); + int (*start)(struct efx_channel *); + void (*stop)(struct efx_channel *); void (*post_remove)(struct efx_channel *); void (*get_name)(struct efx_channel *, char *buf, size_t len); struct efx_channel *(*copy)(const struct efx_channel *); bool (*receive_skb)(struct efx_channel *, struct sk_buff *); + bool (*receive_raw)(struct efx_rx_queue *, u32); bool (*want_txqs)(struct efx_channel *); bool keep_eventq; bool want_pio; @@ -601,13 +647,6 @@ extern const unsigned int efx_loopback_mode_max; #define LOOPBACK_MODE(efx) \ STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) -extern const char *const efx_reset_type_names[]; -extern const unsigned int efx_reset_type_max; -#define RESET_TYPE(type) \ - STRING_TABLE_LOOKUP(type, efx_reset_type) - -void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen); - enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, @@ -618,12 +657,55 @@ enum efx_int_mode { #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) enum nic_state { - STATE_UNINIT = 0, /* device being probed/removed or is frozen */ - STATE_READY = 1, /* hardware ready and netdev registered */ - STATE_DISABLED = 2, /* device disabled due to hardware errors */ - STATE_RECOVERY = 3, /* device recovering from PCI error */ + STATE_UNINIT = 0, /* device being probed/removed */ + STATE_PROBED, /* hardware probed */ + STATE_NET_DOWN, /* netdev registered */ + STATE_NET_UP, /* ready for traffic */ + STATE_DISABLED, /* device disabled due to hardware errors */ + + STATE_RECOVERY = 0x100,/* recovering from PCI error */ + STATE_FROZEN = 0x200, /* frozen by power management */ }; +static inline bool efx_net_active(enum nic_state state) +{ + return state == STATE_NET_DOWN || state == STATE_NET_UP; +} + +static inline bool efx_frozen(enum nic_state state) +{ + return state & STATE_FROZEN; +} + +static inline bool efx_recovering(enum nic_state state) +{ + return state & STATE_RECOVERY; +} + +static inline enum nic_state efx_freeze(enum nic_state state) +{ + WARN_ON(!efx_net_active(state)); + return state | STATE_FROZEN; +} + +static inline enum nic_state efx_thaw(enum nic_state state) +{ + WARN_ON(!efx_frozen(state)); + return state & ~STATE_FROZEN; +} + +static inline enum nic_state efx_recover(enum nic_state state) +{ + WARN_ON(!efx_net_active(state)); + return state | STATE_RECOVERY; +} + +static inline enum nic_state efx_recovered(enum nic_state state) +{ + WARN_ON(!efx_recovering(state)); + return state & ~STATE_RECOVERY; +} + /* Forward declaration */ struct efx_nic; @@ -654,51 +736,6 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left, } /** - * struct efx_phy_operations - Efx PHY operations table - * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, - * efx->loopback_modes. - * @init: Initialise PHY - * @fini: Shut down PHY - * @reconfigure: Reconfigure PHY (e.g. for new link parameters) - * @poll: Update @link_state and report whether it changed. - * Serialised by the mac_lock. - * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock. - * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock. - * @get_fecparam: Get Forward Error Correction settings. Serialised by mac_lock. 
- * @set_fecparam: Set Forward Error Correction settings. Serialised by mac_lock. - * @set_npage_adv: Set abilities advertised in (Extended) Next Page - * (only needed where AN bit is set in mmds) - * @test_alive: Test that PHY is 'alive' (online) - * @test_name: Get the name of a PHY-specific test/result - * @run_tests: Run tests and record results as appropriate (offline). - * Flags are the ethtool tests flags. - */ -struct efx_phy_operations { - int (*probe) (struct efx_nic *efx); - int (*init) (struct efx_nic *efx); - void (*fini) (struct efx_nic *efx); - void (*remove) (struct efx_nic *efx); - int (*reconfigure) (struct efx_nic *efx); - bool (*poll) (struct efx_nic *efx); - void (*get_link_ksettings)(struct efx_nic *efx, - struct ethtool_link_ksettings *cmd); - int (*set_link_ksettings)(struct efx_nic *efx, - const struct ethtool_link_ksettings *cmd); - int (*get_fecparam)(struct efx_nic *efx, struct ethtool_fecparam *fec); - int (*set_fecparam)(struct efx_nic *efx, - const struct ethtool_fecparam *fec); - void (*set_npage_adv) (struct efx_nic *efx, u32); - int (*test_alive) (struct efx_nic *efx); - const char *(*test_name) (struct efx_nic *efx, unsigned int index); - int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); - int (*get_module_eeprom) (struct efx_nic *efx, - struct ethtool_eeprom *ee, - u8 *data); - int (*get_module_info) (struct efx_nic *efx, - struct ethtool_modinfo *modinfo); -}; - -/** * enum efx_phy_mode - PHY operating mode flags * @PHY_MODE_NORMAL: on and should pass traffic * @PHY_MODE_TX_DISABLED: on with TX disabled @@ -732,38 +769,29 @@ struct efx_hw_stat_desc { u16 offset; }; -/* Number of bits used in a multicast filter hash address */ -#define EFX_MCAST_HASH_BITS 8 - -/* Number of (single-bit) entries in a multicast filter hash */ -#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) - -/* An Efx multicast filter hash */ -union efx_multicast_hash { - u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; - efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; -}; - struct vfdi_status; /* The reserved RSS context value */ #define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff /** - * struct efx_rss_context - A user-defined RSS context for filtering - * @list: node of linked list on which this struct is stored + * struct efx_rss_context_priv - driver private data for an RSS context * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC. - * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID. - * @user_id: the rss_context ID exposed to userspace over ethtool. 
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled + */ +struct efx_rss_context_priv { + u32 context_id; + bool rx_hash_udp_4tuple; +}; + +/** + * struct efx_rss_context - an RSS context + * @priv: hardware-specific state * @rx_hash_key: Toeplitz hash key for this RSS context * @indir_table: Indirection table for this RSS context */ struct efx_rss_context { - struct list_head list; - u32 context_id; - u32 user_id; - bool rx_hash_udp_4tuple; + struct efx_rss_context_priv priv; u8 rx_hash_key[40]; u32 rx_indir_table[128]; }; @@ -801,6 +829,7 @@ struct efx_arfs_rule { /** * struct efx_async_filter_insertion - Request to asynchronously insert a filter * @net_dev: Reference to the netdevice + * @net_dev_tracker: reference tracker entry for @net_dev * @spec: The filter to insert * @work: Workitem for this request * @rxq_index: Identifies the channel for which this request was made @@ -808,6 +837,7 @@ struct efx_arfs_rule { */ struct efx_async_filter_insertion { struct net_device *net_dev; + netdevice_tracker net_dev_tracker; struct efx_filter_spec spec; struct work_struct work; u16 rxq_index; @@ -818,6 +848,14 @@ struct efx_async_filter_insertion { #define EFX_RPS_MAX_IN_FLIGHT 8 #endif /* CONFIG_RFS_ACCEL */ +enum efx_xdp_tx_queues_mode { + EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */ + EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */ + EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */ +}; + +struct efx_mae; + /** * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) @@ -842,6 +880,7 @@ struct efx_async_filter_insertion { * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds * @timer_max_ns: Interrupt timer maximum value, in nanoseconds * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues + * @irqs_hooked: Channel interrupts are hooked * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues * @irq_rx_moderation_us: IRQ moderation time for RX event queues * @msg_enable: Log message enable flags @@ -853,8 +892,10 @@ struct efx_async_filter_insertion { * @msi_context: Context for each MSI * @extra_channel_types: Types of extra (non-traffic) channels that * should be allocated for this NIC + * @mae: Details of the Match Action Engine * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues. * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit. + * @xdp_txq_queues_mode: XDP TX queues sharing strategy. * @rxq_entries: Size of receive queues requested by user. * @txq_entries: Size of transmit queues requested by user. * @txq_stop_thresh: TX queue fill level at or above which we stop it. @@ -862,11 +903,11 @@ struct efx_async_filter_insertion { * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches * @sram_lim_qw: Qword address limit of SRAM - * @next_buffer_table: First available buffer table id * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX * @n_extra_tx_channels: Number of extra channels with TX queues + * @tx_queues_per_channel: number of TX queues probed on each channel * @n_xdp_channels: Number of channels used for XDP TX * @xdp_channel_offset: Offset of zeroth channel used for XPD TX. * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel. 
@@ -884,9 +925,7 @@ struct efx_async_filter_insertion { * @rx_packet_ts_offset: Offset of timestamp from start of packet data * (valid only if channel->sync_timestamps_enabled; always negative) * @rx_scatter: Scatter mode enabled for receives - * @rss_context: Main RSS context. Its @list member is the head of the list of - * RSS contexts created by user requests - * @rss_lock: Protects custom RSS context software state in @rss_context.list + * @rss_context: Main RSS context. * @vport_id: The function's vport ID, only relevant for PFs * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired @@ -914,20 +953,13 @@ struct efx_async_filter_insertion { * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS) * @stats_buffer: DMA buffer for statistics * @phy_type: PHY type - * @phy_op: PHY interface * @phy_data: PHY private data (including PHY-specific stats) - * @mdio: PHY MDIO interface - * @mdio_bus: PHY MDIO bus ID (only used by Siena) * @phy_mode: PHY operating mode. Serialised by @mac_lock. * @link_advertising: Autonegotiation advertising flags * @fec_config: Forward Error Correction configuration flags. For bit positions * see &enum ethtool_fec_config_bits. * @link_state: Current state of the link * @n_link_state_changes: Number of times the link has changed state - * @unicast_filter: Flag for Falcon-arch simple unicast filter. - * Protected by @mac_lock. - * @multicast_hash: Multicast hash table for Falcon-arch. - * Protected by @mac_lock. * @wanted_fc: Wanted flow control flags * @fc_disable: When non-zero flow control is disabled. Typically used to * ensure that network back pressure doesn't delay dma queue flushes. @@ -956,11 +988,21 @@ struct efx_async_filter_insertion { * @vf_count: Number of VFs intended to be enabled. * @vf_init_count: Number of VFs that have been fully initialised. * @vi_scale: log2 number of vnics per VF. + * @vf_reps_lock: Protects vf_reps list + * @vf_reps: local VF reps * @ptp_data: PTP state data * @ptp_warned: has this NIC seen and warned about unexpected PTP events? * @vpd_sn: Serial number read from VPD * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their * xdp_rxq_info structures? + * @netdev_notifier: Netdevice notifier. + * @netevent_notifier: Netevent notifier (for neighbour updates). + * @tc: state for TC offload (EF100). + * @devlink: reference to devlink structure owned by this device + * @dl_port: devlink port associated with the PF + * @mem_bar: The BAR that is mapped into membase. + * @reg_base: Offset from the start of the bar to the function control window. + * @reflash_mutex: Mutex for serialising firmware reflash operations. * @monitor_work: Hardware monitor workitem * @biu_lock: BIU (bus interface unit) lock * @last_irq_cpu: Last CPU to handle a possible test interrupt. 
This @@ -996,6 +1038,7 @@ struct efx_nic { unsigned int timer_quantum_ns; unsigned int timer_max_ns; bool irq_rx_adaptive; + bool irqs_hooked; unsigned int irq_mod_step_us; unsigned int irq_rx_moderation_us; u32 msg_enable; @@ -1007,9 +1050,11 @@ struct efx_nic { struct efx_msi_context msi_context[EFX_MAX_CHANNELS]; const struct efx_channel_type * extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; + struct efx_mae *mae; unsigned int xdp_tx_queue_count; struct efx_tx_queue **xdp_tx_queues; + enum efx_xdp_tx_queues_mode xdp_txq_queues_mode; unsigned rxq_entries; unsigned txq_entries; @@ -1019,9 +1064,9 @@ struct efx_nic { unsigned tx_dc_base; unsigned rx_dc_base; unsigned sram_lim_qw; - unsigned next_buffer_table; unsigned int max_channels; + unsigned int max_vis; unsigned int max_tx_channels; unsigned n_channels; unsigned n_rx_channels; @@ -1029,6 +1074,7 @@ struct efx_nic { unsigned tx_channel_offset; unsigned n_tx_channels; unsigned n_extra_tx_channels; + unsigned int tx_queues_per_channel; unsigned int n_xdp_channels; unsigned int xdp_channel_offset; unsigned int xdp_tx_per_channel; @@ -1045,7 +1091,6 @@ struct efx_nic { int rx_packet_ts_offset; bool rx_scatter; struct efx_rss_context rss_context; - struct mutex rss_lock; u32 vport_id; unsigned int_error_count; @@ -1082,10 +1127,7 @@ struct efx_nic { bool rx_nodesc_drops_prev_state; unsigned int phy_type; - const struct efx_phy_operations *phy_op; void *phy_data; - struct mdio_if_info mdio; - unsigned int mdio_bus; enum efx_phy_mode phy_mode; __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); @@ -1093,8 +1135,6 @@ struct efx_nic { struct efx_link_state link_state; unsigned int n_link_state_changes; - bool unicast_filter; - union efx_multicast_hash multicast_hash; u8 wanted_fc; unsigned fc_disable; @@ -1129,6 +1169,8 @@ struct efx_nic { unsigned vf_init_count; unsigned vi_scale; #endif + spinlock_t vf_reps_lock; + struct list_head vf_reps; struct efx_ptp_data *ptp_data; bool ptp_warned; @@ -1136,6 +1178,16 @@ struct efx_nic { char *vpd_sn; bool xdp_rxq_info_failed; + struct notifier_block netdev_notifier; + struct notifier_block netevent_notifier; + struct efx_tc_state *tc; + + struct devlink *devlink; + struct devlink_port *dl_port; + unsigned int mem_bar; + u32 reg_base; + struct mutex reflash_mutex; + /* The following fields may be written more often */ struct delayed_work monitor_work ____cacheline_aligned_in_smp; @@ -1145,6 +1197,24 @@ struct efx_nic { atomic_t n_rx_noskb_drops; }; +/** + * struct efx_probe_data - State after hardware probe + * @pci_dev: The PCI device + * @efx: Efx NIC details + */ +struct efx_probe_data { + struct pci_dev *pci_dev; + struct efx_nic efx; +}; + +static inline struct efx_nic *efx_netdev_priv(struct net_device *dev) +{ + struct efx_probe_data **probe_ptr = netdev_priv(dev); + struct efx_probe_data *probe_data = *probe_ptr; + + return &probe_data->efx; +} + static inline int efx_dev_registered(struct efx_nic *efx) { return efx->net_dev->reg_state == NETREG_REGISTERED; @@ -1164,12 +1234,9 @@ struct efx_mtd_partition { }; struct efx_udp_tunnel { +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID 0xffff u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */ __be16 port; - /* Count of repeated adds of the same port. Used only inside the list, - * not in request arguments. 
- */ - u16 count; }; /** @@ -1191,19 +1258,17 @@ struct efx_udp_tunnel { * @remove_port: Free resources allocated by probe_port() * @handle_global_event: Handle a "global" event (may be %NULL) * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) - * @prepare_flush: Prepare the hardware for flushing the DMA queues - * (for Falcon architecture) - * @finish_flush: Clean up after flushing the DMA queues (for Falcon - * architecture) * @prepare_flr: Prepare for an FLR * @finish_flr: Clean up after an FLR * @describe_stats: Describe statistics for ethtool * @update_stats: Update statistics not provided by event handling. * Either argument may be %NULL. + * @update_stats_atomic: Update statistics while in atomic context, if that + * is more limiting than @update_stats. Otherwise, leave %NULL and + * driver core will call @update_stats. * @start_stats: Start the regular fetching of statistics * @pull_stats: Pull stats from the NIC and wait until they arrive. * @stop_stats: Stop the regular fetching of statistics - * @set_id_led: Set state of identifying LED or revert to automatic function * @push_irq_moderation: Apply interrupt moderation value * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL) @@ -1213,8 +1278,8 @@ struct efx_udp_tunnel { * @get_wol: Get WoL configuration from driver state * @set_wol: Push WoL configuration to the NIC * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) - * @test_chip: Test registers. May use efx_farch_test_registers(), and is - * expected to reset the NIC. + * @get_fec_stats: Get standard FEC statistics. + * @test_chip: Test registers. This is expected to reset the NIC. * @test_nvram: Test validity of NVRAM contents * @mcdi_request: Send an MCDI request with the given header and SDU. * The SDU length may be any value from 0 up to the protocol- @@ -1236,10 +1301,11 @@ struct efx_udp_tunnel { * a pointer to the &struct efx_msi_context for the channel. * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument * is a pointer to the &struct efx_nic. - * @tx_probe: Allocate resources for TX queue + * @tx_probe: Allocate resources for TX queue (and select TXQ type) * @tx_init: Initialise TX queue on the NIC * @tx_remove: Free resources for TX queue * @tx_write: Write TX descriptors and doorbell + * @tx_enqueue: Add an SKB to TX queue * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC * @rx_push_rss_context_config: Write RSS hash key and indirection table for @@ -1251,6 +1317,8 @@ struct efx_udp_tunnel { * @rx_remove: Free resources for RX queue * @rx_write: Write RX descriptors and doorbell * @rx_defer_refill: Generate a refill reminder event + * @rx_packet: Receive the queued RX buffer on a channel + * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash * @ev_probe: Allocate resources for event queue * @ev_init: Initialise event queue on the NIC * @ev_fini: Deinitialise event queue on the NIC @@ -1293,10 +1361,10 @@ struct efx_udp_tunnel { * @tso_versions: Returns mask of firmware-assisted TSO versions supported. * If %NULL, then device does not support any TSO version. * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required. 
- * @udp_tnl_add_port: Add a UDP tunnel port * @udp_tnl_has_port: Check if a port has been added as UDP tunnel - * @udp_tnl_del_port: Remove a UDP tunnel port * @print_additional_fwver: Dump NIC-specific additional FW version info + * @sensor_event: Handle a sensor event from MCDI + * @rx_recycle_ring_size: Size of the RX recycle ring * @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1311,10 +1379,10 @@ struct efx_udp_tunnel { * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers * @option_descriptors: NIC supports TX option descriptors + * @flash_auto_partition: firmware flash uses AUTO partition, driver does + * not need to perform image parsing * @min_interrupt_mode: Lowest capability interrupt mode supported * from &enum efx_int_mode. - * @max_interrupt_mode: Highest capability interrupt mode supported - * from &enum efx_int_mode. * @timer_period_max: Maximum period of interrupt timer (in ticks) * @offload_features: net_device feature flags for protocol offload * features implemented in hardware @@ -1338,25 +1406,26 @@ struct efx_nic_type { void (*remove_port)(struct efx_nic *efx); bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); int (*fini_dmaq)(struct efx_nic *efx); - void (*prepare_flush)(struct efx_nic *efx); - void (*finish_flush)(struct efx_nic *efx); void (*prepare_flr)(struct efx_nic *efx); void (*finish_flr)(struct efx_nic *efx); - size_t (*describe_stats)(struct efx_nic *efx, u8 *names); + size_t (*describe_stats)(struct efx_nic *efx, u8 **names); size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats); + size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats); void (*start_stats)(struct efx_nic *efx); void (*pull_stats)(struct efx_nic *efx); void (*stop_stats)(struct efx_nic *efx); - void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); void (*push_irq_moderation)(struct efx_channel *channel); int (*reconfigure_port)(struct efx_nic *efx); void (*prepare_enable_fc_tx)(struct efx_nic *efx); - int (*reconfigure_mac)(struct efx_nic *efx); + int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only); bool (*check_mac_fault)(struct efx_nic *efx); void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); int (*set_wol)(struct efx_nic *efx, u32 type); void (*resume_wol)(struct efx_nic *efx); + void (*get_fec_stats)(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats); unsigned int (*check_caps)(const struct efx_nic *efx, u8 flag, u32 offset); @@ -1379,15 +1448,16 @@ struct efx_nic_type { void (*tx_init)(struct efx_tx_queue *tx_queue); void (*tx_remove)(struct efx_tx_queue *tx_queue); void (*tx_write)(struct efx_tx_queue *tx_queue); + netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb); unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); int (*rx_push_rss_config)(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key); int (*rx_pull_rss_config)(struct efx_nic *efx); int (*rx_push_rss_context_config)(struct efx_nic *efx, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, const u32 *rx_indir_table, - const u8 *key); + const u8 *key, bool delete); int (*rx_pull_rss_context_config)(struct efx_nic *efx, struct efx_rss_context *ctx); void 
(*rx_restore_rss_contexts)(struct efx_nic *efx); @@ -1396,6 +1466,8 @@ struct efx_nic_type { void (*rx_remove)(struct efx_rx_queue *rx_queue); void (*rx_write)(struct efx_rx_queue *rx_queue); void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); + void (*rx_packet)(struct efx_channel *channel); + bool (*rx_buf_hash_valid)(const u8 *prefix); int (*ev_probe)(struct efx_channel *channel); int (*ev_init)(struct efx_channel *channel); void (*ev_fini)(struct efx_channel *channel); @@ -1440,7 +1512,7 @@ struct efx_nic_type { void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time); int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); int (*ptp_set_ts_config)(struct efx_nic *efx, - struct hwtstamp_config *init); + struct kernel_hwtstamp_config *init); int (*sriov_configure)(struct efx_nic *efx, int num_vfs); int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); @@ -1449,9 +1521,7 @@ struct efx_nic_type { int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); - void (*sriov_reset)(struct efx_nic *efx); - void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i); - int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac); + int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac); int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, u8 qos); int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i, @@ -1467,11 +1537,11 @@ struct efx_nic_type { int (*set_mac_address)(struct efx_nic *efx); u32 (*tso_versions)(struct efx_nic *efx); int (*udp_tnl_push_ports)(struct efx_nic *efx); - int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); - int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf, size_t len); + void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev); + unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx); int revision; unsigned int txd_ptr_tbl_base; @@ -1487,8 +1557,8 @@ struct efx_nic_type { bool can_rx_scatter; bool always_rx_scatter; bool option_descriptors; + bool flash_auto_partition; unsigned int min_interrupt_mode; - unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; int mcdi_max_ver; @@ -1524,12 +1594,11 @@ efx_get_channel(struct efx_nic *efx, unsigned index) _channel = _channel->channel ? 
\ (_efx)->channel[_channel->channel - 1] : NULL) -static inline struct efx_tx_queue * -efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) +static inline struct efx_channel * +efx_get_tx_channel(struct efx_nic *efx, unsigned int index) { - EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels || - type >= EFX_TXQ_TYPES); - return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels); + return efx->channel[efx->tx_channel_offset + index]; } static inline struct efx_channel * @@ -1547,21 +1616,29 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel) static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) { - return true; + return channel && channel->channel >= channel->efx->tx_channel_offset; +} + +static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel) +{ + if (efx_channel_is_xdp_tx(channel)) + return channel->efx->xdp_tx_per_channel; + return channel->efx->tx_queues_per_channel; } static inline struct efx_tx_queue * -efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) +efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int type) { - EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_tx_queues(channel) || - type >= EFX_TXQ_TYPES); - return &channel->tx_queue[type]; + EFX_WARN_ON_ONCE_PARANOID(type >= EFX_TXQ_TYPES); + return channel->tx_queue_by_type[type]; } -static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) +static inline struct efx_tx_queue * +efx_get_tx_queue(struct efx_nic *efx, unsigned int index, unsigned int type) { - return !(tx_queue->efx->net_dev->num_tc < 2 && - tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); + struct efx_channel *channel = efx_get_tx_channel(efx, index); + + return efx_channel_get_tx_queue(channel, type); } /* Iterate over all TX queues belonging to a channel */ @@ -1570,18 +1647,8 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) ; \ else \ for (_tx_queue = (_channel)->tx_queue; \ - _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \ - (efx_tx_queue_used(_tx_queue) || \ - efx_channel_is_xdp_tx(_channel)); \ - _tx_queue++) - -/* Iterate over all possible TX queues belonging to a channel */ -#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ - if (!efx_channel_has_tx_queues(_channel)) \ - ; \ - else \ - for (_tx_queue = (_channel)->tx_queue; \ - _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ + _tx_queue < (_channel)->tx_queue + \ + efx_channel_num_tx_queues(_channel); \ _tx_queue++) static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) @@ -1664,6 +1731,34 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } +/* Get the max fill level of the TX queues on this channel */ +static inline unsigned int +efx_channel_tx_fill_level(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + unsigned int fill_level = 0; + + efx_for_each_channel_tx_queue(tx_queue, channel) + fill_level = max(fill_level, + tx_queue->insert_count - tx_queue->read_count); + + return fill_level; +} + +/* Conservative approximation of efx_channel_tx_fill_level using cached value */ +static inline unsigned int +efx_channel_tx_old_fill_level(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + unsigned int fill_level = 0; + + efx_for_each_channel_tx_queue(tx_queue, channel) + fill_level = max(fill_level, + tx_queue->insert_count - tx_queue->old_read_count); + + return 
fill_level; +} + /* Get all supported features. * If a feature is not fixed, it is present in hw_features. * If a feature is fixed, it does not present in hw_features, but |
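
For context on the new TX queue typing above, here is a minimal sketch of how the %EFX_TXQ_TYPE_OUTER_CSUM / %EFX_TXQ_TYPE_INNER_CSUM bits are meant to combine into an index into @tx_queue_by_type. The helper name is made up and the real selection logic lives in the driver's TX path, not in this header; it assumes this header's declarations are in scope.

/* Illustrative only: pick the per-channel TX queue whose checksum-offload
 * configuration matches the skb.  Encapsulated packets want the inner-csum
 * queue, plain CHECKSUM_PARTIAL packets the outer-csum one, and everything
 * else the no-offload queue (type 0).
 */
static struct efx_tx_queue *
example_select_txq(struct efx_channel *channel, const struct sk_buff *skb)
{
	unsigned int type = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		type |= skb->encapsulation ? EFX_TXQ_TYPE_INNER_CSUM :
					     EFX_TXQ_TYPE_OUTER_CSUM;

	return efx_channel_get_tx_queue(channel, type);
}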
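The reworked &enum nic_state above encodes %STATE_RECOVERY and %STATE_FROZEN as flag bits on top of the base %STATE_NET_DOWN / %STATE_NET_UP values, so the pre-transition state is preserved across freeze/recovery. A minimal sketch, assuming the usual efx->state field and hypothetical suspend/resume callers (the driver's actual PM hooks are elsewhere):

/* Freeze composes with whichever net state is current, and thaw simply
 * clears the flag again, so no separate "state before suspend" needs to
 * be remembered.
 */
static int example_suspend(struct efx_nic *efx)
{
	if (efx_net_active(efx->state))
		efx->state = efx_freeze(efx->state); /* e.g. NET_UP -> NET_UP | FROZEN */
	/* ... quiesce the datapath ... */
	return 0;
}

static int example_resume(struct efx_nic *efx)
{
	/* ... restart the datapath ... */
	if (efx_frozen(efx->state))
		efx->state = efx_thaw(efx->state);   /* back to the pre-suspend state */
	return 0;
}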
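The split of RSS state into &struct efx_rss_context_priv keeps just the fields the firmware operation needs (@context_id and the hashing flag), while the hash key and indirection table stay in the ethtool-facing &struct efx_rss_context wrapper. Illustrative only, using the @rx_push_rss_context_config signature shown above; the wrapper function itself is hypothetical:

/* Push an RSS context's indirection table and key to the NIC; only the
 * priv part is handed to the NIC-type hook, the rest of the wrapper is
 * host-only state.
 */
static int example_push_rss_context(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	return efx->type->rx_push_rss_context_config(efx, &ctx->priv,
						     ctx->rx_indir_table,
						     ctx->rx_hash_key,
						     false /* delete */);
}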
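efx_netdev_priv() above implies that netdev_priv() now holds only a pointer to a separately allocated &struct efx_probe_data rather than embedding &struct efx_nic itself. A sketch of the matching allocation pattern, assuming this header is in scope (the actual probe path lives in the driver's PCI probe code):

/* The net_device private area is just one pointer wide; efx_netdev_priv()
 * dereferences it to reach the efx_nic embedded in efx_probe_data.
 */
static struct net_device *example_alloc_netdev(struct efx_probe_data *probe_data)
{
	struct efx_probe_data **probe_ptr;
	struct net_device *net_dev;

	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return NULL;

	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	return net_dev;
}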
