Diffstat (limited to 'drivers/net/ethernet/google/gve/gve.h')
-rw-r--r-- | drivers/net/ethernet/google/gve/gve.h | 294
1 file changed, 195 insertions(+), 99 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4814c96d5fe7..2fab38c8ee78 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
  */
 
 #ifndef _GVE_H_
@@ -13,6 +13,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/u64_stats_sync.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp.h>
 
 #include "gve_desc.h"
@@ -50,12 +51,28 @@
 /* PTYPEs are always 10 bits. */
 #define GVE_NUM_PTYPES 1024
 
+/* Default minimum ring size */
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+
 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
 
 #define GVE_MAX_RX_BUFFER_SIZE 4096
 
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
+#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
+
+#define GVE_FLOW_RULES_CACHE_SIZE \
+	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
+#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
+	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+
+#define GVE_RSS_KEY_SIZE 40
+#define GVE_RSS_INDIR_SIZE 128
+
 #define GVE_XDP_ACTIONS 5
 
 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -63,7 +80,6 @@
 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
 
 #define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
 
 /* Maximum TSO size supported on DQO */
 #define GVE_DQO_TX_MAX 0x3FFFF
@@ -91,9 +107,16 @@ struct gve_rx_desc_queue {
 
 /* The page info for a single slot in the RX data queue */
 struct gve_rx_slot_page_info {
-	struct page *page;
+	/* netmem is used for DQO RDA mode
+	 * page is used in all other modes
+	 */
+	union {
+		struct page *page;
+		netmem_ref netmem;
+	};
 	void *page_address;
 	u32 page_offset; /* offset to write to in page */
+	unsigned int buf_size;
 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
 	u16 pad; /* adjustment for rx padding */
 	u8 can_flip; /* tracks if the networking stack is using the page */
@@ -206,6 +229,11 @@ struct gve_rx_cnts {
 /* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
 	struct gve_priv *gve;
+
+	u16 packet_buffer_size; /* Size of buffer posted to NIC */
+	u16 packet_buffer_truesize; /* Total size of RX buffer */
+	u16 rx_headroom;
+
 	union {
 		/* GQI fields */
 		struct {
@@ -214,7 +242,6 @@ struct gve_rx_ring {
 
 			/* threshold for posting new buffs and descs */
 			u32 db_threshold;
-			u16 packet_buffer_size;
 
 			u32 qpl_copy_pool_mask;
 			u32 qpl_copy_pool_head;
@@ -265,6 +292,8 @@ struct gve_rx_ring {
 
 		/* Address info of the buffers for header-split */
 		struct gve_header_buf hdr_bufs;
+
+		struct page_pool *page_pool;
 	} dqo;
 };
 
@@ -590,8 +619,6 @@ struct gve_tx_ring {
 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
 	struct xsk_buff_pool *xsk_pool;
-	u32 xdp_xsk_wakeup;
-	u32 xdp_xsk_done;
 	u64 xdp_xsk_sent;
 	u64 xdp_xmit;
 	u64 xdp_xmit_errors;
@@ -607,12 +634,21 @@ struct gve_notify_block {
 	struct gve_priv *priv;
 	struct gve_tx_ring *tx; /* tx rings on this block */
 	struct gve_rx_ring *rx; /* rx rings on this block */
+	u32 irq;
+};
+
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
+	u16 max_queues;
+	u16 num_queues;
+	u16 packet_buffer_size;
 };
 
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
 	u16 max_queues;
-	u16 num_queues; /* current */
+	u16 num_queues; /* number of TX queues, excluding XDP queues */
+	u16 num_xdp_queues;
 };
 
 /* Tracks the available and used qpl IDs */
@@ -621,11 +657,6 @@ struct gve_qpl_config {
 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
 };
 
-struct gve_options_dqo_rda {
-	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
-	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
-};
-
 struct gve_irq_db {
 	__be32 index;
 } ____cacheline_aligned;
@@ -639,31 +670,13 @@ struct gve_ptype_lut {
 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
 };
 
-/* Parameters for allocating queue page lists */
-struct gve_qpls_alloc_cfg {
-	struct gve_qpl_config *qpl_cfg;
-	struct gve_queue_config *tx_cfg;
-	struct gve_queue_config *rx_cfg;
-
-	u16 num_xdp_queues;
-	bool raw_addressing;
-	bool is_gqi;
-
-	/* Allocated resources are returned here */
-	struct gve_queue_page_list *qpls;
-};
-
 /* Parameters for allocating resources for tx queues */
 struct gve_tx_alloc_rings_cfg {
-	struct gve_queue_config *qcfg;
+	struct gve_tx_queue_config *qcfg;
 
-	/* qpls and qpl_cfg must already be allocated */
-	struct gve_queue_page_list *qpls;
-	struct gve_qpl_config *qpl_cfg;
+	u16 num_xdp_rings;
 
 	u16 ring_size;
-	u16 start_idx;
-	u16 num_rings;
 	bool raw_addressing;
 
 	/* Allocated resources are returned here */
@@ -673,17 +686,15 @@ struct gve_tx_alloc_rings_cfg {
 /* Parameters for allocating resources for rx queues */
 struct gve_rx_alloc_rings_cfg {
 	/* tx config is also needed to determine QPL ids */
-	struct gve_queue_config *qcfg;
-	struct gve_queue_config *qcfg_tx;
-
-	/* qpls and qpl_cfg must already be allocated */
-	struct gve_queue_page_list *qpls;
-	struct gve_qpl_config *qpl_cfg;
+	struct gve_rx_queue_config *qcfg_rx;
+	struct gve_tx_queue_config *qcfg_tx;
 
 	u16 ring_size;
 	u16 packet_buffer_size;
 	bool raw_addressing;
 	bool enable_header_split;
+	bool reset_rss;
+	bool xdp;
 
 	/* Allocated resources are returned here */
 	struct gve_rx_ring *rx;
@@ -701,11 +712,48 @@ enum gve_queue_format {
 	GVE_DQO_QPL_FORMAT = 0x4,
 };
 
+struct gve_flow_spec {
+	__be32 src_ip[4];
+	__be32 dst_ip[4];
+	union {
+		struct {
+			__be16 src_port;
+			__be16 dst_port;
+		};
+		__be32 spi;
+	};
+	union {
+		u8 tos;
+		u8 tclass;
+	};
+};
+
+struct gve_flow_rule {
+	u32 location;
+	u16 flow_type;
+	u16 action;
+	struct gve_flow_spec key;
+	struct gve_flow_spec mask;
+};
+
+struct gve_flow_rules_cache {
+	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
+	struct gve_adminq_queried_flow_rule *rules_cache;
+	__be32 *rule_ids_cache;
+	/* The total number of queried rules that stored in the caches */
+	u32 rules_cache_num;
+	u32 rule_ids_cache_num;
+};
+
+struct gve_rss_config {
+	u8 *hash_key;
+	u32 *hash_lut;
+};
+
 struct gve_priv {
 	struct net_device *dev;
 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
-	struct gve_queue_page_list *qpls; /* array of num qpls */
 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
 	dma_addr_t irq_db_indices_bus;
@@ -718,19 +766,21 @@ struct gve_priv {
 	u16 num_event_counters;
 	u16 tx_desc_cnt; /* num desc per ring */
 	u16 rx_desc_cnt; /* num desc per ring */
+	u16 max_tx_desc_cnt;
+	u16 max_rx_desc_cnt;
+	u16 min_tx_desc_cnt;
+	u16 min_rx_desc_cnt;
+	bool modify_ring_size_enabled;
+	bool default_min_ring_size;
 	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
-	u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
-	u16 rx_data_slot_cnt; /* rx buffer length */
 	u64 max_registered_pages;
 	u64 num_registered_pages; /* num pages registered with NIC */
 	struct bpf_prog *xdp_prog; /* XDP BPF program */
 	u32 rx_copybreak; /* copy packets smaller than this */
 	u16 default_num_queues; /* default num queues to set up */
 
-	u16 num_xdp_queues;
-	struct gve_queue_config tx_cfg;
-	struct gve_queue_config rx_cfg;
-	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
+	struct gve_tx_queue_config tx_cfg;
+	struct gve_rx_queue_config rx_cfg;
 	u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
 
 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -745,6 +795,7 @@ struct gve_priv {
 	union gve_adminq_command *adminq;
 	dma_addr_t adminq_bus_addr;
 	struct dma_pool *adminq_pool;
+	struct mutex adminq_lock; /* Protects adminq command execution */
 	u32 adminq_mask; /* masks prod_cnt to adminq size */
 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
@@ -764,6 +815,10 @@ struct gve_priv {
 	u32 adminq_report_link_speed_cnt;
 	u32 adminq_get_ptype_map_cnt;
 	u32 adminq_verify_driver_compatibility_cnt;
+	u32 adminq_query_flow_rules_cnt;
+	u32 adminq_cfg_flow_rule_cnt;
+	u32 adminq_cfg_rss_cnt;
+	u32 adminq_query_rss_cnt;
 
 	/* Global stats */
 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -792,11 +847,9 @@ struct gve_priv {
 	u64 link_speed;
 	bool up_before_suspend; /* True if dev was up before suspend */
 
-	struct gve_options_dqo_rda options_dqo_rda;
 	struct gve_ptype_lut *ptype_lut_dqo;
 
 	/* Must be a power of two. */
-	u16 data_buffer_size_dqo;
 	u16 max_rx_buffer_size; /* device limit */
 
 	enum gve_queue_format queue_format;
@@ -807,6 +860,16 @@ struct gve_priv {
 
 	u16 header_buf_size; /* device configured, header-split supported if non-zero */
 	bool header_split_enabled; /* True if the header split is enabled by the user */
+
+	u32 max_flow_rules;
+	u32 num_flow_rules;
+
+	struct gve_flow_rules_cache flow_rules_cache;
+
+	u16 rss_key_size;
+	u16 rss_lut_size;
+	bool cache_rss_config;
+	struct gve_rss_config rss_config;
 };
 
 enum gve_service_task_flags_bit {
@@ -989,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
 }
 
 /* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
-				  int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
 		return 0;
-	return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
-		return 0;
-
-	return priv->num_xdp_queues;
+	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}
 
 /* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
@@ -1027,8 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
 	return priv->tx_cfg.max_queues + rx_qid;
 }
 
-/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+				    int rx_qid)
 {
 	return tx_cfg->max_queues + rx_qid;
 }
@@ -1038,41 +1090,17 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
 	return gve_tx_qpl_id(priv, 0);
 }
 
-/* Returns the index into priv->qpls where the first rx queue's QPL resides */
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
 {
 	return gve_get_rx_qpl_id(tx_cfg, 0);
 }
 
-/* Returns a pointer to the next available tx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
-					      int tx_qid)
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
 {
-	/* QPL already in use */
-	if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
-		return NULL;
-	set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
-	return &cfg->qpls[tx_qid];
-}
-
-/* Returns a pointer to the next available rx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
-					      int rx_qid)
-{
-	int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
-	/* QPL already in use */
-	if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
-		return NULL;
-	set_bit(id, cfg->qpl_cfg->qpl_id_map);
-	return &cfg->qpls[id];
-}
-
-/* Unassigns the qpl with the given id */
-static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
-{
-	clear_bit(id, qpl_cfg->qpl_id_map);
+	/* For DQO, page count should be more than ring size for
+	 * out-of-order completions. Set it to two times of ring size.
+	 */
+	return 2 * rx_desc_cnt;
 }
 
 /* Returns the correct dma direction for tx and rx qpls */
@@ -1093,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
 
 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
 {
-	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
 }
 
 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1106,6 +1134,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
 	return gve_xdp_tx_queue_id(priv, 0);
 }
 
+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+	switch (priv->queue_format) {
+	case GVE_GQI_QPL_FORMAT:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /* gqi napi handler defined in gve_main.c */
 int gve_napi_poll(struct napi_struct *napi, int budget);
 
@@ -1115,6 +1153,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
 		   enum dma_data_direction, gfp_t gfp_flags);
 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 		   enum dma_data_direction);
+/* qpls */
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+						      u32 id, int pages);
+void gve_free_queue_page_list(struct gve_priv *priv,
+			      struct gve_queue_page_list *qpl,
+			      u32 id);
 /* tx handling */
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -1124,6 +1168,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
 			   struct gve_tx_alloc_rings_cfg *cfg);
 void gve_tx_free_rings_gqi(struct gve_priv *priv,
@@ -1137,7 +1182,12 @@ bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
 int gve_rx_poll(struct gve_notify_block *block, int budget);
 bool gve_rx_work_pending(struct gve_rx_ring *rx);
-int gve_rx_alloc_rings(struct gve_priv *priv);
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+			  struct gve_rx_alloc_rings_cfg *cfg,
+			  struct gve_rx_ring *rx,
+			  int idx);
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+			  struct gve_rx_alloc_rings_cfg *cfg);
 int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
 			   struct gve_rx_alloc_rings_cfg *cfg);
 void gve_rx_free_rings_gqi(struct gve_priv *priv,
@@ -1147,12 +1197,58 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
 bool gve_header_split_supported(const struct gve_priv *priv);
 int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
+/* rx buffer handling */
+int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
+void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
+		       bool free_page);
+struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
+bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+				struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_buf_state(struct gve_rx_ring *rx,
+			struct gve_rx_buf_state_dqo *buf_state);
+struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
+						   struct gve_index_list *list);
+void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
+			   struct gve_rx_buf_state_dqo *buf_state);
+struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
+void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+			 struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state,
+			   bool allow_direct);
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+		      struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_buffer(struct gve_rx_ring *rx,
+		     struct gve_rx_buf_state_dqo *buf_state);
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+					  struct gve_rx_ring *rx,
+					  bool xdp);
+
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_adjust_config(struct gve_priv *priv,
+		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_adjust_queues(struct gve_priv *priv,
-		      struct gve_queue_config new_rx_config,
-		      struct gve_queue_config new_tx_config);
+		      struct gve_rx_queue_config new_rx_config,
+		      struct gve_tx_queue_config new_tx_config,
+		      bool reset_rss);
+/* flow steering rule */
+int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
+int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_flow_rules_reset(struct gve_priv *priv);
+/* RSS config */
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
 /* report stats handling */
 void gve_handle_report_stats(struct gve_priv *priv);
 /* exported by ethtool.c */
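
Editor's sketch (not part of the patch above): the diff splits the old gve_queue_config into separate TX and RX configs, moves the XDP queue count from gve_priv into gve_tx_queue_config, and sizes a DQO RX QPL at twice the ring size so out-of-order completions never exhaust the page supply. The standalone C below mirrors that arithmetic only; it uses stdint types in place of the kernel's u16/u32, and the values in main() are illustrative, not taken from the driver.

/*
 * Hypothetical standalone demo of the patch's queue-config split and
 * gve_get_rx_pages_per_qpl_dqo() arithmetic. Compiles with any C99
 * compiler; nothing here is kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct gve_tx_queue_config {
	uint16_t max_queues;
	uint16_t num_queues;     /* TX queues, excluding XDP queues */
	uint16_t num_xdp_queues; /* now tracked here, not in gve_priv */
};

/* Total TX queues, as in the patched gve_num_tx_queues(). */
static uint32_t num_tx_queues(const struct gve_tx_queue_config *cfg)
{
	return (uint32_t)cfg->num_queues + cfg->num_xdp_queues;
}

/* DQO completes RX buffers out of order, so a QPL carries twice the
 * ring size in pages, as in gve_get_rx_pages_per_qpl_dqo().
 */
static uint32_t rx_pages_per_qpl_dqo(uint32_t rx_desc_cnt)
{
	return 2 * rx_desc_cnt;
}

int main(void)
{
	struct gve_tx_queue_config tx = {
		.max_queues = 16, .num_queues = 8, .num_xdp_queues = 8,
	};

	printf("total tx queues: %u\n", num_tx_queues(&tx));          /* 16 */
	printf("rx pages per qpl: %u\n", rx_pages_per_qpl_dqo(512));  /* 1024 */
	return 0;
}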