Diffstat (limited to 'include/net/mana/mana.h')
-rw-r--r--  include/net/mana/mana.h  412
1 file changed, 395 insertions, 17 deletions
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 3bb579962a14..d7e089c6b694 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -4,6 +4,9 @@
 #ifndef _MANA_H
 #define _MANA_H
 
+#include <net/xdp.h>
+#include <net/net_shaper.h>
+
 #include "gdma.h"
 #include "hw_channel.h"
 
@@ -28,26 +31,42 @@ enum TRI_STATE {
 };
 
 /* Number of entries for hardware indirection table must be in power of 2 */
-#define MANA_INDIRECT_TABLE_SIZE 64
-#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
+#define MANA_INDIRECT_TABLE_MAX_SIZE 512
+#define MANA_INDIRECT_TABLE_DEF_SIZE 64
 
 /* The Toeplitz hash key's length in bytes: should be multiple of 8 */
 #define MANA_HASH_KEY_SIZE 40
 
 #define COMP_ENTRY_SIZE 64
 
-#define ADAPTER_MTU_SIZE 1500
-#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
+/* This Max value for RX buffers is derived from __alloc_page()'s max page
+ * allocation calculation. It allows maximum 2^(MAX_ORDER -1) pages. RX buffer
+ * size beyond this value gets rejected by __alloc_page() call.
+ */
+#define MAX_RX_BUFFERS_PER_QUEUE 8192
+#define DEF_RX_BUFFERS_PER_QUEUE 1024
+#define MIN_RX_BUFFERS_PER_QUEUE 128
 
-#define RX_BUFFERS_PER_QUEUE 512
+/* This max value for TX buffers is derived as the maximum allocatable
+ * pages supported on host per guest through testing. TX buffer size beyond
+ * this value is rejected by the hardware.
+ */
+#define MAX_TX_BUFFERS_PER_QUEUE 16384
+#define DEF_TX_BUFFERS_PER_QUEUE 256
+#define MIN_TX_BUFFERS_PER_QUEUE 128
 
-#define MAX_SEND_BUFFERS_PER_QUEUE 256
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
 
-#define EQ_SIZE (8 * PAGE_SIZE)
 #define LOG2_EQ_THROTTLE 3
 
 #define MAX_PORTS_IN_MANA_DEV 256
 
+/* Update this count whenever the respective structures are changed */
+#define MANA_STATS_RX_COUNT 5
+#define MANA_STATS_TX_COUNT 11
+
+#define MANA_RX_FRAG_ALIGNMENT 64
+
 struct mana_stats_rx {
 	u64 packets;
 	u64 bytes;
@@ -61,6 +80,14 @@ struct mana_stats_tx {
 	u64 packets;
 	u64 bytes;
 	u64 xdp_xmit;
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
+	u64 short_pkt_fmt;
+	u64 long_pkt_fmt;
+	u64 csum_partial;
+	u64 mana_map_err;
 	struct u64_stats_sync syncp;
 };
 
@@ -86,14 +113,17 @@ struct mana_txq {
 
 	atomic_t pending_sends;
 
+	bool napi_initialized;
+
 	struct mana_stats_tx stats;
 };
 
 /* skb data and frags dma mappings */
 struct mana_skb_head {
-	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
+	/* GSO pkts may have 2 SGEs for the linear part*/
+	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];
 
-	u32 size[MAX_SKB_FRAGS + 1];
+	u32 size[MAX_SKB_FRAGS + 2];
 };
 
 #define MANA_HEADROOM sizeof(struct mana_skb_head)
@@ -262,6 +292,7 @@ struct mana_cq {
 	/* NAPI data */
 	struct napi_struct napi;
 	int work_done;
+	int work_done_since_doorbell;
 	int budget;
 };
 
@@ -270,9 +301,9 @@ struct mana_recv_buf_oob {
 	struct gdma_wqe_request wqe_req;
 
 	void *buf_va;
-	dma_addr_t buf_dma_addr;
+	bool from_pool; /* allocated from a page pool */
 
-	/* SGL of the buffer going to be sent has part of the work request. */
+	/* SGL of the buffer going to be sent as part of the work request. */
 	u32 num_sge;
 	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
 
@@ -283,6 +314,11 @@ struct mana_recv_buf_oob {
 	struct gdma_posted_wqe_info wqe_inf;
 };
 
+#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
+			+ ETH_HLEN)
+
+#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
+
 struct mana_rxq {
 	struct gdma_queue *gdma_rq;
 	/* Cache the gdma receive queue id */
@@ -292,6 +328,9 @@ struct mana_rxq {
 	u32 rxq_idx;
 
 	u32 datasize;
+	u32 alloc_size;
+	u32 headroom;
+	u32 frag_count;
 
 	mana_handle_t rxobj;
@@ -310,14 +349,17 @@ struct mana_rxq {
 
 	struct bpf_prog __rcu *bpf_prog;
 	struct xdp_rxq_info xdp_rxq;
-	struct page *xdp_save_page;
+	void *xdp_save_va; /* for reusing */
 	bool xdp_flush;
 	int xdp_rc; /* XDP redirect return code */
 
+	struct page_pool *page_pool;
+	struct dentry *mana_rx_debugfs;
+
 	/* MUST BE THE LAST MEMBER:
 	 * Each receive buffer has an associated mana_recv_buf_oob.
 	 */
-	struct mana_recv_buf_oob rx_oobs[];
+	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
 };
 
 struct mana_tx_qp {
@@ -326,21 +368,128 @@ struct mana_tx_qp {
 	struct mana_cq tx_cq;
 
 	mana_handle_t tx_object;
+
+	struct dentry *mana_tx_debugfs;
 };
 
 struct mana_ethtool_stats {
 	u64 stop_queue;
 	u64 wake_queue;
+	u64 tx_cqe_err;
+	u64 tx_cqe_unknown_type;
+	u64 tx_linear_pkt_cnt;
+	u64 rx_coalesced_err;
+	u64 rx_cqe_unknown_type;
+};
+
+struct mana_ethtool_hc_stats {
+	u64 hc_rx_discards_no_wqe;
+	u64 hc_rx_err_vport_disabled;
+	u64 hc_rx_bytes;
+	u64 hc_rx_ucast_pkts;
+	u64 hc_rx_ucast_bytes;
+	u64 hc_rx_bcast_pkts;
+	u64 hc_rx_bcast_bytes;
+	u64 hc_rx_mcast_pkts;
+	u64 hc_rx_mcast_bytes;
+	u64 hc_tx_err_gf_disabled;
+	u64 hc_tx_err_vport_disabled;
+	u64 hc_tx_err_inval_vportoffset_pkt;
+	u64 hc_tx_err_vlan_enforcement;
+	u64 hc_tx_err_eth_type_enforcement;
+	u64 hc_tx_err_sa_enforcement;
+	u64 hc_tx_err_sqpdid_enforcement;
+	u64 hc_tx_err_cqpdid_enforcement;
+	u64 hc_tx_err_mtu_violation;
+	u64 hc_tx_err_inval_oob;
+	u64 hc_tx_bytes;
+	u64 hc_tx_ucast_pkts;
+	u64 hc_tx_ucast_bytes;
+	u64 hc_tx_bcast_pkts;
+	u64 hc_tx_bcast_bytes;
+	u64 hc_tx_mcast_pkts;
+	u64 hc_tx_mcast_bytes;
+	u64 hc_tx_err_gdma;
+};
+
+struct mana_ethtool_phy_stats {
+	/* Drop Counters */
+	u64 rx_pkt_drop_phy;
+	u64 tx_pkt_drop_phy;
+
+	/* Per TC traffic Counters */
+	u64 rx_pkt_tc0_phy;
+	u64 tx_pkt_tc0_phy;
+	u64 rx_pkt_tc1_phy;
+	u64 tx_pkt_tc1_phy;
+	u64 rx_pkt_tc2_phy;
+	u64 tx_pkt_tc2_phy;
+	u64 rx_pkt_tc3_phy;
+	u64 tx_pkt_tc3_phy;
+	u64 rx_pkt_tc4_phy;
+	u64 tx_pkt_tc4_phy;
+	u64 rx_pkt_tc5_phy;
+	u64 tx_pkt_tc5_phy;
+	u64 rx_pkt_tc6_phy;
+	u64 tx_pkt_tc6_phy;
+	u64 rx_pkt_tc7_phy;
+	u64 tx_pkt_tc7_phy;
+
+	u64 rx_byte_tc0_phy;
+	u64 tx_byte_tc0_phy;
+	u64 rx_byte_tc1_phy;
+	u64 tx_byte_tc1_phy;
+	u64 rx_byte_tc2_phy;
+	u64 tx_byte_tc2_phy;
+	u64 rx_byte_tc3_phy;
+	u64 tx_byte_tc3_phy;
+	u64 rx_byte_tc4_phy;
+	u64 tx_byte_tc4_phy;
+	u64 rx_byte_tc5_phy;
+	u64 tx_byte_tc5_phy;
+	u64 rx_byte_tc6_phy;
+	u64 tx_byte_tc6_phy;
+	u64 rx_byte_tc7_phy;
+	u64 tx_byte_tc7_phy;
+
+	/* Per TC pause Counters */
+	u64 rx_pause_tc0_phy;
+	u64 tx_pause_tc0_phy;
+	u64 rx_pause_tc1_phy;
+	u64 tx_pause_tc1_phy;
+	u64 rx_pause_tc2_phy;
+	u64 tx_pause_tc2_phy;
+	u64 rx_pause_tc3_phy;
+	u64 tx_pause_tc3_phy;
+	u64 rx_pause_tc4_phy;
+	u64 tx_pause_tc4_phy;
+	u64 rx_pause_tc5_phy;
+	u64 tx_pause_tc5_phy;
+	u64 rx_pause_tc6_phy;
+	u64 tx_pause_tc6_phy;
+	u64 rx_pause_tc7_phy;
+	u64 tx_pause_tc7_phy;
 };
 
 struct mana_context {
 	struct gdma_dev *gdma_dev;
 
 	u16 num_ports;
+	u8 bm_hostmode;
+	struct mana_ethtool_hc_stats hc_stats;
 
 	struct mana_eq *eqs;
+	struct dentry *mana_eqs_debugfs;
+
+	/* Workqueue for querying hardware stats */
+	struct delayed_work gf_stats_work;
+	bool hwc_timeout_occurred;
 
 	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
+
+	/* Link state change work */
+	struct work_struct link_change_work;
+	u32 link_event;
 };
 
 struct mana_port_context {
@@ -358,10 +507,11 @@ struct mana_port_context {
 	struct mana_tx_qp *tx_qp;
 
 	/* Indirection Table for RX & TX. The values are queue indexes */
-	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+	u32 *indir_table;
+	u32 indir_table_sz;
 
 	/* Indirection table containing RxObject Handles */
-	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+	mana_handle_t *rxobj_table;
 
 	/* Hash key used by the NIC */
 	u8 hashkey[MANA_HASH_KEY_SIZE];
@@ -369,12 +519,24 @@ struct mana_port_context {
 	/* This points to an array of num_queues of RQ pointers. */
 	struct mana_rxq **rxqs;
 
+	/* pre-allocated rx buffer array */
+	void **rxbufs_pre;
+	dma_addr_t *das_pre;
+	int rxbpre_total;
+	u32 rxbpre_datasize;
+	u32 rxbpre_alloc_size;
+	u32 rxbpre_headroom;
+	u32 rxbpre_frag_count;
+
 	struct bpf_prog *bpf_prog;
 
 	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
 	unsigned int max_queues;
 	unsigned int num_queues;
 
+	unsigned int rx_queue_size;
+	unsigned int tx_queue_size;
+
 	mana_handle_t port_handle;
 	mana_handle_t pf_filter_handle;
@@ -382,12 +544,24 @@ struct mana_port_context {
 	struct mutex vport_mutex;
 	int vport_use_count;
 
+	/* Net shaper handle*/
+	struct net_shaper_handle handle;
+
 	u16 port_idx;
+	/* Currently configured speed (mbps) */
+	u32 speed;
+	/* Maximum speed supported by the SKU (mbps) */
+	u32 max_speed;
 
 	bool port_is_up;
 	bool port_st_save; /* Saved port state */
 
 	struct mana_ethtool_stats eth_stats;
+
+	struct mana_ethtool_phy_stats phy_stats;
+
+	/* Debugfs */
+	struct dentry *mana_port_debugfs;
 };
 
 netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -401,6 +575,9 @@ int mana_detach(struct net_device *ndev, bool from_close);
 int mana_probe(struct gdma_dev *gd, bool resuming);
 void mana_remove(struct gdma_dev *gd, bool suspending);
 
+int mana_rdma_probe(struct gdma_dev *gd);
+void mana_rdma_remove(struct gdma_dev *gd);
+
 void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
 int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
 		  u32 flags);
@@ -409,8 +586,17 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
 void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
 int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+int mana_query_gf_stats(struct mana_context *ac);
+int mana_query_link_cfg(struct mana_port_context *apc);
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+		      int enable_clamping);
+void mana_query_phy_stats(struct mana_port_context *apc);
+int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
+void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
 
 extern const struct ethtool_ops mana_ethtool_ops;
+extern struct dentry *mana_debugfs_root;
 
 /* A CQ can be created not associated with any EQ */
 #define GDMA_CQ_NO_EQ 0xffff
@@ -432,6 +618,9 @@ enum mana_command_code {
 	MANA_FENCE_RQ = 0x20006,
 	MANA_CONFIG_VPORT_RX = 0x20007,
 	MANA_QUERY_VPORT_CONFIG = 0x20008,
+	MANA_QUERY_LINK_CONFIG = 0x2000A,
+	MANA_SET_BW_CLAMP = 0x2000B,
+	MANA_QUERY_PHY_STAT = 0x2000c,
 
 	/* Privileged commands for the PF mode */
 	MANA_REGISTER_FILTER = 0x28000,
@@ -440,6 +629,35 @@ enum mana_command_code {
 	MANA_DEREGISTER_HW_PORT = 0x28004,
 };
 
+/* Query Link Configuration*/
+struct mana_query_link_config_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t vport;
+}; /* HW DATA */
+
+struct mana_query_link_config_resp {
+	struct gdma_resp_hdr hdr;
+	u32 qos_speed_mbps;
+	u8 qos_unconfigured;
+	u8 reserved1[3];
+	u32 link_speed_mbps;
+	u8 reserved2[4];
+}; /* HW DATA */
+
+/* Set Bandwidth Clamp*/
+struct mana_set_bw_clamp_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t vport;
+	enum TRI_STATE enable_clamping;
+	u32 link_speed_mbps;
+}; /* HW DATA */
+
+struct mana_set_bw_clamp_resp {
+	struct gdma_resp_hdr hdr;
+	u8 qos_unconfigured;
+	u8 reserved[7];
+}; /* HW DATA */
+
 /* Query Device Configuration */
 struct mana_query_device_cfg_req {
 	struct gdma_req_hdr hdr;
@@ -466,8 +684,14 @@ struct mana_query_device_cfg_resp {
 	u64 pf_cap_flags4;
 
 	u16 max_num_vports;
-	u16 reserved;
+	u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
+	u8 reserved;
 	u32 max_num_eqs;
+
+	/* response v2: */
+	u16 adapter_mtu;
+	u16 reserved2;
+	u32 reserved3;
 }; /* HW DATA */
 
 /* Query vPort Configuration */
@@ -545,8 +769,119 @@ struct mana_fence_rq_resp {
 	struct gdma_resp_hdr hdr;
 }; /* HW DATA */
 
+/* Query stats RQ */
+struct mana_query_gf_stat_req {
+	struct gdma_req_hdr hdr;
+	u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_gf_stat_resp {
+	struct gdma_resp_hdr hdr;
+	u64 reported_stats;
+	/* rx errors/discards */
+	u64 rx_discards_nowqe;
+	u64 rx_err_vport_disabled;
+	/* rx bytes/packets */
+	u64 hc_rx_bytes;
+	u64 hc_rx_ucast_pkts;
+	u64 hc_rx_ucast_bytes;
+	u64 hc_rx_bcast_pkts;
+	u64 hc_rx_bcast_bytes;
+	u64 hc_rx_mcast_pkts;
+	u64 hc_rx_mcast_bytes;
+	/* tx errors */
+	u64 tx_err_gf_disabled;
+	u64 tx_err_vport_disabled;
+	u64 tx_err_inval_vport_offset_pkt;
+	u64 tx_err_vlan_enforcement;
+	u64 tx_err_ethtype_enforcement;
+	u64 tx_err_SA_enforcement;
+	u64 tx_err_SQPDID_enforcement;
+	u64 tx_err_CQPDID_enforcement;
+	u64 tx_err_mtu_violation;
+	u64 tx_err_inval_oob;
+	/* tx bytes/packets */
+	u64 hc_tx_bytes;
+	u64 hc_tx_ucast_pkts;
+	u64 hc_tx_ucast_bytes;
+	u64 hc_tx_bcast_pkts;
+	u64 hc_tx_bcast_bytes;
+	u64 hc_tx_mcast_pkts;
+	u64 hc_tx_mcast_bytes;
+	/* tx error */
+	u64 tx_err_gdma;
+}; /* HW DATA */
+
+/* Query phy stats */
+struct mana_query_phy_stat_req {
+	struct gdma_req_hdr hdr;
+	u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_phy_stat_resp {
+	struct gdma_resp_hdr hdr;
+	u64 reported_stats;
+
+	/* Aggregate Drop Counters */
+	u64 rx_pkt_drop_phy;
+	u64 tx_pkt_drop_phy;
+
+	/* Per TC(Traffic class) traffic Counters */
+	u64 rx_pkt_tc0_phy;
+	u64 tx_pkt_tc0_phy;
+	u64 rx_pkt_tc1_phy;
+	u64 tx_pkt_tc1_phy;
+	u64 rx_pkt_tc2_phy;
+	u64 tx_pkt_tc2_phy;
+	u64 rx_pkt_tc3_phy;
+	u64 tx_pkt_tc3_phy;
+	u64 rx_pkt_tc4_phy;
+	u64 tx_pkt_tc4_phy;
+	u64 rx_pkt_tc5_phy;
+	u64 tx_pkt_tc5_phy;
+	u64 rx_pkt_tc6_phy;
+	u64 tx_pkt_tc6_phy;
+	u64 rx_pkt_tc7_phy;
+	u64 tx_pkt_tc7_phy;
+
+	u64 rx_byte_tc0_phy;
+	u64 tx_byte_tc0_phy;
+	u64 rx_byte_tc1_phy;
+	u64 tx_byte_tc1_phy;
+	u64 rx_byte_tc2_phy;
+	u64 tx_byte_tc2_phy;
+	u64 rx_byte_tc3_phy;
+	u64 tx_byte_tc3_phy;
+	u64 rx_byte_tc4_phy;
+	u64 tx_byte_tc4_phy;
+	u64 rx_byte_tc5_phy;
+	u64 tx_byte_tc5_phy;
+	u64 rx_byte_tc6_phy;
+	u64 tx_byte_tc6_phy;
+	u64 rx_byte_tc7_phy;
+	u64 tx_byte_tc7_phy;
+
+	/* Per TC(Traffic Class) pause Counters */
+	u64 rx_pause_tc0_phy;
+	u64 tx_pause_tc0_phy;
+	u64 rx_pause_tc1_phy;
+	u64 tx_pause_tc1_phy;
+	u64 rx_pause_tc2_phy;
+	u64 tx_pause_tc2_phy;
+	u64 rx_pause_tc3_phy;
+	u64 tx_pause_tc3_phy;
+	u64 rx_pause_tc4_phy;
+	u64 tx_pause_tc4_phy;
+	u64 rx_pause_tc5_phy;
+	u64 tx_pause_tc5_phy;
+	u64 rx_pause_tc6_phy;
+	u64 tx_pause_tc6_phy;
+	u64 rx_pause_tc7_phy;
+	u64 tx_pause_tc7_phy;
+}; /* HW DATA */
+
 /* Configure vPort Rx Steering */
-struct mana_cfg_rx_steer_req {
+struct mana_cfg_rx_steer_req_v2 {
 	struct gdma_req_hdr hdr;
 	mana_handle_t vport;
 	u16 num_indir_entries;
@@ -559,6 +894,9 @@ struct mana_cfg_rx_steer_req {
 	u8 reserved;
 	mana_handle_t default_rxobj;
 	u8 hashkey[MANA_HASH_KEY_SIZE];
+	u8 cqe_coalescing_enable;
+	u8 reserved2[7];
+	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
 }; /* HW DATA */
 
 struct mana_cfg_rx_steer_resp {
@@ -622,6 +960,42 @@ struct mana_deregister_filter_resp {
 	struct gdma_resp_hdr hdr;
 }; /* HW DATA */
 
+/* Requested GF stats Flags */
+/* Rx discards/Errors */
+#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
+#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002
+/* Rx bytes/pkts */
+#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
+#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
+#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
+#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
+#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
+#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
+#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100
+/* Tx errors */
+#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
+#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
+#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
+					0x0000000000000800
+#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
+#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
+					0x0000000000002000
+#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
+#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
+#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
+#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
+#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000
+/* Tx bytes/pkts */
+#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
+#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
+#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
+#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
+#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
+#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
+#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000
+/* Tx error */
+#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
+
 #define MANA_MAX_NUM_QUEUES 64
 
 #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
@@ -648,4 +1022,8 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
 		   u32 doorbell_pg_id);
 void mana_uncfg_vport(struct mana_port_context *apc);
+
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+					   u32 port_index,
+					   netdevice_tracker *tracker);
 #endif /* _MANA_H */
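A note on the indirection-table change above: the RX-steering request now ends in a flexible indir_tab[] array counted by num_indir_entries, replacing the fixed MANA_INDIRECT_TABLE_SIZE arrays. The sketch below only illustrates how such a variable-length request could be sized with struct_size() and filled from the per-port table pointers declared above; the helper name mana_build_rx_steer_req() is hypothetical and this is not the in-tree driver's code.

/* Hypothetical sketch: build a variable-length RX steering request.
 * struct_size() (from <linux/overflow.h>) accounts for the trailing
 * indir_tab[] flexible array declared with __counted_by().
 */
static struct mana_cfg_rx_steer_req_v2 *
mana_build_rx_steer_req(struct mana_port_context *apc, u32 *req_buf_size)
{
	struct mana_cfg_rx_steer_req_v2 *req;
	u32 i;

	*req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
	req = kzalloc(*req_buf_size, GFP_KERNEL);
	if (!req)
		return NULL;

	req->vport = apc->port_handle;
	req->num_indir_entries = apc->indir_table_sz;
	req->default_rxobj = apc->rxobj_table[0];
	memcpy(req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	/* indir_table[] holds queue indexes and rxobj_table[] holds RX
	 * object handles; each steering entry carries the handle of the
	 * queue that the hash bucket maps to.
	 */
	for (i = 0; i < apc->indir_table_sz; i++)
		req->indir_tab[i] = apc->rxobj_table[apc->indir_table[i]];

	return req;
}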
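Likewise, the MIN/DEF/MAX_*_BUFFERS_PER_QUEUE constants bound the new rx_queue_size and tx_queue_size fields. A minimal, assumed bounds check (not the driver's actual ethtool handler, which may also apply rounding) could look like this:

/* Illustrative range check for user-requested per-queue ring depths. */
static int mana_check_queue_sizes(u32 rx_pending, u32 tx_pending)
{
	if (rx_pending < MIN_RX_BUFFERS_PER_QUEUE ||
	    rx_pending > MAX_RX_BUFFERS_PER_QUEUE)
		return -EINVAL;

	if (tx_pending < MIN_TX_BUFFERS_PER_QUEUE ||
	    tx_pending > MAX_TX_BUFFERS_PER_QUEUE)
		return -EINVAL;

	return 0;
}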
