Diffstat (limited to 'include/net/mana')
-rw-r--r--  include/net/mana/gdma.h         159
-rw-r--r--  include/net/mana/hw_channel.h    18
-rw-r--r--  include/net/mana/mana.h         412
3 files changed, 549 insertions(+), 40 deletions(-)
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 56189e4252da..eaa27483f99b 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -10,6 +10,7 @@
#include "shm_channel.h"
#define GDMA_STATUS_MORE_ENTRIES 0x00000105
+#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
@@ -33,6 +34,7 @@ enum gdma_request_type {
GDMA_DESTROY_PD = 30,
GDMA_CREATE_MR = 31,
GDMA_DESTROY_MR = 32,
+ GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
};
#define GDMA_RESOURCE_DOORBELL_PAGE 27
@@ -57,12 +59,30 @@ enum gdma_eqe_type {
GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
GDMA_EQE_HWC_INIT_DATA = 130,
GDMA_EQE_HWC_INIT_DONE = 131,
+ GDMA_EQE_HWC_FPGA_RECONFIG = 132,
+ GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
+ GDMA_EQE_HWC_SOC_SERVICE = 134,
+ GDMA_EQE_HWC_RESET_REQUEST = 135,
+ GDMA_EQE_RNIC_QP_FATAL = 176,
};
enum {
GDMA_DEVICE_NONE = 0,
GDMA_DEVICE_HWC = 1,
GDMA_DEVICE_MANA = 2,
+ GDMA_DEVICE_MANA_IB = 3,
+};
+
+enum gdma_service_type {
+ GDMA_SERVICE_TYPE_NONE = 0,
+ GDMA_SERVICE_TYPE_RDMA_SUSPEND = 1,
+ GDMA_SERVICE_TYPE_RDMA_RESUME = 2,
+};
+
+struct mana_service_work {
+ struct work_struct work;
+ struct gdma_dev *gdma_dev;
+ enum gdma_service_type event;
};
struct gdma_resource {
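Annotation: a minimal sketch (not part of the diff) of how the new service plumbing above is likely wired: an EQE handler allocates a mana_service_work, records the event type, and queues it on the gdma_context's service_wq (also added in this diff). The helper names and GFP context are assumptions.

static void mana_example_serv_fn(struct work_struct *w)
{
	struct mana_service_work *sw =
		container_of(w, struct mana_service_work, work);

	/* act on sw->event (GDMA_SERVICE_TYPE_RDMA_SUSPEND / _RESUME) */
	kfree(sw);
}

static int mana_example_queue_service(struct gdma_context *gc,
				      struct gdma_dev *gd,
				      enum gdma_service_type event)
{
	struct mana_service_work *sw = kzalloc(sizeof(*sw), GFP_ATOMIC);

	if (!sw)
		return -ENOMEM;

	INIT_WORK(&sw->work, mana_example_serv_fn);
	sw->gdma_dev = gd;
	sw->event = event;
	queue_work(gc->service_wq, &sw->work);
	return 0;
}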
@@ -145,6 +165,9 @@ struct gdma_general_req {
}; /* HW DATA */
#define GDMA_MESSAGE_V1 1
+#define GDMA_MESSAGE_V2 2
+#define GDMA_MESSAGE_V3 3
+#define GDMA_MESSAGE_V4 4
struct gdma_general_resp {
struct gdma_resp_hdr hdr;
@@ -216,9 +239,19 @@ struct gdma_dev {
void *driver_data;
struct auxiliary_device *adev;
+ bool is_suspended;
+ bool rdma_teardown;
};
-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+/* MANA_PAGE_SIZE is the DMA unit */
+#define MANA_PAGE_SHIFT 12
+#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
+#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
+#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
+#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
+
+/* Required by HW */
+#define MANA_MIN_QSIZE MANA_PAGE_SIZE
#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
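Annotation: the MANA_PAGE_* macros pin the DMA unit at 4 KiB independent of the kernel's PAGE_SIZE, so e.g. a 64 KiB-page arm64 kernel still programs the device in the units it expects. A minimal sketch of sizing a CQ in those units (the helper name is hypothetical):

static u32 mana_example_cq_size(u32 num_cqes)
{
	/* round up to whole hardware pages, never below the HW minimum */
	u32 size = MANA_PAGE_ALIGN(num_cqes * GDMA_CQE_SIZE);

	return max_t(u32, size, MANA_MIN_QSIZE);
}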
@@ -252,7 +285,8 @@ struct gdma_event {
struct gdma_queue;
struct mana_eq {
- struct gdma_queue *eq;
+ struct gdma_queue *eq;
+ struct dentry *mana_eq_debugfs;
};
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
@@ -289,6 +323,7 @@ struct gdma_queue {
u32 head;
u32 tail;
+ struct list_head entry;
/* Extra fields specific to EQ/CQ. */
union {
@@ -324,6 +359,7 @@ struct gdma_queue_spec {
void *context;
unsigned long log2_throttle_limit;
+ unsigned int msix_index;
} eq;
struct {
@@ -340,19 +376,28 @@ struct gdma_queue_spec {
struct gdma_irq_context {
void (*handler)(void *arg);
- void *arg;
+ /* Protect the eq_list */
+ spinlock_t lock;
+ struct list_head eq_list;
char name[MANA_IRQ_NAME_SZ];
};
+enum gdma_context_flags {
+ GC_PROBE_SUCCEEDED = 0,
+};
+
struct gdma_context {
struct device *dev;
+ struct dentry *mana_pci_debugfs;
/* Per-vPort max number of queues */
unsigned int max_num_queues;
unsigned int max_num_msix;
unsigned int num_msix_usable;
- struct gdma_resource msix_resource;
- struct gdma_irq_context *irq_contexts;
+ struct xarray irq_contexts;
+
+ /* L2 MTU */
+ u16 adapter_mtu;
/* This maps a CQ index to the queue structure. */
unsigned int max_num_cqs;
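Annotation: with irq_contexts now an xarray and each vector carrying an eq_list, one MSI-X vector can service several EQs. A sketch of the handler-side walk implied by the locking comment above (the real driver may use a different synchronization scheme; this is an assumption):

static void mana_example_irq_walk(struct gdma_irq_context *ic)
{
	struct gdma_queue *eq;

	spin_lock(&ic->lock);
	list_for_each_entry(eq, &ic->eq_list, entry) {
		/* dispatch pending EQEs on this EQ */
	}
	spin_unlock(&ic->lock);
}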
@@ -364,6 +409,8 @@ struct gdma_context {
u32 test_event_eq_id;
bool is_pf;
+ bool in_service;
+
phys_addr_t bar0_pa;
void __iomem *bar0_va;
void __iomem *shm_base;
@@ -380,9 +427,16 @@ struct gdma_context {
/* Azure network adapter */
struct gdma_dev mana;
-};
-#define MAX_NUM_GDMA_DEVICES 4
+ /* Azure RDMA adapter */
+ struct gdma_dev mana_ib;
+
+ u64 pf_cap_flags1;
+
+ struct workqueue_struct *service_wq;
+
+ unsigned long flags;
+};
static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
@@ -438,6 +492,8 @@ struct gdma_wqe {
#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24
+#define MANA_MAX_TX_WQE_SGL_ENTRIES 30
+
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
@@ -527,10 +583,45 @@ enum {
* so the driver is able to reliably support features like busy_poll.
*/
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
+#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
+#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
+
+/* Driver can handle holes (zeros) in the device list */
+#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
+
+/* Driver supports dynamic MSI-X vector allocation */
+#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)
+
+/* Driver can self reset on EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)
+
+/* Driver can self reset on FPGA Reconfig EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
+#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
+
+/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
+#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)
+
+/* Driver can send HWC periodically to query stats */
+#define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)
+
+/* Driver can handle hardware recovery events during probe */
+#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
- GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
+ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
+ GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
+ GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
+ GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
+ GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
+ GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
+ GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
+ GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY)
#define GDMA_DRV_CAP_FLAGS2 0
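Annotation: GDMA_DRV_CAP_FLAGS1 is what the driver advertises; the device's grant lands in gdma_context::pf_cap_flags1 (added in this diff) and is tested before an optional feature is used. A trivial sketch (helper name hypothetical):

static bool mana_example_cap(struct gdma_context *gc, u64 flag)
{
	/* pf_cap_flags1 holds the capability bits granted by the device */
	return !!(gc->pf_cap_flags1 & flag);
}

/* e.g.: mana_example_cap(gc, GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) */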
@@ -591,11 +682,12 @@ struct gdma_query_max_resources_resp {
}; /* HW DATA */
/* GDMA_LIST_DEVICES */
+#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
struct gdma_resp_hdr hdr;
u32 num_of_devs;
u32 reserved;
- struct gdma_dev_id devs[64];
+ struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */
/* GDMA_REGISTER_DEVICE */
@@ -660,18 +752,17 @@ struct gdma_disable_queue_req {
u32 alloc_res_id_on_creation;
}; /* HW DATA */
-enum atb_page_size {
- ATB_PAGE_SIZE_4K,
- ATB_PAGE_SIZE_8K,
- ATB_PAGE_SIZE_16K,
- ATB_PAGE_SIZE_32K,
- ATB_PAGE_SIZE_64K,
- ATB_PAGE_SIZE_128K,
- ATB_PAGE_SIZE_256K,
- ATB_PAGE_SIZE_512K,
- ATB_PAGE_SIZE_1M,
- ATB_PAGE_SIZE_2M,
- ATB_PAGE_SIZE_MAX,
+/* GDMA_QUERY_HWC_TIMEOUT */
+struct gdma_query_hwc_timeout_req {
+ struct gdma_req_hdr hdr;
+ u32 timeout_ms;
+ u32 reserved;
+};
+
+struct gdma_query_hwc_timeout_resp {
+ struct gdma_resp_hdr hdr;
+ u32 timeout_ms;
+ u32 reserved;
};
enum gdma_mr_access_flags {
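Annotation: a hedged sketch of issuing the new GDMA_QUERY_HWC_TIMEOUT verb with the request/response pair above, following the driver's existing mana_gd_init_req_hdr()/mana_gd_send_request() pattern (the helper name and error mapping are assumptions):

static int mana_example_query_hwc_timeout(struct gdma_context *gc, u32 *ms)
{
	struct gdma_query_hwc_timeout_resp resp = {};
	struct gdma_query_hwc_timeout_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
			     sizeof(req), sizeof(resp));
	req.timeout_ms = *ms;	/* current value; device may override */

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	*ms = resp.timeout_ms;
	return 0;
}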
@@ -732,6 +823,7 @@ struct gdma_destroy_dma_region_req {
enum gdma_pd_flags {
GDMA_PD_FLAG_INVALID = 0,
+ GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};
struct gdma_create_pd_req {
@@ -757,11 +849,18 @@ struct gdma_destory_pd_resp {
};/* HW DATA */
enum gdma_mr_type {
+ /*
+ * Guest Physical Address - MRs of this type allow access
+ * to any DMA-mapped memory using bus-logical address
+ */
+ GDMA_MR_TYPE_GPA = 1,
/* Guest Virtual Address - MRs of this type allow access
* to memory mapped by PTEs associated with this MR using a virtual
* address that is set up in the MST
*/
GDMA_MR_TYPE_GVA = 2,
+ /* Guest zero-based address MRs */
+ GDMA_MR_TYPE_ZBVA = 4,
};
struct gdma_create_mr_params {
@@ -773,6 +872,10 @@ struct gdma_create_mr_params {
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
+ struct {
+ u64 dma_region_handle;
+ enum gdma_mr_access_flags access_flags;
+ } zbva;
};
};
@@ -788,7 +891,10 @@ struct gdma_create_mr_request {
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
-
+ struct {
+ u64 dma_region_handle;
+ enum gdma_mr_access_flags access_flags;
+ } zbva;
};
u32 reserved_2;
};/* HW DATA */
@@ -837,5 +943,14 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp);
int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
+void mana_register_debugfs(void);
+void mana_unregister_debugfs(void);
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);
+
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
+int mana_gd_resume(struct pci_dev *pdev);
+
+bool mana_need_log(struct gdma_context *gc, int err);
#endif /* _GDMA_H */
diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h
index 6a757a6e2732..16feb39616c1 100644
--- a/include/net/mana/hw_channel.h
+++ b/include/net/mana/hw_channel.h
@@ -23,6 +23,12 @@
#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
+#define HWC_DATA_CFG_HWC_TIMEOUT 1
+#define HWC_DATA_HW_LINK_CONNECT 2
+#define HWC_DATA_HW_LINK_DISCONNECT 3
+
+#define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000
+
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
*/
@@ -45,6 +51,15 @@ union hwc_init_type_data {
};
}; /* HW DATA */
+union hwc_init_soc_service_type {
+ u32 as_uint32;
+
+ struct {
+ u32 value : 28;
+ u32 type : 4;
+ };
+}; /* HW DATA */
+
struct hwc_rx_oob {
u32 type : 6;
u32 eom : 1;
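Annotation: a sketch of decoding a SOC service EQE payload with the union above; the 4-bit type field occupies the top nibble, the 28-bit value carries details. Mapping the type field onto enum gdma_service_type and the gd->gdma_context back-pointer are assumptions here, not guarantees from this diff.

static void mana_example_soc_service(struct gdma_dev *gd, u32 eqe_data)
{
	union hwc_init_soc_service_type msg = { .as_uint32 = eqe_data };

	if (msg.type == GDMA_SERVICE_TYPE_RDMA_SUSPEND ||
	    msg.type == GDMA_SERVICE_TYPE_RDMA_RESUME)
		mana_rdma_service_event(gd->gdma_context, msg.type);
}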
@@ -117,7 +132,7 @@ struct hwc_dma_buf {
u32 gpa_mkey;
u32 num_reqs;
- struct hwc_work_request reqs[];
+ struct hwc_work_request reqs[] __counted_by(num_reqs);
};
typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
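Annotation: __counted_by(num_reqs) lets FORTIFY/UBSAN bounds-check accesses to the flexible reqs[] array against the runtime counter. A minimal allocation sketch using the kernel's struct_size() helper (function name hypothetical):

static struct hwc_dma_buf *mana_example_alloc_buf(u16 q_depth)
{
	struct hwc_dma_buf *buf;

	/* struct_size() covers the trailing flexible reqs[] array */
	buf = kzalloc(struct_size(buf, reqs, q_depth), GFP_KERNEL);
	if (buf)
		buf->num_reqs = q_depth; /* keep the __counted_by counter true */
	return buf;
}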
@@ -182,6 +197,7 @@ struct hw_channel_context {
u32 pf_dest_vrq_id;
u32 pf_dest_vrcq_id;
+ u32 hwc_timeout;
struct hwc_caller_ctx *caller_ctx;
};
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 3bb579962a14..d7e089c6b694 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -4,6 +4,9 @@
#ifndef _MANA_H
#define _MANA_H
+#include <net/xdp.h>
+#include <net/net_shaper.h>
+
#include "gdma.h"
#include "hw_channel.h"
@@ -28,26 +31,42 @@ enum TRI_STATE {
};
/* Number of entries for hardware indirection table must be in power of 2 */
-#define MANA_INDIRECT_TABLE_SIZE 64
-#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
+#define MANA_INDIRECT_TABLE_MAX_SIZE 512
+#define MANA_INDIRECT_TABLE_DEF_SIZE 64
/* The Toeplitz hash key's length in bytes: should be multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
#define COMP_ENTRY_SIZE 64
-#define ADAPTER_MTU_SIZE 1500
-#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
+/* This Max value for RX buffers is derived from __alloc_page()'s max page
+ * allocation calculation. It allows maximum 2^(MAX_ORDER -1) pages. RX buffer
+ * size beyond this value gets rejected by __alloc_page() call.
+ */
+#define MAX_RX_BUFFERS_PER_QUEUE 8192
+#define DEF_RX_BUFFERS_PER_QUEUE 1024
+#define MIN_RX_BUFFERS_PER_QUEUE 128
-#define RX_BUFFERS_PER_QUEUE 512
+/* This max value for TX buffers is derived as the maximum allocatable
+ * pages supported on host per guest through testing. TX buffer size beyond
+ * this value is rejected by the hardware.
+ */
+#define MAX_TX_BUFFERS_PER_QUEUE 16384
+#define DEF_TX_BUFFERS_PER_QUEUE 256
+#define MIN_TX_BUFFERS_PER_QUEUE 128
-#define MAX_SEND_BUFFERS_PER_QUEUE 256
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
-#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3
#define MAX_PORTS_IN_MANA_DEV 256
+/* Update this count whenever the respective structures are changed */
+#define MANA_STATS_RX_COUNT 5
+#define MANA_STATS_TX_COUNT 11
+
+#define MANA_RX_FRAG_ALIGNMENT 64
+
struct mana_stats_rx {
u64 packets;
u64 bytes;
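Annotation: the fixed RX_BUFFERS_PER_QUEUE/MAX_SEND_BUFFERS_PER_QUEUE constants give way to min/default/max bounds, so ring sizes become user-configurable; an ethtool .set_ringparam path would clamp against them, roughly as sketched below (helper name hypothetical):

static u32 mana_example_clamp_rx_ring(u32 requested)
{
	return clamp_t(u32, requested, MIN_RX_BUFFERS_PER_QUEUE,
		       MAX_RX_BUFFERS_PER_QUEUE);
}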
@@ -61,6 +80,14 @@ struct mana_stats_tx {
u64 packets;
u64 bytes;
u64 xdp_xmit;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
+ u64 short_pkt_fmt;
+ u64 long_pkt_fmt;
+ u64 csum_partial;
+ u64 mana_map_err;
struct u64_stats_sync syncp;
};
@@ -86,14 +113,17 @@ struct mana_txq {
atomic_t pending_sends;
+ bool napi_initialized;
+
struct mana_stats_tx stats;
};
/* skb data and frags dma mappings */
struct mana_skb_head {
- dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
+ /* GSO pkts may have 2 SGEs for the linear part*/
+ dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];
- u32 size[MAX_SKB_FRAGS + 1];
+ u32 size[MAX_SKB_FRAGS + 2];
};
#define MANA_HEADROOM sizeof(struct mana_skb_head)
@@ -262,6 +292,7 @@ struct mana_cq {
/* NAPI data */
struct napi_struct napi;
int work_done;
+ int work_done_since_doorbell;
int budget;
};
@@ -270,9 +301,9 @@ struct mana_recv_buf_oob {
struct gdma_wqe_request wqe_req;
void *buf_va;
- dma_addr_t buf_dma_addr;
+ bool from_pool; /* allocated from a page pool */
- /* SGL of the buffer going to be sent has part of the work request. */
+ /* SGL of the buffer going to be sent as part of the work request. */
u32 num_sge;
struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
@@ -283,6 +314,11 @@ struct mana_recv_buf_oob {
struct gdma_posted_wqe_info wqe_inf;
};
+#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
+ + ETH_HLEN)
+
+#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
+
struct mana_rxq {
struct gdma_queue *gdma_rq;
/* Cache the gdma receive queue id */
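Annotation, as a back-of-envelope check on MANA_XDP_MTU_MAX above: with 4 KiB pages, XDP_PACKET_HEADROOM of 256 bytes, ETH_HLEN of 14, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) around 320 bytes on x86_64, the macro works out to roughly 4096 - (320 + 14) - 256 = 3506 bytes. An XDP program can therefore only be attached while the MTU fits in that single-page budget (the exact figure varies with struct layout and architecture).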
@@ -292,6 +328,9 @@ struct mana_rxq {
u32 rxq_idx;
u32 datasize;
+ u32 alloc_size;
+ u32 headroom;
+ u32 frag_count;
mana_handle_t rxobj;
@@ -310,14 +349,17 @@ struct mana_rxq {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
- struct page *xdp_save_page;
+ void *xdp_save_va; /* for reusing */
bool xdp_flush;
int xdp_rc; /* XDP redirect return code */
+ struct page_pool *page_pool;
+ struct dentry *mana_rx_debugfs;
+
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
*/
- struct mana_recv_buf_oob rx_oobs[];
+ struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
struct mana_tx_qp {
@@ -326,21 +368,128 @@ struct mana_tx_qp {
struct mana_cq tx_cq;
mana_handle_t tx_object;
+
+ struct dentry *mana_tx_debugfs;
};
struct mana_ethtool_stats {
u64 stop_queue;
u64 wake_queue;
+ u64 tx_cqe_err;
+ u64 tx_cqe_unknown_type;
+ u64 tx_linear_pkt_cnt;
+ u64 rx_coalesced_err;
+ u64 rx_cqe_unknown_type;
+};
+
+struct mana_ethtool_hc_stats {
+ u64 hc_rx_discards_no_wqe;
+ u64 hc_rx_err_vport_disabled;
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ u64 hc_tx_err_gf_disabled;
+ u64 hc_tx_err_vport_disabled;
+ u64 hc_tx_err_inval_vportoffset_pkt;
+ u64 hc_tx_err_vlan_enforcement;
+ u64 hc_tx_err_eth_type_enforcement;
+ u64 hc_tx_err_sa_enforcement;
+ u64 hc_tx_err_sqpdid_enforcement;
+ u64 hc_tx_err_cqpdid_enforcement;
+ u64 hc_tx_err_mtu_violation;
+ u64 hc_tx_err_inval_oob;
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
+ u64 hc_tx_err_gdma;
+};
+
+struct mana_ethtool_phy_stats {
+ /* Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
};
struct mana_context {
struct gdma_dev *gdma_dev;
u16 num_ports;
+ u8 bm_hostmode;
+ struct mana_ethtool_hc_stats hc_stats;
struct mana_eq *eqs;
+ struct dentry *mana_eqs_debugfs;
+
+ /* Workqueue for querying hardware stats */
+ struct delayed_work gf_stats_work;
+ bool hwc_timeout_occurred;
struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
+
+ /* Link state change work */
+ struct work_struct link_change_work;
+ u32 link_event;
};
struct mana_port_context {
@@ -358,10 +507,11 @@ struct mana_port_context {
struct mana_tx_qp *tx_qp;
/* Indirection Table for RX & TX. The values are queue indexes */
- u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+ u32 *indir_table;
+ u32 indir_table_sz;
/* Indirection table containing RxObject Handles */
- mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+ mana_handle_t *rxobj_table;
/* Hash key used by the NIC */
u8 hashkey[MANA_HASH_KEY_SIZE];
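Annotation: since the indirection tables above are now pointers sized by indir_table_sz (default 64 entries, up to 512 when the device advertises variable-size tables), they must be allocated at probe time. A sketch, with the helper name hypothetical and error unwind omitted:

static int mana_example_alloc_tables(struct mana_port_context *apc, u32 sz)
{
	/* sz: power of 2, <= MANA_INDIRECT_TABLE_MAX_SIZE */
	apc->indir_table_sz = sz;
	apc->indir_table = kcalloc(sz, sizeof(*apc->indir_table), GFP_KERNEL);
	apc->rxobj_table = kcalloc(sz, sizeof(*apc->rxobj_table), GFP_KERNEL);
	if (!apc->indir_table || !apc->rxobj_table)
		return -ENOMEM;	/* caller frees whatever was allocated */
	return 0;
}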
@@ -369,12 +519,24 @@ struct mana_port_context {
/* This points to an array of num_queues of RQ pointers. */
struct mana_rxq **rxqs;
+ /* pre-allocated rx buffer array */
+ void **rxbufs_pre;
+ dma_addr_t *das_pre;
+ int rxbpre_total;
+ u32 rxbpre_datasize;
+ u32 rxbpre_alloc_size;
+ u32 rxbpre_headroom;
+ u32 rxbpre_frag_count;
+
struct bpf_prog *bpf_prog;
/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
unsigned int max_queues;
unsigned int num_queues;
+ unsigned int rx_queue_size;
+ unsigned int tx_queue_size;
+
mana_handle_t port_handle;
mana_handle_t pf_filter_handle;
@@ -382,12 +544,24 @@ struct mana_port_context {
struct mutex vport_mutex;
int vport_use_count;
+ /* Net shaper handle*/
+ struct net_shaper_handle handle;
+
u16 port_idx;
+ /* Currently configured speed (mbps) */
+ u32 speed;
+ /* Maximum speed supported by the SKU (mbps) */
+ u32 max_speed;
bool port_is_up;
bool port_st_save; /* Saved port state */
struct mana_ethtool_stats eth_stats;
+
+ struct mana_ethtool_phy_stats phy_stats;
+
+ /* Debugfs */
+ struct dentry *mana_port_debugfs;
};
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -401,6 +575,9 @@ int mana_detach(struct net_device *ndev, bool from_close);
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
+int mana_rdma_probe(struct gdma_dev *gd);
+void mana_rdma_remove(struct gdma_dev *gd);
+
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
u32 flags);
@@ -409,8 +586,17 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+int mana_query_gf_stats(struct mana_context *ac);
+int mana_query_link_cfg(struct mana_port_context *apc);
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping);
+void mana_query_phy_stats(struct mana_port_context *apc);
+int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
+void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
+extern struct dentry *mana_debugfs_root;
/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff
@@ -432,6 +618,9 @@ enum mana_command_code {
MANA_FENCE_RQ = 0x20006,
MANA_CONFIG_VPORT_RX = 0x20007,
MANA_QUERY_VPORT_CONFIG = 0x20008,
+ MANA_QUERY_LINK_CONFIG = 0x2000A,
+ MANA_SET_BW_CLAMP = 0x2000B,
+ MANA_QUERY_PHY_STAT = 0x2000c,
/* Privileged commands for the PF mode */
MANA_REGISTER_FILTER = 0x28000,
@@ -440,6 +629,35 @@ enum mana_command_code {
MANA_DEREGISTER_HW_PORT = 0x28004,
};
+/* Query Link Configuration*/
+struct mana_query_link_config_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+}; /* HW DATA */
+
+struct mana_query_link_config_resp {
+ struct gdma_resp_hdr hdr;
+ u32 qos_speed_mbps;
+ u8 qos_unconfigured;
+ u8 reserved1[3];
+ u32 link_speed_mbps;
+ u8 reserved2[4];
+}; /* HW DATA */
+
+/* Set Bandwidth Clamp*/
+struct mana_set_bw_clamp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ enum TRI_STATE enable_clamping;
+ u32 link_speed_mbps;
+}; /* HW DATA */
+
+struct mana_set_bw_clamp_resp {
+ struct gdma_resp_hdr hdr;
+ u8 qos_unconfigured;
+ u8 reserved[7];
+}; /* HW DATA */
+
/* Query Device Configuration */
struct mana_query_device_cfg_req {
struct gdma_req_hdr hdr;
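Annotation: a usage sketch for the two new verbs declared elsewhere in this diff (values illustrative, helper hypothetical): clamp the vPort to 1 Gbps, then re-read the link configuration to refresh the cached speed.

static int mana_example_clamp_1g(struct mana_port_context *apc)
{
	int err = mana_set_bw_clamp(apc, 1000 /* mbps */, TRI_STATE_TRUE);

	if (!err)
		err = mana_query_link_cfg(apc);
	return err;
}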
@@ -466,8 +684,14 @@ struct mana_query_device_cfg_resp {
u64 pf_cap_flags4;
u16 max_num_vports;
- u16 reserved;
+ u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
+ u8 reserved;
u32 max_num_eqs;
+
+ /* response v2: */
+ u16 adapter_mtu;
+ u16 reserved2;
+ u32 reserved3;
}; /* HW DATA */
/* Query vPort Configuration */
@@ -545,8 +769,119 @@ struct mana_fence_rq_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Query stats RQ */
+struct mana_query_gf_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_gf_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+ /* rx errors/discards */
+ u64 rx_discards_nowqe;
+ u64 rx_err_vport_disabled;
+ /* rx bytes/packets */
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ /* tx errors */
+ u64 tx_err_gf_disabled;
+ u64 tx_err_vport_disabled;
+ u64 tx_err_inval_vport_offset_pkt;
+ u64 tx_err_vlan_enforcement;
+ u64 tx_err_ethtype_enforcement;
+ u64 tx_err_SA_enforcement;
+ u64 tx_err_SQPDID_enforcement;
+ u64 tx_err_CQPDID_enforcement;
+ u64 tx_err_mtu_violation;
+ u64 tx_err_inval_oob;
+ /* tx bytes/packets */
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
+ /* tx error */
+ u64 tx_err_gdma;
+}; /* HW DATA */
+
+/* Query phy stats */
+struct mana_query_phy_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_phy_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+
+ /* Aggregate Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC(Traffic class) traffic Counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC(Traffic Class) pause Counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
+}; /* HW DATA */
+
/* Configure vPort Rx Steering */
-struct mana_cfg_rx_steer_req {
+struct mana_cfg_rx_steer_req_v2 {
struct gdma_req_hdr hdr;
mana_handle_t vport;
u16 num_indir_entries;
@@ -559,6 +894,9 @@ struct mana_cfg_rx_steer_req {
u8 reserved;
mana_handle_t default_rxobj;
u8 hashkey[MANA_HASH_KEY_SIZE];
+ u8 cqe_coalescing_enable;
+ u8 reserved2[7];
+ mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */
struct mana_cfg_rx_steer_resp {
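Annotation: because indir_tab[] is now a flexible array, the v2 steering request is sized at runtime from the negotiated table size. A hedged build sketch using the kernel's struct_size()/flex_array_size() helpers (function name hypothetical):

static struct mana_cfg_rx_steer_req_v2 *
mana_example_build_steer_req(struct mana_port_context *apc)
{
	struct mana_cfg_rx_steer_req_v2 *req;

	req = kzalloc(struct_size(req, indir_tab, apc->indir_table_sz),
		      GFP_KERNEL);
	if (!req)
		return NULL;

	req->num_indir_entries = apc->indir_table_sz;
	memcpy(req->indir_tab, apc->rxobj_table,
	       flex_array_size(req, indir_tab, req->num_indir_entries));
	return req;
}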
@@ -622,6 +960,42 @@ struct mana_deregister_filter_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Requested GF stats Flags */
+/* Rx discards/Errors */
+#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
+#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002
+/* Rx bytes/pkts */
+#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
+#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
+#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
+#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
+#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
+#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
+#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100
+/* Tx errors */
+#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
+#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
+#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
+ 0x0000000000000800
+#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
+#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
+ 0x0000000000002000
+#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
+#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
+#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
+#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
+#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000
+/* Tx bytes/pkts */
+#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
+#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
+#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
+#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
+#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
+#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
+#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000
+/* Tx error */
+#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
+
#define MANA_MAX_NUM_QUEUES 64
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
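Annotation: these bits populate req_stats in mana_query_gf_stat_req, and the device echoes what it actually reported in reported_stats. A trivial sketch of composing a mask (helper hypothetical):

static u64 mana_example_gf_stats_mask(void)
{
	/* request only the host-counter byte statistics */
	return STATISTICS_FLAGS_HC_RX_BYTES |
	       STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
	       STATISTICS_FLAGS_HC_TX_BYTES |
	       STATISTICS_FLAGS_HC_TX_UCAST_BYTES;
}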
@@ -648,4 +1022,8 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
+
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker);
#endif /* _MANA_H */
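Annotation: mana_get_primary_netdev() hands out a reference-tracked netdev for auxiliary consumers such as the RDMA driver added in this series. A hedged usage sketch (helper hypothetical), releasing both the reference and the tracker with netdev_put():

static void mana_example_use_port0(struct mana_context *ac)
{
	netdevice_tracker tracker;
	struct net_device *ndev;

	ndev = mana_get_primary_netdev(ac, 0, &tracker);
	if (!ndev)
		return;
	/* ... use ndev; the tracked reference keeps it alive ... */
	netdev_put(ndev, &tracker);
}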