Diffstat (limited to 'include/linux/mlx5/driver.h')
| -rw-r--r-- | include/linux/mlx5/driver.h | 869 |
1 file changed, 575 insertions(+), 294 deletions(-)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 54299251d40d..1c54aa6f74fb 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -36,34 +36,36 @@ #include <linux/kernel.h> #include <linux/completion.h> #include <linux/pci.h> +#include <linux/pci-tph.h> #include <linux/irq.h> #include <linux/spinlock_types.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <linux/radix-tree.h> +#include <linux/xarray.h> #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> -#include <linux/idr.h> #include <linux/notifier.h> +#include <linux/refcount.h> +#include <linux/auxiliary_bus.h> +#include <linux/mutex.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> #include <linux/mlx5/eq.h> #include <linux/timecounter.h> -#include <linux/ptp_clock_kernel.h> +#include <net/devlink.h> + +#define MLX5_ADEV_NAME "mlx5_core" + +#define MLX5_IRQ_EQ_CTRL (U8_MAX) enum { MLX5_BOARD_ID_LEN = 64, - MLX5_MAX_NAME_LEN = 16, }; enum { - /* one minute for the sake of bringup. Generally, commands must always - * complete and we may need to increase this timeout value - */ - MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; @@ -82,7 +84,7 @@ enum mlx5_sqp_t { }; enum { - MLX5_MAX_PORTS = 2, + MLX5_MAX_PORTS = 8, }; enum { @@ -98,6 +100,8 @@ enum { }; enum { + MLX5_REG_SBPR = 0xb001, + MLX5_REG_SBCM = 0xb002, MLX5_REG_QPTS = 0x4002, MLX5_REG_QETCR = 0x4005, MLX5_REG_QTCT = 0x400a, @@ -108,6 +112,7 @@ enum { MLX5_REG_FPGA_CAP = 0x4022, MLX5_REG_FPGA_CTRL = 0x4023, MLX5_REG_FPGA_ACCESS_REG = 0x4024, + MLX5_REG_CORE_DUMP = 0x402e, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -122,25 +127,47 @@ enum { MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, + MLX5_REG_PDDR = 0x5031, MLX5_REG_PMLP = 0x5002, MLX5_REG_PPLM = 0x5023, + MLX5_REG_PPHCR = 0x503E, MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MTCAP = 0x9009, + MLX5_REG_MTMP = 0x900A, MLX5_REG_MCIA = 0x9014, + MLX5_REG_MNVDA = 0x9024, + MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MRTC = 0x902d, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, MLX5_REG_MTRC_STDB = 0x9042, MLX5_REG_MTRC_CTRL = 0x9043, + MLX5_REG_MPEIN = 0x9050, MLX5_REG_MPCNT = 0x9051, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MTUTC = 0x9055, MLX5_REG_MPEGC = 0x9056, + MLX5_REG_MPIR = 0x9059, + MLX5_REG_MCQS = 0x9060, MLX5_REG_MCQI = 0x9061, MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, + MLX5_REG_MSECQ = 0x9155, + MLX5_REG_MSEES = 0x9156, + MLX5_REG_MIRC = 0x9162, + MLX5_REG_MTPTM = 0x9180, + MLX5_REG_MTCTR = 0x9181, + MLX5_REG_MRTCQ = 0x9182, + MLX5_REG_SBCAM = 0xB01F, + MLX5_REG_RESOURCE_DUMP = 0xC000, + MLX5_REG_NIC_CAP = 0xC00D, + MLX5_REG_DTOR = 0xC00E, + MLX5_REG_VHCA_ICM_CTRL = 0xC010, }; enum mlx5_qpts_trust_state { @@ -180,8 +207,13 @@ enum port_state_policy { MLX5_POLICY_INVALID = 0xffffffff }; +enum mlx5_coredev_type { + MLX5_COREDEV_PF, + MLX5_COREDEV_VF, + MLX5_COREDEV_SF, +}; + struct mlx5_field_desc { - struct dentry *dent; int i; }; @@ -190,11 +222,13 @@ struct mlx5_rsc_debug { void *object; enum dbg_rsc_type type; struct dentry *root; - struct mlx5_field_desc fields[0]; + struct mlx5_field_desc fields[]; }; enum mlx5_dev_event { MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ + MLX5_DEV_EVENT_PORT_AFFINITY = 129, + MLX5_DEV_EVENT_MULTIPORT_ESW = 130, }; enum 
mlx5_port_status { @@ -202,21 +236,10 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; -struct mlx5_bfreg_info { - u32 *sys_pages; - int num_low_latency_bfregs; - unsigned int *count; - - /* - * protect bfreg allocation data structs - */ - struct mutex lock; - u32 ver; - bool lib_uar_4k; - u32 num_sys_pages; - u32 num_static_sys_pages; - u32 total_num_bfregs; - u32 num_dyn_bfregs; +enum mlx5_cmdif_state { + MLX5_CMDIF_STATE_UNINITIALIZED, + MLX5_CMDIF_STATE_UP, + MLX5_CMDIF_STATE_DOWN, }; struct mlx5_cmd_first { @@ -233,11 +256,6 @@ struct mlx5_cmd_msg { struct mlx5_cmd_debug { struct dentry *dbg_root; - struct dentry *dbg_in; - struct dentry *dbg_out; - struct dentry *dbg_outlen; - struct dentry *dbg_status; - struct dentry *dbg_run; void *in_msg; void *out_msg; u8 status; @@ -261,9 +279,17 @@ enum { struct mlx5_cmd_stats { u64 sum; u64 n; + /* number of times command failed */ + u64 failed; + /* number of times command failed on bad status returned by FW */ + u64 failed_mbox_status; + /* last command failed returned errno */ + u32 last_failed_errno; + /* last bad status returned by FW */ + u8 last_failed_mbox_status; + /* last command failed syndrome returned by FW */ + u32 last_failed_syndrome; struct dentry *root; - struct dentry *avg; - struct dentry *count; /* protect command average calculations */ spinlock_t lock; }; @@ -271,17 +297,25 @@ struct mlx5_cmd_stats { struct mlx5_cmd { struct mlx5_nb nb; + /* members which needs to be queried or reinitialized each reload */ + struct { + u16 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + unsigned long bitmask; + struct semaphore sem; + struct semaphore pages_sem; + struct semaphore throttle_sem; + struct semaphore unprivileged_sem; + struct xarray privileged_uids; + } vars; + enum mlx5_cmdif_state state; void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; void *cmd_buf; dma_addr_t dma; - u16 cmdif_rev; - u8 log_sz; - u8 log_stride; - int max_reg_cmds; - int events; - u32 __iomem *vector; /* protect command queue allocations */ @@ -291,25 +325,16 @@ struct mlx5_cmd { */ spinlock_t token_lock; u8 token; - unsigned long bitmask; char wq_name[MLX5_CMD_WQ_MAX_NAME]; struct workqueue_struct *wq; - struct semaphore sem; - struct semaphore pages_sem; int mode; + u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; struct dma_pool *pool; struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; - struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; -}; - -struct mlx5_port_caps { - int gid_table_len; - int pkey_table_len; - u8 ext_port_cap; - bool has_smi; + struct xarray stats; }; struct mlx5_cmd_mailbox { @@ -361,19 +386,6 @@ struct mlx5_core_sig_ctx { u32 sigerr_count; }; -enum { - MLX5_MKEY_MR = 1, - MLX5_MKEY_MW, -}; - -struct mlx5_core_mkey { - u64 iova; - u64 size; - u32 key; - u32 pd; - u32 type; -}; - #define MLX5_24BIT_MASK ((1 << 24) - 1) enum mlx5_res_type { @@ -383,13 +395,13 @@ enum mlx5_res_type { MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, MLX5_RES_XRQ = 5, - MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT, }; struct mlx5_core_rsc_common { enum mlx5_res_type res; - atomic_t refcount; + refcount_t refcount; struct completion free; + bool invalid; }; struct mlx5_uars_page { @@ -422,7 +434,6 @@ struct mlx5_sq_bfreg { struct mlx5_uars_page *up; bool wc; u32 index; - unsigned int offset; }; struct mlx5_core_health { @@ -431,63 +442,56 @@ struct mlx5_core_health { struct timer_list timer; u32 prev; int miss_counter; - bool sick; - /* wq spinlock to synchronize draining 
*/ - spinlock_t wq_lock; + u8 synd; + u32 fatal_error; + u32 crdump_size; struct workqueue_struct *wq; unsigned long flags; - struct work_struct work; - struct delayed_work recover_work; -}; - -struct mlx5_qp_table { - struct notifier_block nb; - - /* protect radix tree - */ - spinlock_t lock; - struct radix_tree_root tree; + struct work_struct fatal_report_work; + struct work_struct report_work; + struct devlink_health_reporter *fw_reporter; + struct devlink_health_reporter *fw_fatal_reporter; + struct devlink_health_reporter *vnic_reporter; + struct delayed_work update_fw_log_ts_work; }; -struct mlx5_mkey_table { - /* protect radix tree - */ - rwlock_t lock; - struct radix_tree_root tree; +enum { + MLX5_PF_NOTIFY_DISABLE_VF, + MLX5_PF_NOTIFY_ENABLE_VF, }; struct mlx5_vf_context { int enabled; u64 port_guid; u64 node_guid; + /* Valid bits are used to validate administrative guid only. + * Enabled after ndo_set_vf_guid + */ + u8 port_guid_valid:1; + u8 node_guid_valid:1; enum port_state_policy policy; + struct blocking_notifier_head notifier; }; struct mlx5_core_sriov { struct mlx5_vf_context *vfs_ctx; int num_vfs; - int enabled_vfs; -}; - -struct mlx5_fc_stats { - spinlock_t counters_idr_lock; /* protects counters_idr */ - struct idr counters_idr; - struct list_head counters; - struct llist_head addlist; - struct llist_head dellist; - - struct workqueue_struct *wq; - struct delayed_work work; - unsigned long next_query; - unsigned long sampling_interval; /* jiffies */ + u16 max_vfs; + u16 max_ec_vfs; }; struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; struct mlx5_lag; -struct mlx5_devcom; +struct mlx5_devcom_dev; +struct mlx5_fw_reset; struct mlx5_eq_table; +struct mlx5_irq_table; +struct mlx5_sf_dev_table; +struct mlx5_sf_hw_table; +struct mlx5_sf_table; +struct mlx5_crypto_dek_priv; struct mlx5_rate_limit { u32 rate; @@ -496,9 +500,11 @@ struct mlx5_rate_limit { }; struct mlx5_rl_entry { - struct mlx5_rate_limit rl; - u16 index; - u16 refcount; + u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)]; + u64 refcount; + u16 index; + u16 uid; + u8 dedicated : 1; }; struct mlx5_rl_table { @@ -508,75 +514,131 @@ struct mlx5_rl_table { u32 max_rate; u32 min_rate; struct mlx5_rl_entry *rl_entry; + u64 refcount; +}; + +struct mlx5_core_roce { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_flow_handle *allow_rule; +}; + +enum { + MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0, + MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1, + /* Set during device detach to block any further devices + * creation/deletion on drivers rescan. Unset during device attach. 
+ */ + MLX5_PRIV_FLAGS_DETACH = 1 << 2, + MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3, +}; + +struct mlx5_adev { + struct auxiliary_device adev; + struct mlx5_core_dev *mdev; + int idx; +}; + +struct mlx5_debugfs_entries { + struct dentry *dbg_root; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + struct dentry *pages_debugfs; + struct dentry *lag_debugfs; +}; + +enum mlx5_func_type { + MLX5_PF, + MLX5_VF, + MLX5_SF, + MLX5_HOST_PF, + MLX5_EC_VF, + MLX5_FUNC_TYPE_NUM, }; +struct mlx5_ft_pool; struct mlx5_priv { - char name[MLX5_MAX_NAME_LEN]; + /* IRQ table valid only for real pci devices PF or VF */ + struct mlx5_irq_table *irq_table; struct mlx5_eq_table *eq_table; /* pages stuff */ struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; - struct rb_root page_root; - int fw_pages; + struct xarray page_root_xa; atomic_t reg_pages; struct list_head free_list; - int vfs_pages; + u32 fw_pages; + u32 page_counters[MLX5_FUNC_TYPE_NUM]; + u32 fw_pages_alloc_failed; + u32 give_pages_dropped; + u32 reclaim_pages_discard; struct mlx5_core_health health; + struct list_head traps; - /* start: qp staff */ - struct mlx5_qp_table qp_table; - struct dentry *qp_debugfs; - struct dentry *eq_debugfs; - struct dentry *cq_debugfs; - struct dentry *cmdif_debugfs; - /* end: qp staff */ - - /* start: mkey staff */ - struct mlx5_mkey_table mkey_table; - /* end: mkey staff */ + struct mlx5_debugfs_entries dbg; /* start: alloc staff */ - /* protect buffer alocation according to numa node */ + /* protect buffer allocation according to numa node */ struct mutex alloc_mutex; int numa_node; struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ - struct dentry *dbg_root; - - /* protect mkey key part */ - spinlock_t mkey_lock; - u8 mkey_key; - struct list_head dev_list; - struct list_head ctx_list; - spinlock_t ctx_lock; + struct mlx5_adev **adev; + int adev_idx; + int sw_vhca_id; struct mlx5_events *events; + struct mlx5_vhca_events *vhca_events; struct mlx5_flow_steering *steering; struct mlx5_mpfs *mpfs; + struct blocking_notifier_head esw_n_head; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; - struct mlx5_devcom *devcom; - unsigned long pci_dev_data; - struct mlx5_fc_stats fc_stats; + u32 flags; + struct mlx5_devcom_dev *devc; + struct mlx5_devcom_comp_dev *hca_devcom_comp; + struct mlx5_fw_reset *fw_reset; + struct mlx5_core_roce roce; + struct mlx5_fc_stats *fc_stats; struct mlx5_rl_table rl_table; + struct mlx5_ft_pool *ft_pool; struct mlx5_bfreg_data bfregs; - struct mlx5_uars_page *uar; + struct mlx5_sq_bfreg bfreg; +#ifdef CONFIG_MLX5_SF + struct mlx5_nb vhca_state_nb; + struct blocking_notifier_head vhca_state_n_head; + struct notifier_block sf_dev_nb; + struct mlx5_sf_dev_table *sf_dev_table; + struct mlx5_core_dev *parent_mdev; +#endif +#ifdef CONFIG_MLX5_SF_MANAGER + struct notifier_block sf_hw_table_vhca_nb; + struct mlx5_sf_hw_table *sf_hw_table; + struct notifier_block sf_table_esw_nb; + struct notifier_block sf_table_vhca_nb; + struct notifier_block sf_table_mdev_nb; + struct mlx5_sf_table *sf_table; +#endif + struct blocking_notifier_head lag_nh; }; enum mlx5_device_state { - MLX5_DEVICE_STATE_UP, + MLX5_DEVICE_STATE_UP = 1, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; enum mlx5_interface_state { MLX5_INTERFACE_STATE_UP = BIT(0), + MLX5_BREAK_FW_WAIT = BIT(1), }; enum mlx5_pci_status { @@ -591,15 +653,34 @@ enum mlx5_pagefault_type_flags { }; struct mlx5_td { + /* protects tirs list 
changes while tirs refresh */ + struct mutex list_lock; struct list_head tirs_list; u32 tdn; }; struct mlx5e_resources { - u32 pdn; - struct mlx5_td td; - struct mlx5_core_mkey mkey; - struct mlx5_sq_bfreg bfreg; + struct mlx5e_hw_objs { + u32 pdn; + struct mlx5_td td; + u32 mkey; + struct mlx5_sq_bfreg *bfregs; + unsigned int num_bfregs; +#define MLX5_MAX_NUM_TC 8 + u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; + bool tisn_valid; + } hw_objs; + struct net_device *uplink_netdev; + netdevice_tracker tracker; + struct mutex uplink_netdev_lock; + struct mlx5_crypto_dek_priv *dek_priv; +}; + +enum mlx5_sw_icm_type { + MLX5_SW_ICM_TYPE_STEERING, + MLX5_SW_ICM_TYPE_HEADER_MODIFY, + MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN, + MLX5_SW_ICM_TYPE_SW_ENCAP, }; #define MLX5_MAX_RESERVED_GIDS 8 @@ -610,33 +691,53 @@ struct mlx5_rsvd_gids { struct ida ida; }; -#define MAX_PIN_NUM 8 -struct mlx5_pps { - u8 pin_caps[MAX_PIN_NUM]; - struct work_struct out_work; - u64 start[MAX_PIN_NUM]; - u8 enabled; +struct mlx5_clock; +struct mlx5_clock_dev_state; +struct mlx5_dm; +struct mlx5_fw_tracer; +struct mlx5_vxlan; +struct mlx5_geneve; +struct mlx5_hv_vhca; +struct mlx5_st; + +#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) +#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) + +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, +}; + +enum { + MKEY_CACHE_LAST_STD_ENTRY = 20, + MLX5_IMR_KSM_CACHE_ENTRY, + MAX_MKEY_CACHE_ENTRIES }; -struct mlx5_clock { - struct mlx5_core_dev *mdev; - struct mlx5_nb pps_nb; - seqlock_t lock; - struct cyclecounter cycles; - struct timecounter tc; - struct hwtstamp_config hwtstamp_config; - u32 nominal_c_mult; - unsigned long overflow_period; - struct delayed_work overflow_work; - struct ptp_clock *ptp; - struct ptp_clock_info ptp_info; - struct mlx5_pps pps_info; +struct mlx5_profile { + u64 mask; + u8 log_max_qp; + u8 num_cmd_caches; + struct { + int size; + int limit; + } mr_cache[MAX_MKEY_CACHE_ENTRIES]; }; -struct mlx5_fw_tracer; -struct mlx5_vxlan; +struct mlx5_hca_cap { + u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; + u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; +}; + +enum mlx5_wc_state { + MLX5_WC_STATE_UNINITIALIZED, + MLX5_WC_STATE_UNSUPPORTED, + MLX5_WC_STATE_SUPPORTED, +}; struct mlx5_core_dev { + struct device *device; + enum mlx5_coredev_type coredev_type; struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; @@ -644,28 +745,32 @@ struct mlx5_core_dev { u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; - struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; struct { - u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; - u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; - u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; + u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; + u8 embedded_cpu; } caps; + struct mlx5_timeouts *timeouts; u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; + phys_addr_t bar_addr; enum mlx5_device_state state; /* sync interface state */ struct mutex intf_state_mutex; + struct lock_class_key lock_key; unsigned long intf_state; struct mlx5_priv priv; - struct mlx5_profile *profile; - atomic_t num_qps; + struct mlx5_profile profile; u32 issi; struct mlx5e_resources mlx5e_res; + struct mlx5_dm *dm; + struct mlx5_st *st; struct mlx5_vxlan *vxlan; + struct 
mlx5_geneve *geneve; struct { struct mlx5_rsvd_gids reserved_gids; u32 roce_en; @@ -673,10 +778,26 @@ struct mlx5_core_dev { #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif - struct mlx5_clock clock; + struct mlx5_clock *clock; + struct mlx5_clock_dev_state *clock_state; struct mlx5_ib_clock_info *clock_info; - struct page *clock_info_page; struct mlx5_fw_tracer *tracer; + struct mlx5_rsc_dump *rsc_dump; + u32 vsc_addr; + struct mlx5_hv_vhca *hv_vhca; + struct mlx5_hwmon *hwmon; + u64 num_block_tc; + u64 num_block_ipsec; +#ifdef CONFIG_MLX5_MACSEC + struct mlx5_macsec_fs *macsec_fs; + /* MACsec notifier chain to sync MACsec core and IB database */ + struct blocking_notifier_head macsec_nh; +#endif + u64 num_ipsec_offloads; + struct mlx5_sd *sd; + enum mlx5_wc_state wc_state; + /* sync write combining state */ + struct mutex wc_state_lock; }; struct mlx5_db { @@ -689,6 +810,8 @@ struct mlx5_db { int index; }; +#define MLX5_DEFAULT_NUM_DOORBELLS 8 + enum { MLX5_COMP_EQ_SIZE = 1024, }; @@ -702,6 +825,7 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context); enum { MLX5_CMD_ENT_STATE_PENDING_COMP, + MLX5_CMD_ENT_STATE_TIMEDOUT, }; struct mlx5_cmd_work_ent { @@ -714,6 +838,8 @@ struct mlx5_cmd_work_ent { struct delayed_work cb_timeout_work; void *context; int idx; + struct completion handling; + struct completion slotted; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -726,11 +852,8 @@ struct mlx5_cmd_work_ent { u64 ts2; u16 op; bool polling; -}; - -struct mlx5_pas { - u64 pa; - u8 log_sz; + /* Track the max comp handlers */ + refcount_t refcnt; }; enum phy_port_state { @@ -762,22 +885,13 @@ struct mlx5_hca_vport_context { u16 qkey_violation_counter; u16 pkey_violation_counter; bool grh_required; + u8 num_plane; }; -static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset) -{ - return buf->frags->buf + offset; -} - #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field -static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) -{ - return pci_get_drvdata(pdev); -} - extern struct dentry *mlx5_debugfs_root; static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) @@ -795,14 +909,14 @@ static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } -static inline u16 cmdif_rev(struct mlx5_core_dev *dev) +static inline u32 mlx5_base_mkey(const u32 key) { - return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + return key & 0xffffff00u; } -static inline u32 mlx5_base_mkey(const u32 key) +static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) { - return key & 0xffffff00u; + return ((u32)1 << log_sz) << log_stride; } static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags, @@ -845,104 +959,131 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); } -int mlx5_cmd_init(struct mlx5_core_dev *dev); -void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +enum { + CMD_ALLOWED_OPCODE_ALL, +}; + void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); + +struct mlx5_async_ctx { + struct mlx5_core_dev *dev; + atomic_t num_inflight; + struct completion inflight_done; +}; +struct mlx5_async_work; + +typedef void 
(*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); + +struct mlx5_async_work { + struct mlx5_async_ctx *ctx; + mlx5_async_cbk_t user_callback; + u16 opcode; /* cmd opcode */ + u16 op_mod; /* cmd op_mod */ + u8 throttle_locked:1; + u8 unpriv_locked:1; + void *out; /* pointer to the cmd output buffer */ +}; + +void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, + struct mlx5_async_ctx *ctx); +void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); +int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, + void *out, int out_size, mlx5_async_cbk_t callback, + struct mlx5_async_work *work); +void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out); +int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); +int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); -int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, - void *out, int out_size, mlx5_cmd_cbk_t callback, - void *context); + +#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \ + ({ \ + mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \ + MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \ + }) + +#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \ + ({ \ + u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \ + mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \ + }) + int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); -void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); +bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); +int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid); +void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid); + +void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev); +void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); + +void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data); -int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); -int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); -int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); +void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); -void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); -int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_frag_buf *buf, int node); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, - int size, struct mlx5_frag_buf *buf); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); -struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, - gfp_t flags, int npages); -void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, - struct mlx5_cmd_mailbox *head); -void mlx5_init_mkey_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); -int mlx5_core_create_mkey_cb(struct 
mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - u32 *in, int inlen, - u32 *out, int outlen, - mlx5_cmd_cbk_t callback, void *context); -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - u32 *in, int inlen); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *out, int outlen); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, + int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out, + int outlen); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); -int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, - u16 opmod, u8 port); int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); -void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s32 npages); +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); -void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); +void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn); +int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); -int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev); +void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, + void *data_out, int size_out, u16 reg_id, int arg, + int write, bool verbose); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node); + +static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + return mlx5_db_alloc_node(dev, db, dev->priv.numa_node); +} + void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); -int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); +__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); -int mlx5_query_odp_caps(struct mlx5_core_dev 
*dev, - struct mlx5_odp_caps *odp_caps); -int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, - u8 port_num, void *out, size_t sz); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, - u32 wq_num, u8 type, int error); -#endif int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); @@ -950,25 +1091,22 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, struct mlx5_rate_limit *rl); void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid, + bool dedicated_entry, u16 *index); +void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index); bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, struct mlx5_rate_limit *rl_1); int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); -unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev); -struct cpumask * -mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector); +unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev); +int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector); unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); -static inline int fw_initializing(struct mlx5_core_dev *dev) -{ - return ioread32be(&dev->iseg->initializing) >> 31; -} - static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; @@ -984,37 +1122,26 @@ static inline u8 mlx5_mkey_variant(u32 mkey) return mkey & 0xff; } -enum { - MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, - MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, -}; - -enum { - MR_CACHE_LAST_STD_ENTRY = 20, - MLX5_IMR_MTT_CACHE_ENTRY, - MLX5_IMR_KSM_CACHE_ENTRY, - MAX_MR_CACHE_ENTRIES -}; - -enum { - MLX5_INTERFACE_PROTOCOL_IB = 0, - MLX5_INTERFACE_PROTOCOL_ETH = 1, -}; - -struct mlx5_interface { - void * (*add)(struct mlx5_core_dev *dev); - void (*remove)(struct mlx5_core_dev *dev, void *context); - int (*attach)(struct mlx5_core_dev *dev, void *context); - void (*detach)(struct mlx5_core_dev *dev, void *context); - int protocol; - struct list_head list; -}; - -int mlx5_register_interface(struct mlx5_interface *intf); -void mlx5_unregister_interface(struct mlx5_interface *intf); +/* Async-atomic event notifier used by mlx5 core to forward FW + * evetns received from event queue to mlx5 consumers. + * Optimise event queue dipatching. + */ int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +/* Async-atomic event notifier used for forwarding + * evetns from the event queue into the to mlx5 events dispatcher, + * eswitch, clock and others. 
+ */ +int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); +int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); + +/* Blocking event notifier used to forward SW events, used for slow path */ +int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, + void *data); + int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); @@ -1022,47 +1149,119 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); -struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev); +bool mlx5_lag_is_master(struct mlx5_core_dev *dev); +bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); +bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev); +u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, + struct net_device *slave); int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, size_t *offsets); +struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i); + +#define mlx5_lag_for_each_peer_mdev(dev, peer, i) \ + for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i); \ + peer; \ + peer = mlx5_lag_get_next_peer_mdev(dev, &i)) + +u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); +int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u32 log_alignment, u16 uid, + phys_addr_t *addr, u32 *obj_id); +int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t addr, u32 obj_id); + +#ifdef CONFIG_PCIE_TPH +int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *st_index); +int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index); +#else +static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev, + enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *st_index) +{ + return -EOPNOTSUPP; +} +static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index) +{ + return -EOPNOTSUPP; +} +#endif -#ifdef CONFIG_MLX5_CORE_IPOIB -struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, - struct ib_device *ibdev, - const char *name, - void (*setup)(struct net_device *)); -#endif /* CONFIG_MLX5_CORE_IPOIB */ +struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev); +void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev); + +int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev, + int vf_id, + struct notifier_block *nb); +void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev, + int vf_id, + struct notifier_block *nb); int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, struct ib_device *device, struct rdma_netdev_alloc_params *params); -struct mlx5_profile { - u64 mask; - u8 log_max_qp; - struct { - int size; - int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; -}; - enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; -static inline int 
mlx5_core_is_pf(struct mlx5_core_dev *dev) +static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) +{ + return dev->coredev_type == MLX5_COREDEV_PF; +} + +static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) +{ + return dev->coredev_type == MLX5_COREDEV_VF; +} + +static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1, + const struct mlx5_core_dev *dev2) +{ + return dev1->coredev_type == dev2->coredev_type; +} + +static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) { - return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); + return dev->caps.embedded_cpu; } -#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev)) -#define MLX5_VPORT_MANAGER(mdev) \ - (MLX5_CAP_GEN(mdev, vport_group_manager) && \ - (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ - mlx5_core_is_pf(mdev)) +static inline bool +mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) +{ + return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); +} + +static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) +{ + return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); +} + +static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) +{ + return dev->priv.sriov.max_vfs; +} + +static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) +{ + /* LACP owner conditions: + * 1) Function is physical. + * 2) LAG is supported by FW. + * 3) LAG is managed by driver (currently the only option). + */ + return MLX5_CAP_GEN(dev, vport_group_manager) && + (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && + MLX5_CAP_GEN(dev, lag_master); +} + +static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev) +{ + return dev->priv.sriov.max_ec_vfs; +} static inline int mlx5_get_gid_table_len(u16 param) { @@ -1104,8 +1303,90 @@ static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) return MLX5_CAP_GEN(dev, native_port_num); } +static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev) +{ + int idx = MLX5_CAP_GEN(dev, native_port_num); + + if (idx >= 1 && idx <= MLX5_MAX_PORTS) + return idx - 1; + else + return PCI_FUNC(dev->pdev->devfn); +} + enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; +bool mlx5_is_roce_on(struct mlx5_core_dev *dev); + +static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev) +{ + if (MLX5_CAP_GEN(dev, roce_rw_supported)) + return MLX5_CAP_GEN(dev, roce); + + /* If RoCE cap is read-only in FW, get RoCE state from devlink + * in order to support RoCE enable/disable feature + */ + return mlx5_is_roce_on(dev); +} + +#ifdef CONFIG_MLX5_MACSEC +static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) +{ + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) || + !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || + !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) && + !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt)) + return false; + + return 
true; +} + +#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX) + +static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev) +{ + if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & + NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) || + !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) || + !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs) + return false; + + return true; +} +#endif + +enum { + MLX5_OCTWORD = 16, +}; + +bool mlx5_wc_support_get(struct mlx5_core_dev *mdev); + +static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) +{ + return devlink_net(priv_to_devlink(dev)); +} + +#define MLX5_SW_IMAGE_GUID_MAX_BYTES 9 + #endif /* MLX5_DRIVER_H */
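
For context, a minimal sketch (not part of the diff) of how a caller uses the mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() wrappers declared above. The destroy_cq command layout and the MLX5_SET()/MLX5_ST_SZ_DW() helpers are assumed from mlx5_ifc.h; the example_ name is illustrative only.

/*
 * Sketch only: issuing a FW command through the convenience wrapper.
 * Assumes <linux/mlx5/driver.h> and <linux/mlx5/mlx5_ifc.h> are included.
 */
static int example_destroy_cq(struct mlx5_core_dev *dev, u32 cqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cqn);

	/* The macro sizes the output buffer from the command name, so the
	 * caller only builds the "in" mailbox.
	 */
	return mlx5_cmd_exec_in(dev, destroy_cq, in);
}

As defined above, the wrappers derive both buffer sizes from the mlx5_ifc command name, which removes the repeated MLX5_ST_SZ_BYTES() arguments at call sites of plain mlx5_cmd_exec().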
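
Similarly, a hedged sketch of the asynchronous command path exposed by struct mlx5_async_ctx and mlx5_cmd_exec_cb() above. The alloc_pd command and all example_ names are illustrative assumptions; the context is expected to be set up with mlx5_cmd_init_async_ctx() and torn down with mlx5_cmd_cleanup_async_ctx() elsewhere.

/* Sketch only: output buffer and work item must outlive the async call. */
struct example_async {
	struct mlx5_async_work work;	/* recovered via container_of() in the callback */
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)];
};

static void example_done(int status, struct mlx5_async_work *context)
{
	struct example_async *ex = container_of(context, struct example_async, work);

	if (!status)
		pr_debug("allocated pd 0x%x\n", MLX5_GET(alloc_pd_out, ex->out, pd));
}

static int example_alloc_pd_async(struct mlx5_async_ctx *ctx,
				  struct example_async *ex)
{
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	/* Completion is reported to example_done(); the ctx tracks in-flight
	 * commands so cleanup can wait for them.
	 */
	return mlx5_cmd_exec_cb(ctx, in, sizeof(in), ex->out, sizeof(ex->out),
				example_done, &ex->work);
}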
