Diffstat (limited to 'include/linux/mlx5/driver.h')
-rw-r--r--   include/linux/mlx5/driver.h | 1473
1 file changed, 1048 insertions(+), 425 deletions(-)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f22e4419839b..1c54aa6f74fb 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -36,23 +36,36 @@ #include <linux/kernel.h> #include <linux/completion.h> #include <linux/pci.h> +#include <linux/pci-tph.h> +#include <linux/irq.h> #include <linux/spinlock_types.h> #include <linux/semaphore.h> +#include <linux/slab.h> #include <linux/vmalloc.h> -#include <linux/radix-tree.h> +#include <linux/xarray.h> +#include <linux/workqueue.h> +#include <linux/mempool.h> +#include <linux/interrupt.h> +#include <linux/notifier.h> +#include <linux/refcount.h> +#include <linux/auxiliary_bus.h> +#include <linux/mutex.h> + #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> +#include <linux/mlx5/eq.h> +#include <linux/timecounter.h> +#include <net/devlink.h> + +#define MLX5_ADEV_NAME "mlx5_core" + +#define MLX5_IRQ_EQ_CTRL (U8_MAX) enum { MLX5_BOARD_ID_LEN = 64, - MLX5_MAX_NAME_LEN = 16, }; enum { - /* one minute for the sake of bringup. Generally, commands must always - * complete and we may need to increase this timeout value - */ - MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; @@ -71,116 +84,114 @@ enum mlx5_sqp_t { }; enum { - MLX5_MAX_PORTS = 2, -}; - -enum { - MLX5_EQ_VEC_PAGES = 0, - MLX5_EQ_VEC_CMD = 1, - MLX5_EQ_VEC_ASYNC = 2, - MLX5_EQ_VEC_COMP_BASE, -}; - -enum { - MLX5_MAX_EQ_NAME = 20 + MLX5_MAX_PORTS = 8, }; enum { - MLX5_ATOMIC_MODE_IB_COMP = 1 << 16, - MLX5_ATOMIC_MODE_CX = 2 << 16, - MLX5_ATOMIC_MODE_8B = 3 << 16, - MLX5_ATOMIC_MODE_16B = 4 << 16, - MLX5_ATOMIC_MODE_32B = 5 << 16, - MLX5_ATOMIC_MODE_64B = 6 << 16, - MLX5_ATOMIC_MODE_128B = 7 << 16, - MLX5_ATOMIC_MODE_256B = 8 << 16, -}; - -enum { - MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, - MLX5_CMD_OP_QUERY_ADAPTER = 0x101, - MLX5_CMD_OP_INIT_HCA = 0x102, - MLX5_CMD_OP_TEARDOWN_HCA = 0x103, - MLX5_CMD_OP_QUERY_PAGES = 0x107, - MLX5_CMD_OP_MANAGE_PAGES = 0x108, - MLX5_CMD_OP_SET_HCA_CAP = 0x109, - - MLX5_CMD_OP_CREATE_MKEY = 0x200, - MLX5_CMD_OP_QUERY_MKEY = 0x201, - MLX5_CMD_OP_DESTROY_MKEY = 0x202, - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, - - MLX5_CMD_OP_CREATE_EQ = 0x301, - MLX5_CMD_OP_DESTROY_EQ = 0x302, - MLX5_CMD_OP_QUERY_EQ = 0x303, - - MLX5_CMD_OP_CREATE_CQ = 0x400, - MLX5_CMD_OP_DESTROY_CQ = 0x401, - MLX5_CMD_OP_QUERY_CQ = 0x402, - MLX5_CMD_OP_MODIFY_CQ = 0x403, - - MLX5_CMD_OP_CREATE_QP = 0x500, - MLX5_CMD_OP_DESTROY_QP = 0x501, - MLX5_CMD_OP_RST2INIT_QP = 0x502, - MLX5_CMD_OP_INIT2RTR_QP = 0x503, - MLX5_CMD_OP_RTR2RTS_QP = 0x504, - MLX5_CMD_OP_RTS2RTS_QP = 0x505, - MLX5_CMD_OP_SQERR2RTS_QP = 0x506, - MLX5_CMD_OP_2ERR_QP = 0x507, - MLX5_CMD_OP_RTS2SQD_QP = 0x508, - MLX5_CMD_OP_SQD2RTS_QP = 0x509, - MLX5_CMD_OP_2RST_QP = 0x50a, - MLX5_CMD_OP_QUERY_QP = 0x50b, - MLX5_CMD_OP_CONF_SQP = 0x50c, - MLX5_CMD_OP_MAD_IFC = 0x50d, - MLX5_CMD_OP_INIT2INIT_QP = 0x50e, - MLX5_CMD_OP_SUSPEND_QP = 0x50f, - MLX5_CMD_OP_UNSUSPEND_QP = 0x510, - MLX5_CMD_OP_SQD2SQD_QP = 0x511, - MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, - MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, - MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, - - MLX5_CMD_OP_CREATE_PSV = 0x600, - MLX5_CMD_OP_DESTROY_PSV = 0x601, - MLX5_CMD_OP_QUERY_PSV = 0x602, - 
MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, - MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, - - MLX5_CMD_OP_CREATE_SRQ = 0x700, - MLX5_CMD_OP_DESTROY_SRQ = 0x701, - MLX5_CMD_OP_QUERY_SRQ = 0x702, - MLX5_CMD_OP_ARM_RQ = 0x703, - MLX5_CMD_OP_RESIZE_SRQ = 0x704, - - MLX5_CMD_OP_ALLOC_PD = 0x800, - MLX5_CMD_OP_DEALLOC_PD = 0x801, - MLX5_CMD_OP_ALLOC_UAR = 0x802, - MLX5_CMD_OP_DEALLOC_UAR = 0x803, - - MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, - MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, - - - MLX5_CMD_OP_ALLOC_XRCD = 0x80e, - MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, - - MLX5_CMD_OP_ACCESS_REG = 0x805, - MLX5_CMD_OP_MAX = 0x810, + MLX5_ATOMIC_MODE_OFFSET = 16, + MLX5_ATOMIC_MODE_IB_COMP = 1, + MLX5_ATOMIC_MODE_CX = 2, + MLX5_ATOMIC_MODE_8B = 3, + MLX5_ATOMIC_MODE_16B = 4, + MLX5_ATOMIC_MODE_32B = 5, + MLX5_ATOMIC_MODE_64B = 6, + MLX5_ATOMIC_MODE_128B = 7, + MLX5_ATOMIC_MODE_256B = 8, }; enum { + MLX5_REG_SBPR = 0xb001, + MLX5_REG_SBCM = 0xb002, + MLX5_REG_QPTS = 0x4002, + MLX5_REG_QETCR = 0x4005, + MLX5_REG_QTCT = 0x400a, + MLX5_REG_QPDPM = 0x4013, + MLX5_REG_QCAM = 0x4019, + MLX5_REG_DCBX_PARAM = 0x4020, + MLX5_REG_DCBX_APP = 0x4021, + MLX5_REG_FPGA_CAP = 0x4022, + MLX5_REG_FPGA_CTRL = 0x4023, + MLX5_REG_FPGA_ACCESS_REG = 0x4024, + MLX5_REG_CORE_DUMP = 0x402e, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, + MLX5_REG_PFCC = 0x5007, + MLX5_REG_PPCNT = 0x5008, + MLX5_REG_PPTB = 0x500b, + MLX5_REG_PBMC = 0x500c, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, - MLX5_REG_PMLP = 0, /* TBD */ + MLX5_REG_PVLC = 0x500f, + MLX5_REG_PCMR = 0x5041, + MLX5_REG_PDDR = 0x5031, + MLX5_REG_PMLP = 0x5002, + MLX5_REG_PPLM = 0x5023, + MLX5_REG_PPHCR = 0x503E, + MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, + MLX5_REG_MTCAP = 0x9009, + MLX5_REG_MTMP = 0x900A, + MLX5_REG_MCIA = 0x9014, + MLX5_REG_MNVDA = 0x9024, + MLX5_REG_MFRL = 0x9028, + MLX5_REG_MLCR = 0x902b, + MLX5_REG_MRTC = 0x902d, + MLX5_REG_MTRC_CAP = 0x9040, + MLX5_REG_MTRC_CONF = 0x9041, + MLX5_REG_MTRC_STDB = 0x9042, + MLX5_REG_MTRC_CTRL = 0x9043, + MLX5_REG_MPEIN = 0x9050, + MLX5_REG_MPCNT = 0x9051, + MLX5_REG_MTPPS = 0x9053, + MLX5_REG_MTPPSE = 0x9054, + MLX5_REG_MTUTC = 0x9055, + MLX5_REG_MPEGC = 0x9056, + MLX5_REG_MPIR = 0x9059, + MLX5_REG_MCQS = 0x9060, + MLX5_REG_MCQI = 0x9061, + MLX5_REG_MCC = 0x9062, + MLX5_REG_MCDA = 0x9063, + MLX5_REG_MCAM = 0x907f, + MLX5_REG_MSECQ = 0x9155, + MLX5_REG_MSEES = 0x9156, + MLX5_REG_MIRC = 0x9162, + MLX5_REG_MTPTM = 0x9180, + MLX5_REG_MTCTR = 0x9181, + MLX5_REG_MRTCQ = 0x9182, + MLX5_REG_SBCAM = 0xB01F, + MLX5_REG_RESOURCE_DUMP = 0xC000, + MLX5_REG_NIC_CAP = 0xC00D, + MLX5_REG_DTOR = 0xC00E, + MLX5_REG_VHCA_ICM_CTRL = 0xC010, +}; + +enum mlx5_qpts_trust_state { + MLX5_QPTS_TRUST_PCP = 1, + MLX5_QPTS_TRUST_DSCP = 2, +}; + +enum mlx5_dcbx_oper_mode { + MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, + MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, +}; + +enum { + MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, + MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, + MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2, + MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3, +}; + +enum mlx5_page_fault_resume_flags { + MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0, + MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1, + MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2, + MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7, }; enum dbg_rsc_type { @@ -189,8 +200,20 @@ enum dbg_rsc_type { MLX5_DBG_RSC_CQ, }; +enum port_state_policy { + MLX5_POLICY_DOWN = 0, + MLX5_POLICY_UP = 1, + MLX5_POLICY_FOLLOW = 2, + 
MLX5_POLICY_INVALID = 0xffffffff +}; + +enum mlx5_coredev_type { + MLX5_COREDEV_PF, + MLX5_COREDEV_VF, + MLX5_COREDEV_SF, +}; + struct mlx5_field_desc { - struct dentry *dent; int i; }; @@ -199,49 +222,24 @@ struct mlx5_rsc_debug { void *object; enum dbg_rsc_type type; struct dentry *root; - struct mlx5_field_desc fields[0]; + struct mlx5_field_desc fields[]; }; enum mlx5_dev_event { - MLX5_DEV_EVENT_SYS_ERROR, - MLX5_DEV_EVENT_PORT_UP, - MLX5_DEV_EVENT_PORT_DOWN, - MLX5_DEV_EVENT_PORT_INITIALIZED, - MLX5_DEV_EVENT_LID_CHANGE, - MLX5_DEV_EVENT_PKEY_CHANGE, - MLX5_DEV_EVENT_GUID_CHANGE, - MLX5_DEV_EVENT_CLIENT_REREG, -}; - -struct mlx5_uuar_info { - struct mlx5_uar *uars; - int num_uars; - int num_low_latency_uuars; - unsigned long *bitmap; - unsigned int *count; - struct mlx5_bf *bfs; - - /* - * protect uuar allocation data structs - */ - struct mutex lock; + MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ + MLX5_DEV_EVENT_PORT_AFFINITY = 129, + MLX5_DEV_EVENT_MULTIPORT_ESW = 130, }; -struct mlx5_bf { - void __iomem *reg; - void __iomem *regreg; - int buf_size; - struct mlx5_uar *uar; - unsigned long offset; - int need_lock; - /* protect blue flame buffer selection when needed - */ - spinlock_t lock; +enum mlx5_port_status { + MLX5_PORT_UP = 1, + MLX5_PORT_DOWN = 2, +}; - /* serialize 64 bit writes when done as two 32 bit accesses - */ - spinlock_t lock32; - int uuarn; +enum mlx5_cmdif_state { + MLX5_CMDIF_STATE_UNINITIALIZED, + MLX5_CMDIF_STATE_UP, + MLX5_CMDIF_STATE_DOWN, }; struct mlx5_cmd_first { @@ -250,7 +248,7 @@ struct mlx5_cmd_first { struct mlx5_cmd_msg { struct list_head list; - struct cache_ent *cache; + struct cmd_msg_cache *parent; u32 len; struct mlx5_cmd_first first; struct mlx5_cmd_mailbox *next; @@ -258,11 +256,6 @@ struct mlx5_cmd_msg { struct mlx5_cmd_debug { struct dentry *dbg_root; - struct dentry *dbg_in; - struct dentry *dbg_out; - struct dentry *dbg_outlen; - struct dentry *dbg_status; - struct dentry *dbg_run; void *in_msg; void *out_msg; u8 status; @@ -270,38 +263,59 @@ struct mlx5_cmd_debug { u16 outlen; }; -struct cache_ent { +struct cmd_msg_cache { /* protect block chain allocations */ spinlock_t lock; struct list_head head; + unsigned int max_inbox_size; + unsigned int num_ent; }; -struct cmd_msg_cache { - struct cache_ent large; - struct cache_ent med; - +enum { + MLX5_NUM_COMMAND_CACHES = 5, }; struct mlx5_cmd_stats { u64 sum; u64 n; + /* number of times command failed */ + u64 failed; + /* number of times command failed on bad status returned by FW */ + u64 failed_mbox_status; + /* last command failed returned errno */ + u32 last_failed_errno; + /* last bad status returned by FW */ + u8 last_failed_mbox_status; + /* last command failed syndrome returned by FW */ + u32 last_failed_syndrome; struct dentry *root; - struct dentry *avg; - struct dentry *count; /* protect command average calculations */ spinlock_t lock; }; struct mlx5_cmd { + struct mlx5_nb nb; + + /* members which needs to be queried or reinitialized each reload */ + struct { + u16 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + unsigned long bitmask; + struct semaphore sem; + struct semaphore pages_sem; + struct semaphore throttle_sem; + struct semaphore unprivileged_sem; + struct xarray privileged_uids; + } vars; + enum mlx5_cmdif_state state; + void *cmd_alloc_buf; + dma_addr_t alloc_dma; + int alloc_size; void *cmd_buf; dma_addr_t dma; - u16 cmdif_rev; - u8 log_sz; - u8 log_stride; - int max_reg_cmds; - int events; - u32 __iomem *vector; /* protect command queue 
allocations */ @@ -311,53 +325,16 @@ struct mlx5_cmd { */ spinlock_t token_lock; u8 token; - unsigned long bitmask; char wq_name[MLX5_CMD_WQ_MAX_NAME]; struct workqueue_struct *wq; - struct semaphore sem; - struct semaphore pages_sem; int mode; + u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; - struct pci_pool *pool; + struct dma_pool *pool; struct mlx5_cmd_debug dbg; - struct cmd_msg_cache cache; + struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; - struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; -}; - -struct mlx5_port_caps { - int gid_table_len; - int pkey_table_len; -}; - -struct mlx5_caps { - u8 log_max_eq; - u8 log_max_cq; - u8 log_max_qp; - u8 log_max_mkey; - u8 log_max_pd; - u8 log_max_srq; - u32 max_cqes; - int max_wqes; - int max_sq_desc_sz; - int max_rq_desc_sz; - u64 flags; - u16 stat_rate_support; - int log_max_msg; - int num_ports; - int max_ra_res_qp; - int max_ra_req_qp; - int max_srq_wqes; - int bf_reg_size; - int bf_regs_per_page; - struct mlx5_port_caps port[MLX5_MAX_PORTS]; - u8 ext_port_cap[MLX5_MAX_PORTS]; - int max_vf; - u32 reserved_lkey; - u8 local_ca_ack_delay; - u8 log_max_mcg; - u16 max_qp_mcg; - int min_page_sz; + struct xarray stats; }; struct mlx5_cmd_mailbox { @@ -371,158 +348,456 @@ struct mlx5_buf_list { dma_addr_t map; }; -struct mlx5_buf { - struct mlx5_buf_list direct; - struct mlx5_buf_list *page_list; - int nbufs; +struct mlx5_frag_buf { + struct mlx5_buf_list *frags; int npages; - int page_shift; int size; + u8 page_shift; }; -struct mlx5_eq { - struct mlx5_core_dev *dev; - __be32 __iomem *doorbell; - u32 cons_index; - struct mlx5_buf buf; - int size; - u8 irqn; - u8 eqn; - int nent; - u64 mask; - char name[MLX5_MAX_EQ_NAME]; - struct list_head list; - int index; - struct mlx5_rsc_debug *dbg; +struct mlx5_frag_buf_ctrl { + struct mlx5_buf_list *frags; + u32 sz_m1; + u16 frag_sz_m1; + u16 strides_offset; + u8 log_sz; + u8 log_stride; + u8 log_frag_strides; }; +struct mlx5_core_psv { + u32 psv_idx; + struct psv_layout { + u32 pd; + u16 syndrome; + u16 reserved; + u16 bg; + u16 app_tag; + u32 ref_tag; + } psv; +}; -struct mlx5_core_mr { - u64 iova; - u64 size; - u32 key; - u32 pd; - u32 access; +struct mlx5_core_sig_ctx { + struct mlx5_core_psv psv_memory; + struct mlx5_core_psv psv_wire; + struct ib_sig_err err_item; + bool sig_status_checked; + bool sig_err_exists; + u32 sigerr_count; }; -struct mlx5_core_srq { - u32 srqn; - int max; - int max_gs; - int max_avail_gather; - int wqe_shift; - void (*event) (struct mlx5_core_srq *, enum mlx5_event); +#define MLX5_24BIT_MASK ((1 << 24) - 1) - atomic_t refcount; - struct completion free; +enum mlx5_res_type { + MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, + MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, + MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, + MLX5_RES_SRQ = 3, + MLX5_RES_XSRQ = 4, + MLX5_RES_XRQ = 5, }; -struct mlx5_eq_table { - void __iomem *update_ci; - void __iomem *update_arm_ci; - struct list_head *comp_eq_head; - struct mlx5_eq pages_eq; - struct mlx5_eq async_eq; - struct mlx5_eq cmd_eq; - struct msix_entry *msix_arr; - int num_comp_vectors; - /* protect EQs list - */ - spinlock_t lock; +struct mlx5_core_rsc_common { + enum mlx5_res_type res; + refcount_t refcount; + struct completion free; + bool invalid; }; -struct mlx5_uar { - u32 index; - struct list_head bf_list; - unsigned free_bf_bmap; - void __iomem *wc_map; +struct mlx5_uars_page { void __iomem *map; + bool wc; + u32 index; + struct list_head list; + unsigned int bfregs; + unsigned long *reg_bitmap; 
/* for non fast path bf regs */ + unsigned long *fp_bitmap; + unsigned int reg_avail; + unsigned int fp_avail; + struct kref ref_count; + struct mlx5_core_dev *mdev; }; +struct mlx5_bfreg_head { + /* protect blue flame registers allocations */ + struct mutex lock; + struct list_head list; +}; + +struct mlx5_bfreg_data { + struct mlx5_bfreg_head reg_head; + struct mlx5_bfreg_head wc_head; +}; + +struct mlx5_sq_bfreg { + void __iomem *map; + struct mlx5_uars_page *up; + bool wc; + u32 index; +}; struct mlx5_core_health { struct health_buffer __iomem *health; __be32 __iomem *health_counter; struct timer_list timer; - struct list_head list; u32 prev; int miss_counter; + u8 synd; + u32 fatal_error; + u32 crdump_size; + struct workqueue_struct *wq; + unsigned long flags; + struct work_struct fatal_report_work; + struct work_struct report_work; + struct devlink_health_reporter *fw_reporter; + struct devlink_health_reporter *fw_fatal_reporter; + struct devlink_health_reporter *vnic_reporter; + struct delayed_work update_fw_log_ts_work; }; -struct mlx5_cq_table { - /* protect radix tree - */ - spinlock_t lock; - struct radix_tree_root tree; +enum { + MLX5_PF_NOTIFY_DISABLE_VF, + MLX5_PF_NOTIFY_ENABLE_VF, }; -struct mlx5_qp_table { - /* protect radix tree +struct mlx5_vf_context { + int enabled; + u64 port_guid; + u64 node_guid; + /* Valid bits are used to validate administrative guid only. + * Enabled after ndo_set_vf_guid */ - spinlock_t lock; - struct radix_tree_root tree; + u8 port_guid_valid:1; + u8 node_guid_valid:1; + enum port_state_policy policy; + struct blocking_notifier_head notifier; +}; + +struct mlx5_core_sriov { + struct mlx5_vf_context *vfs_ctx; + int num_vfs; + u16 max_vfs; + u16 max_ec_vfs; }; -struct mlx5_srq_table { - /* protect radix tree +struct mlx5_events; +struct mlx5_mpfs; +struct mlx5_eswitch; +struct mlx5_lag; +struct mlx5_devcom_dev; +struct mlx5_fw_reset; +struct mlx5_eq_table; +struct mlx5_irq_table; +struct mlx5_sf_dev_table; +struct mlx5_sf_hw_table; +struct mlx5_sf_table; +struct mlx5_crypto_dek_priv; + +struct mlx5_rate_limit { + u32 rate; + u32 max_burst_sz; + u16 typical_pkt_sz; +}; + +struct mlx5_rl_entry { + u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)]; + u64 refcount; + u16 index; + u16 uid; + u8 dedicated : 1; +}; + +struct mlx5_rl_table { + /* protect rate limit table */ + struct mutex rl_lock; + u16 max_size; + u32 max_rate; + u32 min_rate; + struct mlx5_rl_entry *rl_entry; + u64 refcount; +}; + +struct mlx5_core_roce { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_flow_handle *allow_rule; +}; + +enum { + MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0, + MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1, + /* Set during device detach to block any further devices + * creation/deletion on drivers rescan. Unset during device attach. 
*/ - spinlock_t lock; - struct radix_tree_root tree; + MLX5_PRIV_FLAGS_DETACH = 1 << 2, + MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3, }; +struct mlx5_adev { + struct auxiliary_device adev; + struct mlx5_core_dev *mdev; + int idx; +}; + +struct mlx5_debugfs_entries { + struct dentry *dbg_root; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + struct dentry *pages_debugfs; + struct dentry *lag_debugfs; +}; + +enum mlx5_func_type { + MLX5_PF, + MLX5_VF, + MLX5_SF, + MLX5_HOST_PF, + MLX5_EC_VF, + MLX5_FUNC_TYPE_NUM, +}; + +struct mlx5_ft_pool; struct mlx5_priv { - char name[MLX5_MAX_NAME_LEN]; - struct mlx5_eq_table eq_table; - struct mlx5_uuar_info uuari; - MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); + /* IRQ table valid only for real pci devices PF or VF */ + struct mlx5_irq_table *irq_table; + struct mlx5_eq_table *eq_table; /* pages stuff */ + struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; - struct rb_root page_root; - int fw_pages; - int reg_pages; + struct xarray page_root_xa; + atomic_t reg_pages; + struct list_head free_list; + u32 fw_pages; + u32 page_counters[MLX5_FUNC_TYPE_NUM]; + u32 fw_pages_alloc_failed; + u32 give_pages_dropped; + u32 reclaim_pages_discard; struct mlx5_core_health health; + struct list_head traps; - struct mlx5_srq_table srq_table; - - /* start: qp staff */ - struct mlx5_qp_table qp_table; - struct dentry *qp_debugfs; - struct dentry *eq_debugfs; - struct dentry *cq_debugfs; - struct dentry *cmdif_debugfs; - /* end: qp staff */ - - /* start: cq staff */ - struct mlx5_cq_table cq_table; - /* end: cq staff */ + struct mlx5_debugfs_entries dbg; /* start: alloc staff */ + /* protect buffer allocation according to numa node */ + struct mutex alloc_mutex; + int numa_node; + struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ - struct dentry *dbg_root; - /* protect mkey key part */ - spinlock_t mkey_lock; - u8 mkey_key; + struct mlx5_adev **adev; + int adev_idx; + int sw_vhca_id; + struct mlx5_events *events; + struct mlx5_vhca_events *vhca_events; + + struct mlx5_flow_steering *steering; + struct mlx5_mpfs *mpfs; + struct blocking_notifier_head esw_n_head; + struct mlx5_eswitch *eswitch; + struct mlx5_core_sriov sriov; + struct mlx5_lag *lag; + u32 flags; + struct mlx5_devcom_dev *devc; + struct mlx5_devcom_comp_dev *hca_devcom_comp; + struct mlx5_fw_reset *fw_reset; + struct mlx5_core_roce roce; + struct mlx5_fc_stats *fc_stats; + struct mlx5_rl_table rl_table; + struct mlx5_ft_pool *ft_pool; + + struct mlx5_bfreg_data bfregs; + struct mlx5_sq_bfreg bfreg; +#ifdef CONFIG_MLX5_SF + struct mlx5_nb vhca_state_nb; + struct blocking_notifier_head vhca_state_n_head; + struct notifier_block sf_dev_nb; + struct mlx5_sf_dev_table *sf_dev_table; + struct mlx5_core_dev *parent_mdev; +#endif +#ifdef CONFIG_MLX5_SF_MANAGER + struct notifier_block sf_hw_table_vhca_nb; + struct mlx5_sf_hw_table *sf_hw_table; + struct notifier_block sf_table_esw_nb; + struct notifier_block sf_table_vhca_nb; + struct notifier_block sf_table_mdev_nb; + struct mlx5_sf_table *sf_table; +#endif + struct blocking_notifier_head lag_nh; +}; + +enum mlx5_device_state { + MLX5_DEVICE_STATE_UP = 1, + MLX5_DEVICE_STATE_INTERNAL_ERROR, +}; + +enum mlx5_interface_state { + MLX5_INTERFACE_STATE_UP = BIT(0), + MLX5_BREAK_FW_WAIT = BIT(1), +}; + +enum mlx5_pci_status { + MLX5_PCI_STATUS_DISABLED, + MLX5_PCI_STATUS_ENABLED, +}; + +enum mlx5_pagefault_type_flags { + MLX5_PFAULT_REQUESTOR = 1 << 0, + MLX5_PFAULT_WRITE = 1 
<< 1, + MLX5_PFAULT_RDMA = 1 << 2, +}; + +struct mlx5_td { + /* protects tirs list changes while tirs refresh */ + struct mutex list_lock; + struct list_head tirs_list; + u32 tdn; +}; + +struct mlx5e_resources { + struct mlx5e_hw_objs { + u32 pdn; + struct mlx5_td td; + u32 mkey; + struct mlx5_sq_bfreg *bfregs; + unsigned int num_bfregs; +#define MLX5_MAX_NUM_TC 8 + u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; + bool tisn_valid; + } hw_objs; + struct net_device *uplink_netdev; + netdevice_tracker tracker; + struct mutex uplink_netdev_lock; + struct mlx5_crypto_dek_priv *dek_priv; +}; + +enum mlx5_sw_icm_type { + MLX5_SW_ICM_TYPE_STEERING, + MLX5_SW_ICM_TYPE_HEADER_MODIFY, + MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN, + MLX5_SW_ICM_TYPE_SW_ENCAP, +}; + +#define MLX5_MAX_RESERVED_GIDS 8 + +struct mlx5_rsvd_gids { + unsigned int start; + unsigned int count; + struct ida ida; +}; + +struct mlx5_clock; +struct mlx5_clock_dev_state; +struct mlx5_dm; +struct mlx5_fw_tracer; +struct mlx5_vxlan; +struct mlx5_geneve; +struct mlx5_hv_vhca; +struct mlx5_st; + +#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) +#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) + +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, +}; + +enum { + MKEY_CACHE_LAST_STD_ENTRY = 20, + MLX5_IMR_KSM_CACHE_ENTRY, + MAX_MKEY_CACHE_ENTRIES +}; + +struct mlx5_profile { + u64 mask; + u8 log_max_qp; + u8 num_cmd_caches; + struct { + int size; + int limit; + } mr_cache[MAX_MKEY_CACHE_ENTRIES]; +}; + +struct mlx5_hca_cap { + u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; + u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; +}; + +enum mlx5_wc_state { + MLX5_WC_STATE_UNINITIALIZED, + MLX5_WC_STATE_UNSUPPORTED, + MLX5_WC_STATE_SUPPORTED, }; struct mlx5_core_dev { + struct device *device; + enum mlx5_coredev_type coredev_type; struct pci_dev *pdev; + /* sync pci state */ + struct mutex pci_status_mutex; + enum mlx5_pci_status pci_status; u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; - struct mlx5_caps caps; + struct { + struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; + u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; + u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; + u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; + u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; + u8 embedded_cpu; + } caps; + struct mlx5_timeouts *timeouts; + u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; - void (*event) (struct mlx5_core_dev *dev, - enum mlx5_dev_event event, - void *data); + phys_addr_t bar_addr; + enum mlx5_device_state state; + /* sync interface state */ + struct mutex intf_state_mutex; + struct lock_class_key lock_key; + unsigned long intf_state; struct mlx5_priv priv; - struct mlx5_profile *profile; - atomic_t num_qps; + struct mlx5_profile profile; + u32 issi; + struct mlx5e_resources mlx5e_res; + struct mlx5_dm *dm; + struct mlx5_st *st; + struct mlx5_vxlan *vxlan; + struct mlx5_geneve *geneve; + struct { + struct mlx5_rsvd_gids reserved_gids; + u32 roce_en; + } roce; +#ifdef CONFIG_MLX5_FPGA + struct mlx5_fpga_device *fpga; +#endif + struct mlx5_clock *clock; + struct mlx5_clock_dev_state *clock_state; + struct mlx5_ib_clock_info *clock_info; + struct mlx5_fw_tracer *tracer; + struct mlx5_rsc_dump *rsc_dump; + u32 vsc_addr; + struct mlx5_hv_vhca *hv_vhca; + struct mlx5_hwmon *hwmon; + u64 num_block_tc; + u64 num_block_ipsec; +#ifdef CONFIG_MLX5_MACSEC + struct mlx5_macsec_fs *macsec_fs; + /* MACsec notifier chain to sync MACsec core and IB database */ + 
struct blocking_notifier_head macsec_nh; +#endif + u64 num_ipsec_offloads; + struct mlx5_sd *sd; + enum mlx5_wc_state wc_state; + /* sync write combining state */ + struct mutex wc_state_lock; }; struct mlx5_db { @@ -535,29 +810,36 @@ struct mlx5_db { int index; }; -enum { - MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES, -}; +#define MLX5_DEFAULT_NUM_DOORBELLS 8 enum { MLX5_COMP_EQ_SIZE = 1024, }; -struct mlx5_db_pgdir { - struct list_head list; - DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); - __be32 *db_page; - dma_addr_t db_dma; +enum { + MLX5_PTYS_IB = 1 << 0, + MLX5_PTYS_EN = 1 << 2, }; typedef void (*mlx5_cmd_cbk_t)(int status, void *context); +enum { + MLX5_CMD_ENT_STATE_PENDING_COMP, + MLX5_CMD_ENT_STATE_TIMEDOUT, +}; + struct mlx5_cmd_work_ent { + unsigned long state; struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; + void *uout; + int uout_size; mlx5_cmd_cbk_t callback; + struct delayed_work cb_timeout_work; void *context; - int idx; + int idx; + struct completion handling; + struct completion slotted; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -566,42 +848,50 @@ struct mlx5_cmd_work_ent { int page_queue; u8 status; u8 token; - struct timespec ts1; - struct timespec ts2; + u64 ts1; + u64 ts2; + u16 op; + bool polling; + /* Track the max comp handlers */ + refcount_t refcnt; }; -struct mlx5_pas { - u64 pa; - u8 log_sz; +enum phy_port_state { + MLX5_AAA_111 }; -static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) -{ - if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) - return buf->direct.buf + offset; - else - return buf->page_list[offset >> PAGE_SHIFT].buf + - (offset & (PAGE_SIZE - 1)); -} - -extern struct workqueue_struct *mlx5_core_wq; +struct mlx5_hca_vport_context { + u32 field_select; + bool sm_virt_aware; + bool has_smi; + bool has_raw; + enum port_state_policy policy; + enum phy_port_state phys_state; + enum ib_port_state vport_state; + u8 port_physical_state; + u64 sys_image_guid; + u64 port_guid; + u64 node_guid; + u32 cap_mask1; + u32 cap_mask1_perm; + u16 cap_mask2; + u16 cap_mask2_perm; + u16 lid; + u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ + u8 lmc; + u8 subnet_timeout; + u16 sm_lid; + u8 sm_sl; + u16 qkey_violation_counter; + u16 pkey_violation_counter; + bool grh_required; + u8 num_plane; +}; #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field -struct ib_field { - size_t struct_offset_bytes; - size_t struct_size_bytes; - int offset_bits; - int size_bits; -}; - -static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) -{ - return pci_get_drvdata(pdev); -} - extern struct dentry *mlx5_debugfs_root; static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) @@ -619,122 +909,203 @@ static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } -static inline u16 cmdif_rev(struct mlx5_core_dev *dev) +static inline u32 mlx5_base_mkey(const u32 key) +{ + return key & 0xffffff00u; +} + +static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) { - return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + return ((u32)1 << log_sz) << log_stride; } -static inline void *mlx5_vzalloc(unsigned long size) +static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags, + u8 log_stride, u8 log_sz, + u16 strides_offset, + struct mlx5_frag_buf_ctrl *fbc) { - void *rtn; + fbc->frags = frags; + 
fbc->log_stride = log_stride; + fbc->log_sz = log_sz; + fbc->sz_m1 = (1 << fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; + fbc->strides_offset = strides_offset; +} - rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (!rtn) - rtn = vzalloc(size); - return rtn; +static inline void mlx5_init_fbc(struct mlx5_buf_list *frags, + u8 log_stride, u8 log_sz, + struct mlx5_frag_buf_ctrl *fbc) +{ + mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); } -static inline void mlx5_vfree(const void *addr) +static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, + u32 ix) { - if (addr && is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); + unsigned int frag; + + ix += fbc->strides_offset; + frag = ix >> fbc->log_frag_strides; + + return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + +static inline u32 +mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) +{ + u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; + + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); } -int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev); -void mlx5_dev_cleanup(struct mlx5_core_dev *dev); -int mlx5_cmd_init(struct mlx5_core_dev *dev); -void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +enum { + CMD_ALLOWED_OPCODE_ALL, +}; + void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); -int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); + +struct mlx5_async_ctx { + struct mlx5_core_dev *dev; + atomic_t num_inflight; + struct completion inflight_done; +}; + +struct mlx5_async_work; + +typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); + +struct mlx5_async_work { + struct mlx5_async_ctx *ctx; + mlx5_async_cbk_t user_callback; + u16 opcode; /* cmd opcode */ + u16 op_mod; /* cmd op_mod */ + u8 throttle_locked:1; + u8 unpriv_locked:1; + void *out; /* pointer to the cmd output buffer */ +}; + +void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, + struct mlx5_async_ctx *ctx); +void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); +int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, + void *out, int out_size, mlx5_async_cbk_t callback, + struct mlx5_async_work *work); +void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out); +int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); +int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); -int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); -int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); -int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -void mlx5_health_cleanup(void); -void __init mlx5_health_init(void); + +#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \ + ({ \ + mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \ + MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \ + }) + +#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \ + ({ \ + u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \ + mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \ + }) + +int mlx5_cmd_exec_polling(struct mlx5_core_dev 
*dev, void *in, int in_size, + void *out, int out_size); +bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); +int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid); +void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid); + +void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev); +void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); + +void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data); + +void mlx5_health_cleanup(struct mlx5_core_dev *dev); +int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); -void mlx5_stop_health_poll(struct mlx5_core_dev *dev); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); -struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, - gfp_t flags, int npages); -void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, - struct mlx5_cmd_mailbox *head); -int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen); -int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); -int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out); -int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - u16 lwm, int is_srq); -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - struct mlx5_create_mkey_mbox_in *in, int inlen); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - struct mlx5_query_mkey_mbox_out *out, int outlen); -int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - u32 *mkey); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); +void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev); +void mlx5_drain_health_wq(struct mlx5_core_dev *dev); +void mlx5_trigger_health_work(struct mlx5_core_dev *dev); +int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_frag_buf *buf, int node); +void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, + int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out, + int outlen); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); -int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, - u16 opmod, int port); -void mlx5_pagealloc_init(struct mlx5_core_dev *dev); +int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); -int mlx5_pagealloc_start(struct mlx5_core_dev *dev); +void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); -void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s16 npages); -int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int 
mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); -int mlx5_eq_init(struct mlx5_core_dev *dev); -void mlx5_eq_cleanup(struct mlx5_core_dev *dev); -void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); -void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); -void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); -void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); -struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); -void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); -int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, struct mlx5_uar *uar); -int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_start_eqs(struct mlx5_core_dev *dev); -int mlx5_stop_eqs(struct mlx5_core_dev *dev); + +void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); +void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); +int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); -int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev); +void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, + void *data_out, int size_out, u16 reg_id, int arg, + int write, bool verbose); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps); - -int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - struct mlx5_query_eq_mbox_out *out, int outlen); -int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); -void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); -int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); -void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); -int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); + +int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, + int node); + +static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + return mlx5_db_alloc_node(dev, db, dev->priv.numa_node); +} + void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); -typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size); -int mlx5_register_health_report_handler(health_handler_t handler); -void mlx5_unregister_health_report_handler(void); const char *mlx5_command_str(int command); -int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, + int npsvs, u32 *sig_index); +int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); +__be32 
mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev); +void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); + +int mlx5_init_rl_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, + struct mlx5_rate_limit *rl); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid, + bool dedicated_entry, u16 *index); +void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index); +bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, + struct mlx5_rate_limit *rl_1); +int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, + bool map_wc, bool fast_path); +void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); + +unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev); +int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector); +unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); +int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, + u8 roce_version, u8 roce_l3_type, const u8 *gid, + const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); static inline u32 mlx5_mkey_to_idx(u32 mkey) { @@ -746,24 +1117,276 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) return mkey_idx << 8; } +static inline u8 mlx5_mkey_variant(u32 mkey) +{ + return mkey & 0xff; +} + +/* Async-atomic event notifier used by mlx5 core to forward FW + * evetns received from event queue to mlx5 consumers. + * Optimise event queue dipatching. + */ +int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); + +/* Async-atomic event notifier used for forwarding + * evetns from the event queue into the to mlx5 events dispatcher, + * eswitch, clock and others. 
+ */ +int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); +int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); + +/* Blocking event notifier used to forward SW events, used for slow path */ +int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, + void *data); + +int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); + +int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); +int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); +bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); +bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); +bool mlx5_lag_is_active(struct mlx5_core_dev *dev); +bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev); +bool mlx5_lag_is_master(struct mlx5_core_dev *dev); +bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); +bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev); +u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, + struct net_device *slave); +int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, + u64 *values, + int num_counters, + size_t *offsets); +struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i); + +#define mlx5_lag_for_each_peer_mdev(dev, peer, i) \ + for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i); \ + peer; \ + peer = mlx5_lag_get_next_peer_mdev(dev, &i)) + +u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev); +struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); +void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); +int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u32 log_alignment, u16 uid, + phys_addr_t *addr, u32 *obj_id); +int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t addr, u32 obj_id); + +#ifdef CONFIG_PCIE_TPH +int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *st_index); +int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index); +#else +static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev, + enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *st_index) +{ + return -EOPNOTSUPP; +} +static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index) +{ + return -EOPNOTSUPP; +} +#endif + +struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev); +void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev); + +int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev, + int vf_id, + struct notifier_block *nb); +void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev, + int vf_id, + struct notifier_block *nb); +int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, + struct ib_device *device, + struct rdma_netdev_alloc_params *params); + enum { - MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, - MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, - MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2, + MLX5_PCI_DEV_IS_VF = 1 << 0, }; +static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) +{ + return dev->coredev_type == MLX5_COREDEV_PF; +} + +static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) +{ + return dev->coredev_type == MLX5_COREDEV_VF; +} + +static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev 
*dev1, + const struct mlx5_core_dev *dev2) +{ + return dev1->coredev_type == dev2->coredev_type; +} + +static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) +{ + return dev->caps.embedded_cpu; +} + +static inline bool +mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) +{ + return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); +} + +static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) +{ + return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); +} + +static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) +{ + return dev->priv.sriov.max_vfs; +} + +static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) +{ + /* LACP owner conditions: + * 1) Function is physical. + * 2) LAG is supported by FW. + * 3) LAG is managed by driver (currently the only option). + */ + return MLX5_CAP_GEN(dev, vport_group_manager) && + (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && + MLX5_CAP_GEN(dev, lag_master); +} + +static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev) +{ + return dev->priv.sriov.max_ec_vfs; +} + +static inline int mlx5_get_gid_table_len(u16 param) +{ + if (param > 4) { + pr_warn("gid table length is zero\n"); + return 0; + } + + return 8 * (1 << param); +} + +static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) +{ + return !!(dev->priv.rl_table.max_size); +} + +static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && + MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; +} + +static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; +} + +static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev) +{ + return mlx5_core_is_mp_slave(dev) || + mlx5_core_is_mp_master(dev); +} + +static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) +{ + if (!mlx5_core_mp_enabled(dev)) + return 1; + + return MLX5_CAP_GEN(dev, native_port_num); +} + +static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev) +{ + int idx = MLX5_CAP_GEN(dev, native_port_num); + + if (idx >= 1 && idx <= MLX5_MAX_PORTS) + return idx - 1; + else + return PCI_FUNC(dev->pdev->devfn); +} + enum { - MAX_MR_CACHE_ENTRIES = 16, + MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; -struct mlx5_profile { - u64 mask; - u32 log_max_qp; - int cmdif_csum; - struct { - int size; - int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; +bool mlx5_is_roce_on(struct mlx5_core_dev *dev); + +static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev) +{ + if (MLX5_CAP_GEN(dev, roce_rw_supported)) + return MLX5_CAP_GEN(dev, roce); + + /* If RoCE cap is read-only in FW, get RoCE state from devlink + * in order to support RoCE enable/disable feature + */ + return mlx5_is_roce_on(dev); +} + +#ifdef CONFIG_MLX5_MACSEC +static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) +{ + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) || + !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) + return false; + + if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || + !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) + return false; + + if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && + !MLX5_CAP_MACSEC(mdev, 
macsec_crypto_esp_aes_gcm_256_encrypt))
+		return false;
+
+	if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+	    !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+		return false;
+
+	return true;
+}
+
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
+static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
+{
+	if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
+	      NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) ||
+	    !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) ||
+	    !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs)
+		return false;
+
+	return true;
+}
+#endif
+
+enum {
+	MLX5_OCTWORD = 16,
 };
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
+
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+	return devlink_net(priv_to_devlink(dev));
+}
+
+#define MLX5_SW_IMAGE_GUID_MAX_BYTES 9
+
 #endif /* MLX5_DRIVER_H */
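For orientation, here is a minimal, illustrative sketch (not taken from the patch) of how a caller inside mlx5_core might drive a firmware command through the mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() helpers added above. The wrapper name example_enable_hca() is hypothetical; the enable_hca command and its fields come from mlx5_ifc.h.

/*
 * Illustrative sketch only: issue a firmware command using the
 * mlx5_cmd_exec_in() helper. "example_enable_hca" is a hypothetical
 * wrapper; enable_hca_in and MLX5_CMD_OP_ENABLE_HCA are defined in
 * the mlx5 interface headers.
 */
static int example_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);

	/* Expands to mlx5_cmd_exec() with sizes taken from the ifc layout. */
	return mlx5_cmd_exec_in(dev, enable_hca, in);
}

Similarly, a sketch of the new fragmented-buffer helpers (mlx5_init_fbc() and mlx5_frag_buf_get_wqe()) introduced in this header; the queue geometry chosen here (64-byte strides, 1024 entries) is only an example, not something the patch mandates.

/* Illustrative only: lay out and index a ring via mlx5_frag_buf_ctrl. */
static void example_fbc_setup(struct mlx5_buf_list *frags,
			      struct mlx5_frag_buf_ctrl *fbc)
{
	/* log_stride = 6 (64B entries), log_sz = 10 (1024 entries). */
	mlx5_init_fbc(frags, 6, 10, fbc);
}

static void *example_fbc_entry(struct mlx5_frag_buf_ctrl *fbc, u32 ci)
{
	/* Wrap the consumer index and fetch the stride from its fragment. */
	return mlx5_frag_buf_get_wqe(fbc, ci & fbc->sz_m1);
}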
