Diffstat (limited to 'include/rdma/ib_verbs.h')
-rw-r--r--   include/rdma/ib_verbs.h   336
1 file changed, 256 insertions, 80 deletions
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1e7774ac808f..6aad66bc5dd7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -42,6 +42,7 @@
 #include <rdma/signature.h>
 #include <uapi/rdma/rdma_user_ioctl.h>
 #include <uapi/rdma/ib_user_ioctl_verbs.h>
+#include <linux/pci-tph.h>
 
 #define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
 
@@ -59,9 +60,6 @@ extern struct workqueue_struct *ib_comp_unbound_wq;
 
 struct ib_ucq_object;
 
-__printf(3, 4) __cold
-void ibdev_printk(const char *level, const struct ib_device *ibdev,
-		   const char *format, ...);
 __printf(2, 3) __cold
 void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
 __printf(2, 3) __cold
@@ -317,17 +315,19 @@ enum ib_atomic_cap {
 };
 
 enum ib_odp_general_cap_bits {
-	IB_ODP_SUPPORT = 1 << 0,
-	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
+	IB_ODP_SUPPORT = IB_UVERBS_ODP_SUPPORT,
+	IB_ODP_SUPPORT_IMPLICIT = IB_UVERBS_ODP_SUPPORT_IMPLICIT,
 };
 
 enum ib_odp_transport_cap_bits {
-	IB_ODP_SUPPORT_SEND = 1 << 0,
-	IB_ODP_SUPPORT_RECV = 1 << 1,
-	IB_ODP_SUPPORT_WRITE = 1 << 2,
-	IB_ODP_SUPPORT_READ = 1 << 3,
-	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
-	IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
+	IB_ODP_SUPPORT_SEND = IB_UVERBS_ODP_SUPPORT_SEND,
+	IB_ODP_SUPPORT_RECV = IB_UVERBS_ODP_SUPPORT_RECV,
+	IB_ODP_SUPPORT_WRITE = IB_UVERBS_ODP_SUPPORT_WRITE,
+	IB_ODP_SUPPORT_READ = IB_UVERBS_ODP_SUPPORT_READ,
+	IB_ODP_SUPPORT_ATOMIC = IB_UVERBS_ODP_SUPPORT_ATOMIC,
+	IB_ODP_SUPPORT_SRQ_RECV = IB_UVERBS_ODP_SUPPORT_SRQ_RECV,
+	IB_ODP_SUPPORT_FLUSH = IB_UVERBS_ODP_SUPPORT_FLUSH,
+	IB_ODP_SUPPORT_ATOMIC_WRITE = IB_UVERBS_ODP_SUPPORT_ATOMIC_WRITE,
 };
 
 struct ib_odp_caps {
@@ -522,6 +522,23 @@ enum ib_port_state {
 	IB_PORT_ACTIVE_DEFER = 5
 };
 
+static inline const char *__attribute_const__
+ib_port_state_to_str(enum ib_port_state state)
+{
+	const char * const states[] = {
+		[IB_PORT_NOP] = "NOP",
+		[IB_PORT_DOWN] = "DOWN",
+		[IB_PORT_INIT] = "INIT",
+		[IB_PORT_ARMED] = "ARMED",
+		[IB_PORT_ACTIVE] = "ACTIVE",
+		[IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER",
+	};
+
+	if (state < ARRAY_SIZE(states))
+		return states[state];
+	return "UNKNOWN";
+}
+
 enum ib_port_phys_state {
 	IB_PORT_PHYS_STATE_SLEEP = 1,
 	IB_PORT_PHYS_STATE_POLLING = 2,
@@ -561,6 +578,7 @@ enum ib_port_speed {
 	IB_SPEED_EDR	= 32,
 	IB_SPEED_HDR	= 64,
 	IB_SPEED_NDR	= 128,
+	IB_SPEED_XDR	= 256,
 };
 
 enum ib_stat_flag {
@@ -568,10 +586,10 @@
 };
 
 /**
- * struct rdma_stat_desc
- * @name - The name of the counter
- * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
- * @priv - Driver private information; Core code should not use
+ * struct rdma_stat_desc - description of one rdma stat/counter
+ * @name: The name of the counter
+ * @flags: Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
+ * @priv: Driver private information; Core code should not use
  */
 struct rdma_stat_desc {
 	const char *name;
@@ -580,24 +598,24 @@ struct rdma_stat_desc {
 };
 
 /**
- * struct rdma_hw_stats
- * @lock - Mutex to protect parallel write access to lifespan and values
+ * struct rdma_hw_stats - collection of hardware stats and their management
+ * @lock: Mutex to protect parallel write access to lifespan and values
  *   of counters, which are 64bits and not guaranteed to be written
  *   atomicaly on 32bits systems.
- * @timestamp - Used by the core code to track when the last update was
- * @lifespan - Used by the core code to determine how old the counters
+ * @timestamp: Used by the core code to track when the last update was
+ * @lifespan: Used by the core code to determine how old the counters
  *   should be before being updated again.  Stored in jiffies, defaults
  *   to 10 milliseconds, drivers can override the default be specifying
  *   their own value during their allocation routine.
- * @descs - Array of pointers to static descriptors used for the counters
+ * @descs: Array of pointers to static descriptors used for the counters
  *   in directory.
- * @is_disabled - A bitmap to indicate each counter is currently disabled
+ * @is_disabled: A bitmap to indicate each counter is currently disabled
  *   or not.
- * @num_counters - How many hardware counters there are.  If name is
+ * @num_counters: How many hardware counters there are.  If name is
  *   shorter than this number, a kernel oops will result.  Driver authors
  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
  *   in their code to prevent this.
- * @value - Array of u64 counters that are accessed by the sysfs code and
+ * @value: Array of u64 counters that are accessed by the sysfs code and
  *   filled in by the drivers get_stats routine
  */
 struct rdma_hw_stats {
@@ -607,7 +625,7 @@ struct rdma_hw_stats {
 	const struct rdma_stat_desc *descs;
 	unsigned long	*is_disabled;
 	int		num_counters;
-	u64		value[];
+	u64		value[] __counted_by(num_counters);
 };
 
 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
 
@@ -840,6 +858,8 @@ enum ib_rate {
 	IB_RATE_50_GBPS   = 20,
 	IB_RATE_400_GBPS  = 21,
 	IB_RATE_600_GBPS  = 22,
+	IB_RATE_800_GBPS  = 23,
+	IB_RATE_1600_GBPS = 25,
 };
 
 /**
@@ -1094,7 +1114,7 @@ struct ib_qp_cap {
 	/*
 	 * Maximum number of rdma_rw_ctx structures in flight at a time.
-	 * ib_create_qp() will calculate the right amount of neededed WRs
+	 * ib_create_qp() will calculate the right amount of needed WRs
 	 * and MRs based on this.
 	 */
 	u32	max_rdma_ctxs;
@@ -1514,6 +1534,7 @@ struct ib_ucontext {
 	struct ib_uverbs_file  *ufile;
 	struct ib_rdmacg_object	cg_obj;
+	u64 enabled_caps;
 	/*
 	 * Implementation details of the RDMA core, don't use in drivers:
 	 */
@@ -1786,6 +1807,7 @@ struct ib_qp {
 	struct list_head	rdma_mrs;
 	struct list_head	sig_mrs;
 	struct ib_srq	       *srq;
+	struct completion	srq_completion;
 	struct ib_xrcd	       *xrcd;	/* XRC TGT QPs only */
 	struct list_head	xrcd_list;
 
@@ -1795,6 +1817,7 @@ struct ib_qp {
 	struct ib_qp	       *real_qp;
 	struct ib_uqp_object   *uobject;
 	void		      (*event_handler)(struct ib_event *, void *);
+	void		      (*registered_event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
 	/* sgid_attrs associated with the AV's */
 	const struct ib_gid_attr *av_sgid_attr;
@@ -1825,6 +1848,27 @@ struct ib_dm {
 	atomic_t	usecnt;
 };
 
+/* bit values to mark existence of ib_dmah fields */
+enum {
+	IB_DMAH_CPU_ID_EXISTS,
+	IB_DMAH_MEM_TYPE_EXISTS,
+	IB_DMAH_PH_EXISTS,
+};
+
+struct ib_dmah {
+	struct ib_device *device;
+	struct ib_uobject *uobject;
+	/*
+	 * Implementation details of the RDMA core, don't use in drivers:
+	 */
+	struct rdma_restrack_entry res;
+	u32 cpu_id;
+	enum tph_mem_type mem_type;
+	atomic_t usecnt;
+	u8 ph;
+	u8 valid_fields; /* use IB_DMAH_XXX_EXISTS */
+};
+
 struct ib_mr {
 	struct ib_device  *device;
 	struct ib_pd	  *pd;
@@ -1842,6 +1886,7 @@ struct ib_mr {
 	struct ib_dm      *dm;
 	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
+	struct ib_dmah *dmah;
 	/*
 	 * Implementation details of the RDMA core, don't use in drivers:
 	 */
@@ -1908,8 +1953,6 @@ struct ib_flow_eth_filter {
 	u8	src_mac[6];
 	__be16	ether_type;
 	__be16	vlan_tag;
-	/* Must be last */
-	u8	real_sz[];
 };
 
 struct ib_flow_spec_eth {
@@ -1922,8 +1965,6 @@ struct ib_flow_ib_filter {
 	__be16 dlid;
 	__u8   sl;
-	/* Must be last */
-	u8	real_sz[];
 };
 
 struct ib_flow_spec_ib {
@@ -1947,8 +1988,6 @@ struct ib_flow_ipv4_filter {
 	u8	tos;
 	u8	ttl;
 	u8	flags;
-	/* Must be last */
-	u8	real_sz[];
 };
 
 struct ib_flow_spec_ipv4 {
@@ -1965,9 +2004,7 @@ struct ib_flow_ipv6_filter {
 	u8	next_hdr;
 	u8	traffic_class;
 	u8	hop_limit;
-	/* Must be last */
-	u8	real_sz[];
-};
+} __packed;
 
 struct ib_flow_spec_ipv6 {
 	u32	type;
@@ -1979,8 +2016,6 @@ struct ib_flow_spec_ipv6 {
 struct ib_flow_tcp_udp_filter {
 	__be16	dst_port;
 	__be16	src_port;
-	/* Must be last */
-	u8	real_sz[];
 };
 
 struct ib_flow_spec_tcp_udp {
@@ -1992,7 +2027,6 @@ struct ib_flow_spec_tcp_udp {
 
 struct ib_flow_tunnel_filter {
 	__be32	tunnel_id;
-	u8	real_sz[];
 };
 
 /* ib_flow_spec_tunnel describes the Vxlan tunnel
@@ -2008,8 +2042,6 @@ struct ib_flow_spec_tunnel {
 struct ib_flow_esp_filter {
 	__be32	spi;
 	__be32  seq;
-	/* Must be last */
-	u8	real_sz[];
 };
 
 struct ib_flow_spec_esp {
@@ -2023,8 +2055,6 @@ struct ib_flow_gre_filter {
 	__be16 c_ks_res0_ver;
 	__be16 protocol;
 	__be32 key;
-	/* Must be last */
-	u8	real_sz[];
 };
 
 struct ib_flow_spec_gre {
@@ -2036,8 +2066,6 @@ struct ib_flow_spec_gre {
 
 struct ib_flow_mpls_filter {
 	__be32 tag;
-	/* Must be last */
-	u8	real_sz[];
 };
 
 struct ib_flow_spec_mpls {
@@ -2190,6 +2218,7 @@ struct ib_port_cache {
 	struct ib_gid_table *gid;
 	u8                   lmc;
 	enum ib_port_state   port_state;
+	enum ib_port_state   last_port_state;
 };
 
 struct ib_port_immutable {
@@ -2269,7 +2298,9 @@ struct rdma_netdev_alloc_params {
 
 struct ib_odp_counters {
 	atomic64_t faults;
+	atomic64_t faults_handled;
 	atomic64_t invalidations;
+	atomic64_t invalidations_handled;
 	atomic64_t prefetch;
 };
 
@@ -2375,7 +2406,7 @@ struct ib_device_ops {
 	int (*modify_port)(struct ib_device *device, u32 port_num, int
 			   port_modify_mask,
 			   struct ib_port_modify *port_modify);
-	/**
+	/*
 	 * The following mandatory functions are used only at device
 	 * registration.  Keep functions such as these at the end of this
 	 * structure to avoid cache line misses when accessing struct ib_device
@@ -2385,7 +2416,7 @@ struct ib_device_ops {
 				     struct ib_port_immutable *immutable);
 	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
 					       u32 port_num);
-	/**
+	/*
 	 * When calling get_netdev, the HW vendor's driver should return the
 	 * net device of device @device at port @port_num or NULL if such
 	 * a net device doesn't exist. The vendor driver should call dev_hold
@@ -2395,7 +2426,7 @@ struct ib_device_ops {
 	 */
 	struct net_device *(*get_netdev)(struct ib_device *device,
 					 u32 port_num);
-	/**
+	/*
 	 * rdma netdev operation
 	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
@@ -2409,14 +2440,14 @@ struct ib_device_ops {
 	int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
 				      enum rdma_netdev_t type,
 				      struct rdma_netdev_alloc_params *params);
-	/**
+	/*
 	 * query_gid should be return GID value for @device, when @port_num
 	 * link layer is either IB or iWarp. It is no-op if @port_num port
 	 * is RoCE link layer.
 	 */
 	int (*query_gid)(struct ib_device *device, u32 port_num, int index,
 			 union ib_gid *gid);
-	/**
+	/*
 	 * When calling add_gid, the HW vendor's driver should add the gid
 	 * of device of port at gid index available at @attr. Meta-info of
 	 * that gid (for example, the network device related to this gid) is
@@ -2430,7 +2461,7 @@ struct ib_device_ops {
 	 * roce_gid_table is used.
 	 */
 	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
-	/**
+	/*
 	 * When calling del_gid, the HW vendor's driver should delete the
 	 * gid of device @device at gid index gid_index of port port_num
 	 * available in @attr.
@@ -2445,7 +2476,7 @@ struct ib_device_ops {
 			      struct ib_udata *udata);
 	void (*dealloc_ucontext)(struct ib_ucontext *context);
 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
-	/**
+	/*
 	 * This will be called once refcount of an entry in mmap_xa reaches
 	 * zero. The type of the memory that was mapped may differ between
 	 * entries and is opaque to the rdma_user_mmap interface.
@@ -2478,18 +2509,33 @@ struct ib_device_ops {
 			  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
 	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
 	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
-			 struct ib_udata *udata);
+			 struct uverbs_attr_bundle *attrs);
+	int (*create_cq_umem)(struct ib_cq *cq,
+			      const struct ib_cq_init_attr *attr,
+			      struct ib_umem *umem,
+			      struct uverbs_attr_bundle *attrs);
 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+	/*
+	 * pre_destroy_cq - Prevent a cq from generating any new work
+	 * completions, but not free any kernel resources
+	 */
+	int (*pre_destroy_cq)(struct ib_cq *cq);
+	/*
+	 * post_destroy_cq - Free all kernel resources
+	 */
+	void (*post_destroy_cq)(struct ib_cq *cq);
 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
 				     u64 virt_addr, int mr_access_flags,
+				     struct ib_dmah *dmah,
 				     struct ib_udata *udata);
 	struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
 					    u64 length, u64 virt_addr, int fd,
 					    int mr_access_flags,
-					    struct ib_udata *udata);
+					    struct ib_dmah *dmah,
+					    struct uverbs_attr_bundle *attrs);
 	struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
 				       u64 length, u64 virt_addr,
 				       int mr_access_flags, struct ib_pd *pd,
@@ -2553,6 +2599,9 @@ struct ib_device_ops {
 				  struct ib_dm_alloc_attr *attr,
 				  struct uverbs_attr_bundle *attrs);
 	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
+	int (*alloc_dmah)(struct ib_dmah *ibdmah,
+			  struct uverbs_attr_bundle *attrs);
+	int (*dealloc_dmah)(struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs);
 	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
 				   struct ib_dm_mr_attr *attr,
 				   struct uverbs_attr_bundle *attrs);
@@ -2567,7 +2616,7 @@ struct ib_device_ops {
 			 struct scatterlist *meta_sg, int meta_sg_nents,
 			 unsigned int *meta_sg_offset);
-	/**
+	/*
 	 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
 	 * fill in the driver initialized data.  The struct is kfree()'ed by
 	 * the sysfs core when the device is removed.  A lifespan of -1 in the
@@ -2576,7 +2625,7 @@ struct ib_device_ops {
 	struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
 	struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
 						     u32 port_num);
-	/**
+	/*
 	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
 	 * @index - The index in the value array we wish to have updated, or
 	 *   num_counters if we want all stats updated
@@ -2591,14 +2640,14 @@ struct ib_device_ops {
 	int (*get_hw_stats)(struct ib_device *device,
 			    struct rdma_hw_stats *stats, u32 port, int index);
-	/**
+	/*
 	 * modify_hw_stat - Modify the counter configuration
 	 * @enable: true/false when enable/disable a counter
 	 * Return codes - 0 on success or error code otherwise.
 	 */
 	int (*modify_hw_stat)(struct ib_device *device, u32 port,
 			      unsigned int counter_index, bool enable);
-	/**
+	/*
 	 * Allows rdma drivers to add their own restrack attributes.
 	 */
 	int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
@@ -2608,6 +2657,8 @@ struct ib_device_ops {
 	int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
 	int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
 	int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
+	int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq);
+	int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq);
 
 	/* Device lifecycle callbacks */
 	/*
@@ -2632,33 +2683,39 @@ struct ib_device_ops {
 			     u8 pdata_len);
 	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
 	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
-	/**
+	/*
 	 * counter_bind_qp - Bind a QP to a counter.
 	 * @counter - The counter to be bound. If counter->id is zero then
 	 *   the driver needs to allocate a new counter and set counter->id
 	 */
-	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
-	/**
+	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
+			       u32 port);
+	/*
 	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
 	 *   counter and bind it onto the default one
 	 */
-	int (*counter_unbind_qp)(struct ib_qp *qp);
-	/**
+	int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);
+	/*
 	 * counter_dealloc -De-allocate the hw counter
 	 */
 	int (*counter_dealloc)(struct rdma_counter *counter);
-	/**
+	/*
 	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
 	 * the driver initialized data.
 	 */
 	struct rdma_hw_stats *(*counter_alloc_stats)(
 		struct rdma_counter *counter);
-	/**
+	/*
 	 * counter_update_stats - Query the stats value of this counter
 	 */
 	int (*counter_update_stats)(struct rdma_counter *counter);
-	/**
+	/*
+	 * counter_init - Initialize the driver specific rdma counter struct.
+	 */
+	void (*counter_init)(struct rdma_counter *counter);
+
+	/*
 	 * Allows rdma drivers to add their own restrack attributes
 	 * dumped via 'rdma stat' iproute2 command.
 	 */
@@ -2674,9 +2731,35 @@ struct ib_device_ops {
 	 */
 	int (*get_numa_node)(struct ib_device *dev);
 
+	/*
+	 * add_sub_dev - Add a sub IB device
+	 */
+	struct ib_device *(*add_sub_dev)(struct ib_device *parent,
+					 enum rdma_nl_dev_type type,
+					 const char *name);
+
+	/*
+	 * del_sub_dev - Delete a sub IB device
+	 */
+	void (*del_sub_dev)(struct ib_device *sub_dev);
+
+	/*
+	 * ufile_cleanup - Attempt to cleanup ubojects HW resources inside
+	 * the ufile.
+	 */
+	void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
+
+	/*
+	 * report_port_event - Drivers need to implement this if they have
+	 * some private stuff to handle when link status changes.
+	 */
+	void (*report_port_event)(struct ib_device *ibdev,
+				  struct net_device *ndev, unsigned long event);
+
 	DECLARE_RDMA_OBJ_SIZE(ib_ah);
 	DECLARE_RDMA_OBJ_SIZE(ib_counters);
 	DECLARE_RDMA_OBJ_SIZE(ib_cq);
+	DECLARE_RDMA_OBJ_SIZE(ib_dmah);
 	DECLARE_RDMA_OBJ_SIZE(ib_mw);
 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
 	DECLARE_RDMA_OBJ_SIZE(ib_qp);
@@ -2684,6 +2767,7 @@ struct ib_device_ops {
 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
 	DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
+	DECLARE_RDMA_OBJ_SIZE(rdma_counter);
 };
 
 struct ib_core_device {
@@ -2736,6 +2820,7 @@ struct ib_device {
 	 * It is a NULL terminated array.
 	 */
 	const struct attribute_group	*groups[4];
+	u8				hw_stats_attr_index;
 
 	u64			     uverbs_cmd_mask;
 
@@ -2784,6 +2869,17 @@ struct ib_device {
 	char iw_ifname[IFNAMSIZ];
 	u32 iw_driver_flags;
 	u32 lag_flags;
+
+	/* A parent device has a list of sub-devices */
+	struct mutex subdev_lock;
+	struct list_head subdev_list_head;
+
+	/* A sub device has a type and a parent */
+	enum rdma_nl_dev_type type;
+	struct ib_device *parent;
+	struct list_head subdev_list;
+
+	enum rdma_nl_name_assign_type name_assign_type;
 };
 
 static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
@@ -2846,16 +2942,24 @@ struct ib_block_iter {
 	/* internal states */
 	struct scatterlist *__sg;	/* sg holding the current aligned block */
 	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
+	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
 	unsigned int __sg_nents;	/* number of SG entries */
 	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
 	unsigned int __pg_bit;		/* alignment of current block */
 };
 
-struct ib_device *_ib_alloc_device(size_t size);
+struct ib_device *_ib_alloc_device(size_t size, struct net *net);
 #define ib_alloc_device(drv_struct, member)                                    \
 	container_of(_ib_alloc_device(sizeof(struct drv_struct) +             \
 				      BUILD_BUG_ON_ZERO(offsetof(              \
-					      struct drv_struct, member))),    \
+					      struct drv_struct, member)),     \
+				      &init_net),                              \
+		     struct drv_struct, member)
+
+#define ib_alloc_device_with_net(drv_struct, member, net)                      \
+	container_of(_ib_alloc_device(sizeof(struct drv_struct) +             \
+				      BUILD_BUG_ON_ZERO(offsetof(              \
+					      struct drv_struct, member)), net), \
 		     struct drv_struct, member)
 
 void ib_dealloc_device(struct ib_device *device);
@@ -2935,6 +3039,14 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
 				      size_t length, u32 min_pgoff,
 				      u32 max_pgoff);
 
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+void rdma_user_mmap_disassociate(struct ib_device *device);
+#else
+static inline void rdma_user_mmap_disassociate(struct ib_device *device)
+{
+}
+#endif
+
 static inline int
 rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
 				  struct rdma_user_mmap_entry *entry,
@@ -3046,8 +3158,8 @@ static inline u32 rdma_start_port(const struct ib_device *device)
 
 /**
  * rdma_for_each_port - Iterate over all valid port numbers of the IB device
- * @device - The struct ib_device * to iterate over
- * @iter - The unsigned int to store the port number
+ * @device: The struct ib_device * to iterate over
+ * @iter: The unsigned int to store the port number
  */
 #define rdma_for_each_port(device, iter)                                       \
 	for (iter = rdma_start_port(device +                                   \
@@ -3413,7 +3525,7 @@ static inline bool rdma_core_cap_opa_port(struct ib_device *device,
 /**
  * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
  * @device: Device
- * @port_num: Port number
+ * @port: Port number
  * @mtu: enum value of MTU
 *
 * Return the MTU size supported by the port as an integer value. Will return
@@ -3431,7 +3543,7 @@ static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
 /**
  * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
  * @device: Device
- * @port_num: Port number
+ * @port: Port number
  * @attr: port attribute
 *
 * Return the MTU size supported by the port as an integer value.
@@ -3808,7 +3920,7 @@ static inline int ib_destroy_qp(struct ib_qp *qp)
 
 /**
  * ib_open_qp - Obtain a reference to an existing sharable QP.
- * @xrcd - XRC domain
+ * @xrcd: XRC domain
  * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
@@ -4162,9 +4274,9 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 /**
  * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
  * @dev: The device for which the DMA addresses are to be created
- * @sg: The sg_table object describing the buffer
+ * @sgt: The sg_table object describing the buffer
  * @direction: The direction of the DMA
- * @attrs: Optional DMA attributes for the map operation
+ * @dma_attrs: Optional DMA attributes for the map operation
  */
 static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
 					   struct sg_table *sgt,
@@ -4308,8 +4420,8 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
 /**
  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
  *   R_Key and L_Key.
- * @mr - struct ib_mr pointer to be updated.
- * @newkey - new key to be used.
+ * @mr: struct ib_mr pointer to be updated.
+ * @newkey: new key to be used.
  */
 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
 {
@@ -4320,7 +4432,7 @@ static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
 /**
  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
  * for calculating a new rkey for type 2 memory windows.
- * @rkey - the rkey to increment.
+ * @rkey: the rkey to increment.
  */
 static inline u32 ib_inc_rkey(u32 rkey)
 {
@@ -4414,7 +4526,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 
 /**
  * ib_device_try_get: Hold a registration lock
- * device: The device to lock
+ * @dev: The device to lock
 *
 * A device under an active registration lock cannot become unregistered. It
 * is only possible to obtain a registration lock on a device that is fully
@@ -4440,8 +4552,19 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
 					    const struct sockaddr *addr);
 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
 			 unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+					u32 port);
+int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
+			 u32 *port);
+
+static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
+{
+	return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
+		IB_PORT_ACTIVE : IB_PORT_DOWN;
+}
+void ib_dispatch_port_state_event(struct ib_device *ibdev,
+				  struct net_device *ndev);
 
 struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr);
 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
@@ -4653,6 +4776,8 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
 			return RDMA_AH_ATTR_TYPE_OPA;
 		return RDMA_AH_ATTR_TYPE_IB;
 	}
+	if (dev->type == RDMA_DEVICE_TYPE_SMI)
+		return RDMA_AH_ATTR_TYPE_IB;
 
 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
 }
@@ -4708,13 +4833,29 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
- * @device: the rdma device
+ * @ibdev: the rdma device
 */
 void rdma_roce_rescan_device(struct ib_device *ibdev);
+void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
+void roce_del_all_netdev_gids(struct ib_device *ib_dev,
+			      u32 port, struct net_device *ndev);
 
 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
 
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs);
+#else
+static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
+{
+	return 0;
+}
+static inline bool
+rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
+{
+	return false;
+}
+#endif
 
 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
 				     enum rdma_netdev_t type, const char *name,
@@ -4745,7 +4886,7 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
 
 /**
  * ibdev_to_node - return the NUMA node for a given ib_device
- * @dev: device to get the NUMA node for.
+ * @ibdev: device to get the NUMA node for.
 */
 static inline int ibdev_to_node(struct ib_device *ibdev)
 {
@@ -4770,6 +4911,12 @@ static inline int ibdev_to_node(struct ib_device *ibdev)
 bool rdma_dev_access_netns(const struct ib_device *device,
 			   const struct net *net);
 
+bool rdma_dev_has_raw_cap(const struct ib_device *dev);
+static inline struct net *rdma_dev_net(struct ib_device *device)
+{
+	return read_pnet(&device->coredev.rdma_net);
+}
+
 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
 #define IB_GRH_FLOWLABEL_MASK		 (0x000FFFFF)
@@ -4777,6 +4924,7 @@ bool rdma_dev_access_netns(const struct ib_device *device,
 /**
  * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
 * on the flow_label
+ * @fl: flow_label value
 *
 * This function will convert the 20 bit flow_label input to a valid RoCE v2
 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
@@ -4834,4 +4982,32 @@ static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
 
 const struct ib_port_immutable*
 ib_port_immutable_read(struct ib_device *dev, unsigned int port);
+
+/** ib_add_sub_device - Add a sub IB device on an existing one
+ *
+ * @parent: The IB device that needs to add a sub device
+ * @type: The type of the new sub device
+ * @name: The name of the new sub device
+ *
+ *
+ * Return 0 on success, an error code otherwise
+ */
+int ib_add_sub_device(struct ib_device *parent,
+		      enum rdma_nl_dev_type type,
+		      const char *name);
+
+
+/** ib_del_sub_device_and_put - Delect an IB sub device while holding a 'get'
+ *
+ * @sub: The sub device that is going to be deleted
+ *
+ * Return 0 on success, an error code otherwise
+ */
+int ib_del_sub_device_and_put(struct ib_device *sub);
+
+static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev)
+{
+	ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER;
+}
+
 #endif /* IB_VERBS_H */
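Usage note (not part of the patch): the diff adds two inline helpers, ib_get_curr_port_state(), which maps a netdevice's running/carrier state to IB_PORT_ACTIVE or IB_PORT_DOWN, and ib_port_state_to_str(), which turns an ib_port_state value into a printable name. A minimal sketch of how a driver might combine them follows; the function name example_log_port_state and its callers are hypothetical, and only calls visible in this header (plus netdev_name() from linux/netdevice.h) are used.

#include <rdma/ib_verbs.h>
#include <linux/netdevice.h>

/* Hypothetical helper: log the IB port state implied by a netdev's state. */
static void example_log_port_state(struct ib_device *ibdev,
				   struct net_device *ndev)
{
	/* ACTIVE if the netdev is running with carrier, otherwise DOWN. */
	enum ib_port_state state = ib_get_curr_port_state(ndev);

	ibdev_info(ibdev, "%s: link is %s\n",
		   netdev_name(ndev), ib_port_state_to_str(state));
}

A driver's netdev event handler (for example, the new report_port_event callback) could call such a helper when the link status changes.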
