Diffstat (limited to 'drivers/infiniband/hw/cxgb4/iw_cxgb4.h')
-rw-r--r--   drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 196
1 file changed, 70 insertions(+), 126 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f0fceadd0d12..e17c1252536b 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -34,7 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 #include <linux/completion.h>
 #include <linux/netdevice.h>
 #include <linux/sched/mm.h>
@@ -314,17 +314,15 @@ enum db_state {
 struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;
-	u32 device_cap_flags;
-	struct idr cqidr;
-	struct idr qpidr;
-	struct idr mmidr;
-	spinlock_t lock;
+	struct xarray cqs;
+	struct xarray qps;
+	struct xarray mrs;
 	struct mutex db_mutex;
 	struct dentry *debugfs_root;
 	enum db_state db_state;
-	struct idr hwtid_idr;
-	struct idr atid_idr;
-	struct idr stid_idr;
+	struct xarray hwtids;
+	struct xarray atids;
+	struct xarray stids;
 	struct list_head db_fc_list;
 	u32 avail_ird;
 	wait_queue_head_t wait;
@@ -342,77 +340,14 @@ static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
 	return container_of(ibdev, struct c4iw_dev, ibdev);
 }

-static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
-{
-	return container_of(rdev, struct c4iw_dev, rdev);
-}
-
 static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
 {
-	return idr_find(&rhp->cqidr, cqid);
+	return xa_load(&rhp->cqs, cqid);
 }

 static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
 {
-	return idr_find(&rhp->qpidr, qpid);
-}
-
-static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
-{
-	return idr_find(&rhp->mmidr, mmid);
-}
-
-static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
-				 void *handle, u32 id, int lock)
-{
-	int ret;
-
-	if (lock) {
-		idr_preload(GFP_KERNEL);
-		spin_lock_irq(&rhp->lock);
-	}
-
-	ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
-
-	if (lock) {
-		spin_unlock_irq(&rhp->lock);
-		idr_preload_end();
-	}
-
-	return ret < 0 ? ret : 0;
-}
-
-static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
-				void *handle, u32 id)
-{
-	return _insert_handle(rhp, idr, handle, id, 1);
-}
-
-static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
-				       void *handle, u32 id)
-{
-	return _insert_handle(rhp, idr, handle, id, 0);
-}
-
-static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
-				  u32 id, int lock)
-{
-	if (lock)
-		spin_lock_irq(&rhp->lock);
-	idr_remove(idr, id);
-	if (lock)
-		spin_unlock_irq(&rhp->lock);
-}
-
-static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
-{
-	_remove_handle(rhp, idr, id, 1);
-}
-
-static inline void remove_handle_nolock(struct c4iw_dev *rhp,
-					struct idr *idr, u32 id)
-{
-	_remove_handle(rhp, idr, id, 0);
+	return xa_load(&rhp->qps, qpid);
 }

 extern uint c4iw_max_read_depth;
@@ -491,8 +426,8 @@ struct c4iw_cq {
 	struct t4_cq cq;
 	spinlock_t lock;
 	spinlock_t comp_handler_lock;
-	atomic_t refcnt;
-	wait_queue_head_t wait;
+	refcount_t refcnt;
+	struct completion cq_rel_comp;
 	struct c4iw_wr_wait *wr_waitp;
 };

@@ -549,13 +484,13 @@ struct c4iw_qp {
 	struct t4_wq wq;
 	spinlock_t lock;
 	struct mutex mutex;
-	struct kref kref;
 	wait_queue_head_t wait;
 	int sq_sig_all;
 	struct c4iw_srq *srq;
-	struct work_struct free_work;
 	struct c4iw_ucontext *ucontext;
 	struct c4iw_wr_wait *wr_waitp;
+	struct completion qp_rel_comp;
+	refcount_t qp_refcnt;
 };

 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -589,7 +524,6 @@ struct c4iw_ucontext {
 	u32 key;
 	spinlock_t mmap_lock;
 	struct list_head mmaps;
-	struct kref kref;
 	bool is_32b_cqe;
 };

@@ -598,23 +532,21 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
 {
 	return container_of(c, struct c4iw_ucontext, ibucontext);
 }
-void _c4iw_free_ucontext(struct kref *kref);
-
-static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
-{
-	kref_put(&ucontext->kref, _c4iw_free_ucontext);
-}
-
-static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
-{
-	kref_get(&ucontext->kref);
-}
+enum {
+	CXGB4_MMAP_BAR,
+	CXGB4_MMAP_BAR_WC,
+	CXGB4_MMAP_CONTIG,
+	CXGB4_MMAP_NON_CONTIG,
+};

 struct c4iw_mm_entry {
 	struct list_head entry;
 	u64 addr;
 	u32 key;
+	void *vaddr;
+	dma_addr_t dma_addr;
 	unsigned len;
+	u8 mmap_flag;
 };

 static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
@@ -639,6 +571,32 @@
 	return NULL;
 }

+static inline void insert_flag_to_mmap(struct c4iw_rdev *rdev,
+				       struct c4iw_mm_entry *mm, u64 addr)
+{
+	if (addr >= pci_resource_start(rdev->lldi.pdev, 0) &&
+	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+		     pci_resource_len(rdev->lldi.pdev, 0))))
+		mm->mmap_flag = CXGB4_MMAP_BAR;
+	else if (addr >= pci_resource_start(rdev->lldi.pdev, 2) &&
+		 (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+			  pci_resource_len(rdev->lldi.pdev, 2)))) {
+		if (addr >= rdev->oc_mw_pa) {
+			mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+		} else {
+			if (is_t4(rdev->lldi.adapter_type))
+				mm->mmap_flag = CXGB4_MMAP_BAR;
+			else
+				mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+		}
+	} else {
+		if (addr)
+			mm->mmap_flag = CXGB4_MMAP_CONTIG;
+		else
+			mm->mmap_flag = CXGB4_MMAP_NON_CONTIG;
+	}
+}
+
 static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
 {
@@ -731,12 +689,6 @@ static inline u32 c4iw_ib_to_tpt_access(int a)
	       FW_RI_MEM_ACCESS_LOCAL_READ;
 }

-static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
-{
-	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
-	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
-}
-
 enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
@@ -779,7 +731,7 @@ struct mpa_message {
 	u8 flags;
 	u8 revision;
 	__be16 private_data_size;
-	u8 private_data[0];
+	u8 private_data[];
 };

 struct mpa_v2_conn_params {
@@ -791,7 +743,7 @@ struct terminate_message {
 	u8 layer_etype;
 	u8 ecode;
 	__be16 hdrct_rsvd;
-	u8 len_hdrs[0];
+	u8 len_hdrs[];
 };

 #define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
@@ -982,6 +934,9 @@ struct c4iw_ep {
 	int rcv_win;
 	u32 snd_wscale;
 	struct c4iw_ep_stats stats;
+	u32 srqe_idx;
+	u32 rx_pdu_out_cnt;
+	struct sk_buff *peer_abort_skb;
 };

 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
@@ -1011,15 +966,12 @@ void c4iw_id_table_free(struct c4iw_id_table *alloc);

 typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

-int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
-		     struct l2t_entry *l2t);
 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
		   struct c4iw_dev_ucontext *uctx);
 u32 c4iw_get_resource(struct c4iw_id_table *id_table);
 void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
		       u32 nr_pdid, u32 nr_srqt);
-int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_pblpool_create(struct c4iw_rdev *rdev);
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
 int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
@@ -1027,7 +979,6 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
 void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
 void c4iw_destroy_resource(struct c4iw_resource *rscp);
-int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
 void c4iw_register_device(struct work_struct *work);
 void c4iw_unregister_device(struct c4iw_dev *dev);
 int __init c4iw_cm_init(void);
@@ -1048,37 +999,31 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
 void c4iw_qp_add_ref(struct ib_qp *qp);
 void c4iw_qp_rem_ref(struct ib_qp *qp);
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
-			    enum ib_mr_type mr_type,
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg);
 int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset);
-int c4iw_dealloc_mw(struct ib_mw *mw);
 void c4iw_dealloc(struct uld_ctx *ctx);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			    struct ib_udata *udata);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
					   u64 length, u64 virt, int acc,
+					   struct ib_dmah *dmah,
					   struct ib_udata *udata);
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
-int c4iw_dereg_mr(struct ib_mr *ib_mr);
-int c4iw_destroy_cq(struct ib_cq *ib_cq);
-struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
-			     const struct ib_cq_init_attr *attr,
-			     struct ib_ucontext *ib_context,
-			     struct ib_udata *udata);
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
+int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		   struct uverbs_attr_bundle *attrs);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata);
-int c4iw_destroy_srq(struct ib_srq *ib_srq);
-struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
-			       struct ib_srq_init_attr *attrs,
-			       struct ib_udata *udata);
-int c4iw_destroy_qp(struct ib_qp *ib_qp);
-struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
-			     struct ib_qp_init_attr *attrs,
-			     struct ib_udata *udata);
+int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
+int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
+		    struct ib_udata *udata);
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
+int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+		   struct ib_udata *udata);
 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1096,8 +1041,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_flush_sq(struct c4iw_qp *qhp);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
-u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
@@ -1126,8 +1069,9 @@ int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr);
 struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);

-typedef int c4iw_restrack_func(struct sk_buff *msg,
-			       struct rdma_restrack_entry *res);
-extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
+int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr);
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
+int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp);
+int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, struct rdma_cm_id *cm_id);

 #endif
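
Note (reviewer illustration, not part of the patch): the header change above swaps the per-device IDR tables and their external spinlock_t for XArrays, so lookups such as get_chp()/get_qhp() become xa_load() and the removed insert_handle()/remove_handle() helpers map onto xa_*() calls that take the XArray's internal lock. A minimal sketch of that usage pattern, using hypothetical example_* names and assuming XA_FLAGS_LOCK_IRQ initialisation to match the *_irq variants:

#include <linux/xarray.h>

/* Illustrative only -- mirrors the pattern implied by the header change,
 * not the actual cxgb4 call sites.
 */
static DEFINE_XARRAY_FLAGS(example_qps, XA_FLAGS_LOCK_IRQ);

static int example_insert_qp(u32 qpid, void *qhp)
{
	/* Stands in for insert_handle(): xa_insert_irq() returns -EBUSY if
	 * the id is already present and uses the XArray's own lock, so no
	 * external spinlock is needed.
	 */
	return xa_insert_irq(&example_qps, qpid, qhp, GFP_KERNEL);
}

static void *example_lookup_qp(u32 qpid)
{
	/* Stands in for idr_find() in get_qhp(): RCU-protected lookup. */
	return xa_load(&example_qps, qpid);
}

static void example_remove_qp(u32 qpid)
{
	/* Stands in for remove_handle(). */
	xa_erase_irq(&example_qps, qpid);
}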
