Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_comp.c	|  49
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_hdr.h	| 178
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_mcast.c	|  64
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_net.c	|  17
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_pool.c	| 300
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_pool.h	| 103
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_qp.c	|  11
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_recv.c	|  40
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_req.c	|   1
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_resp.c	|   6
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_verbs.c	|  68
11 files changed, 470 insertions, 367 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 0a1e6393250b..a8ac791a1bb9 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -515,6 +515,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
 	while ((skb = skb_dequeue(&qp->resp_pkts))) {
 		rxe_drop_ref(qp);
 		kfree_skb(skb);
+		ib_device_put(qp->ibqp.device);
 	}
 
 	while ((wqe = queue_head(qp->sq.queue))) {
@@ -527,6 +528,17 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
 	}
 }
 
+static void free_pkt(struct rxe_pkt_info *pkt)
+{
+	struct sk_buff *skb = PKT_TO_SKB(pkt);
+	struct rxe_qp *qp = pkt->qp;
+	struct ib_device *dev = qp->ibqp.device;
+
+	kfree_skb(skb);
+	rxe_drop_ref(qp);
+	ib_device_put(dev);
+}
+
 int rxe_completer(void *arg)
 {
 	struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -624,11 +636,8 @@ int rxe_completer(void *arg)
 			break;
 
 		case COMPST_DONE:
-			if (pkt) {
-				rxe_drop_ref(pkt->qp);
-				kfree_skb(skb);
-				skb = NULL;
-			}
+			if (pkt)
+				free_pkt(pkt);
 			goto done;
 
 		case COMPST_EXIT:
@@ -671,12 +680,8 @@ int rxe_completer(void *arg)
 			 */
 			if (qp->comp.started_retry &&
 			    !qp->comp.timeout_retry) {
-				if (pkt) {
-					rxe_drop_ref(pkt->qp);
-					kfree_skb(skb);
-					skb = NULL;
-				}
-
+				if (pkt)
+					free_pkt(pkt);
 				goto done;
 			}
 
@@ -699,13 +704,8 @@ int rxe_completer(void *arg)
 					qp->comp.started_retry = 1;
 					rxe_run_task(&qp->req.task, 0);
 				}
-
-				if (pkt) {
-					rxe_drop_ref(pkt->qp);
-					kfree_skb(skb);
-					skb = NULL;
-				}
-
+				if (pkt)
+					free_pkt(pkt);
 				goto done;
 
 			} else {
@@ -726,9 +726,7 @@ int rxe_completer(void *arg)
 				mod_timer(&qp->rnr_nak_timer,
 					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
 						& ~AETH_TYPE_MASK));
-				rxe_drop_ref(pkt->qp);
-				kfree_skb(skb);
-				skb = NULL;
+				free_pkt(pkt);
 				goto exit;
 			} else {
 				rxe_counter_inc(rxe,
@@ -742,13 +740,8 @@ int rxe_completer(void *arg)
 			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
 			do_complete(qp, wqe);
 			rxe_qp_error(qp);
-
-			if (pkt) {
-				rxe_drop_ref(pkt->qp);
-				kfree_skb(skb);
-				skb = NULL;
-			}
-
+			if (pkt)
+				free_pkt(pkt);
 			goto exit;
 		}
 	}
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index 3b483b75dfe3..e432f9e37795 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -22,7 +22,6 @@ struct rxe_pkt_info {
 	u16			paylen;		/* length of bth - icrc */
 	u8			port_num;	/* port pkt received on */
 	u8			opcode;		/* bth opcode of packet */
-	u8			offset;		/* bth offset from pkt->hdr */
 };
 
 /* Macros should be used only for received skb */
@@ -280,134 +279,134 @@ static inline void __bth_set_psn(void *arg, u32 psn)
 
 static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
 {
-	return __bth_opcode(pkt->hdr + pkt->offset);
+	return __bth_opcode(pkt->hdr);
 }
 
 static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
 {
-	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
+	__bth_set_opcode(pkt->hdr, opcode);
 }
 
 static inline u8 bth_se(struct rxe_pkt_info *pkt)
 {
-	return __bth_se(pkt->hdr + pkt->offset);
+	return __bth_se(pkt->hdr);
 }
 
 static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
 {
-	__bth_set_se(pkt->hdr + pkt->offset, se);
+	__bth_set_se(pkt->hdr, se);
 }
 
 static inline u8 bth_mig(struct rxe_pkt_info *pkt)
 {
-	return __bth_mig(pkt->hdr + pkt->offset);
+	return __bth_mig(pkt->hdr);
 }
 
 static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
 {
-	__bth_set_mig(pkt->hdr + pkt->offset, mig);
+	__bth_set_mig(pkt->hdr, mig);
 }
 
 static inline u8 bth_pad(struct rxe_pkt_info *pkt)
 {
-	return __bth_pad(pkt->hdr + pkt->offset);
+	return __bth_pad(pkt->hdr);
 }
 
 static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
 {
-	__bth_set_pad(pkt->hdr + pkt->offset, pad);
+	__bth_set_pad(pkt->hdr, pad);
 }
 
 static inline u8 bth_tver(struct rxe_pkt_info *pkt)
 {
-	return __bth_tver(pkt->hdr + pkt->offset);
+	return __bth_tver(pkt->hdr);
 }
 
 static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
 {
-	__bth_set_tver(pkt->hdr + pkt->offset, tver);
+	__bth_set_tver(pkt->hdr, tver);
 }
 
 static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
 {
-	return __bth_pkey(pkt->hdr + pkt->offset);
+	return __bth_pkey(pkt->hdr);
 }
 
 static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
 {
-	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
+	__bth_set_pkey(pkt->hdr, pkey);
 }
 
 static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
 {
-	return __bth_qpn(pkt->hdr + pkt->offset);
+	return __bth_qpn(pkt->hdr);
 }
 
 static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
 {
-	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
+	__bth_set_qpn(pkt->hdr, qpn);
 }
 
 static inline int bth_fecn(struct rxe_pkt_info *pkt)
 {
-	return __bth_fecn(pkt->hdr + pkt->offset);
+	return __bth_fecn(pkt->hdr);
 }
 
 static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
 {
-	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
+	__bth_set_fecn(pkt->hdr, fecn);
 }
 
 static inline int bth_becn(struct rxe_pkt_info *pkt)
 {
-	return __bth_becn(pkt->hdr + pkt->offset);
+	return __bth_becn(pkt->hdr);
 }
 
 static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
 {
-	__bth_set_becn(pkt->hdr + pkt->offset, becn);
+	__bth_set_becn(pkt->hdr, becn);
 }
 
 static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
 {
-	return __bth_resv6a(pkt->hdr + pkt->offset);
+	return __bth_resv6a(pkt->hdr);
 }
 
 static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
 {
-	__bth_set_resv6a(pkt->hdr + pkt->offset);
+	__bth_set_resv6a(pkt->hdr);
 }
 
 static inline int bth_ack(struct rxe_pkt_info *pkt)
 {
-	return __bth_ack(pkt->hdr + pkt->offset);
+	return __bth_ack(pkt->hdr);
 }
 
 static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
 {
-	__bth_set_ack(pkt->hdr + pkt->offset, ack);
+	__bth_set_ack(pkt->hdr, ack);
 }
 
 static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
 {
-	__bth_set_resv7(pkt->hdr + pkt->offset);
+	__bth_set_resv7(pkt->hdr);
 }
 
 static inline u32 bth_psn(struct rxe_pkt_info *pkt)
 {
-	return __bth_psn(pkt->hdr + pkt->offset);
+	return __bth_psn(pkt->hdr);
 }
 
 static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
 {
-	__bth_set_psn(pkt->hdr + pkt->offset, psn);
+	__bth_set_psn(pkt->hdr, psn);
 }
 
 static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
 			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
 			    u32 psn)
 {
-	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
+	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);
 
 	bth->opcode = opcode;
 	bth->flags = (pad << 4) & BTH_PAD_MASK;
@@ -448,14 +447,14 @@ static inline void __rdeth_set_een(void *arg, u32 een)
 
 static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
 {
-	return __rdeth_een(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
+	return __rdeth_een(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
 }
 
 static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
 {
-	__rdeth_set_een(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
+	__rdeth_set_een(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
 }
 
 /******************************************************************************
@@ -499,26 +498,26 @@ static inline void __deth_set_sqp(void *arg, u32 sqp)
 
 static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
 {
-	return __deth_qkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+	return __deth_qkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
 }
 
 static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
 {
-	__deth_set_qkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
+	__deth_set_qkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
 }
 
 static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
 {
-	return __deth_sqp(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+	return __deth_sqp(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
 }
 
 static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
 {
-	__deth_set_sqp(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
+	__deth_set_sqp(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
 }
 
 /******************************************************************************
@@ -574,38 +573,38 @@ static inline void __reth_set_len(void *arg, u32 len)
 
 static inline u64 reth_va(struct rxe_pkt_info *pkt)
 {
-	return __reth_va(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+	return __reth_va(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
 }
 
 static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
 {
-	__reth_set_va(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
+	__reth_set_va(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
 }
 
 static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
 {
-	return __reth_rkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+	return __reth_rkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
 }
 
 static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
 {
-	__reth_set_rkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
+	__reth_set_rkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
 }
 
 static inline u32 reth_len(struct rxe_pkt_info *pkt)
 {
-	return __reth_len(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+	return __reth_len(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
 }
 
 static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
 {
-	__reth_set_len(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
+	__reth_set_len(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
 }
 
 /******************************************************************************
@@ -676,50 +675,50 @@ static inline void __atmeth_set_comp(void *arg, u64 comp)
 
 static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
 {
-	return __atmeth_va(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+	return __atmeth_va(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
 }
 
 static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
 {
-	__atmeth_set_va(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
+	__atmeth_set_va(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
 }
 
 static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
 {
-	return __atmeth_rkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+	return __atmeth_rkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
 }
 
 static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
 {
-	__atmeth_set_rkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
+	__atmeth_set_rkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
 }
 
 static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
 {
-	return __atmeth_swap_add(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+	return __atmeth_swap_add(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
 }
 
 static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
 {
-	__atmeth_set_swap_add(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
+	__atmeth_set_swap_add(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
 }
 
 static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
 {
-	return __atmeth_comp(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+	return __atmeth_comp(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
 }
 
 static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
 {
-	__atmeth_set_comp(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
+	__atmeth_set_comp(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
 }
 
 /******************************************************************************
@@ -780,26 +779,26 @@ static inline void __aeth_set_msn(void *arg, u32 msn)
 
 static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
 {
-	return __aeth_syn(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+	return __aeth_syn(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
 }
 
 static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
 {
-	__aeth_set_syn(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
+	__aeth_set_syn(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
 }
 
 static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
 {
-	return __aeth_msn(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+	return __aeth_msn(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
 }
 
 static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
 {
-	__aeth_set_msn(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
+	__aeth_set_msn(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
 }
 
 /******************************************************************************
@@ -825,14 +824,14 @@ static inline void __atmack_set_orig(void *arg, u64 orig)
 
 static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
 {
-	return __atmack_orig(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
+	return __atmack_orig(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
 }
 
 static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
 {
-	__atmack_set_orig(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
+	__atmack_set_orig(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
 }
 
 /******************************************************************************
@@ -858,14 +857,14 @@ static inline void __immdt_set_imm(void *arg, __be32 imm)
 
 static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
 {
-	return __immdt_imm(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
+	return __immdt_imm(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
 }
 
 static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
 {
-	__immdt_set_imm(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
+	__immdt_set_imm(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
 }
 
 /******************************************************************************
@@ -891,14 +890,14 @@ static inline void __ieth_set_rkey(void *arg, u32 rkey)
 
 static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
 {
-	return __ieth_rkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
+	return __ieth_rkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_IETH]);
 }
 
 static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
 {
-	__ieth_set_rkey(pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
+	__ieth_set_rkey(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
 }
 
 enum rxe_hdr_length {
@@ -915,13 +914,12 @@ enum rxe_hdr_length {
 
 static inline size_t header_size(struct rxe_pkt_info *pkt)
 {
-	return pkt->offset + rxe_opcode[pkt->opcode].length;
+	return rxe_opcode[pkt->opcode].length;
 }
 
 static inline void *payload_addr(struct rxe_pkt_info *pkt)
 {
-	return pkt->hdr + pkt->offset
-		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
+	return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
 }
 
 static inline size_t payload_size(struct rxe_pkt_info *pkt)
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index c02315aed8d1..0ea9a5aa4ec0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -7,45 +7,61 @@
 #include "rxe.h"
 #include "rxe_loc.h"
 
+/* caller should hold mc_grp_pool->pool_lock */
+static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
+				     struct rxe_pool *pool,
+				     union ib_gid *mgid)
+{
+	int err;
+	struct rxe_mc_grp *grp;
+
+	grp = rxe_alloc_locked(&rxe->mc_grp_pool);
+	if (!grp)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&grp->qp_list);
+	spin_lock_init(&grp->mcg_lock);
+	grp->rxe = rxe;
+	rxe_add_key_locked(grp, mgid);
+
+	err = rxe_mcast_add(rxe, mgid);
+	if (unlikely(err)) {
+		rxe_drop_key_locked(grp);
+		rxe_drop_ref(grp);
+		return ERR_PTR(err);
+	}
+
+	return grp;
+}
+
 int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
 		      struct rxe_mc_grp **grp_p)
 {
 	int err;
 	struct rxe_mc_grp *grp;
+	struct rxe_pool *pool = &rxe->mc_grp_pool;
+	unsigned long flags;
 
-	if (rxe->attr.max_mcast_qp_attach == 0) {
-		err = -EINVAL;
-		goto err1;
-	}
+	if (rxe->attr.max_mcast_qp_attach == 0)
+		return -EINVAL;
 
-	grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
+	write_lock_irqsave(&pool->pool_lock, flags);
+
+	grp = rxe_pool_get_key_locked(pool, mgid);
 	if (grp)
 		goto done;
 
-	grp = rxe_alloc(&rxe->mc_grp_pool);
-	if (!grp) {
-		err = -ENOMEM;
-		goto err1;
+	grp = create_grp(rxe, pool, mgid);
+	if (IS_ERR(grp)) {
+		write_unlock_irqrestore(&pool->pool_lock, flags);
+		err = PTR_ERR(grp);
+		return err;
 	}
 
-	INIT_LIST_HEAD(&grp->qp_list);
-	spin_lock_init(&grp->mcg_lock);
-	grp->rxe = rxe;
-
-	rxe_add_key(grp, mgid);
-
-	err = rxe_mcast_add(rxe, mgid);
-	if (err)
-		goto err2;
-
 done:
+	write_unlock_irqrestore(&pool->pool_lock, flags);
 	*grp_p = grp;
 	return 0;
-
-err2:
-	rxe_drop_ref(grp);
-err1:
-	return err;
 }
 
 int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
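The rxe_mcast.c change above makes the key lookup and the group creation atomic: both now run under a single write-held pool_lock, so two callers racing on the same mgid can no longer each miss the lookup and insert a duplicate group. A minimal sketch of the pattern, not part of the patch (names mirror the patch, error handling trimmed); it is also why rxe_alloc_locked() in rxe_pool.c below allocates with GFP_ATOMIC, since it runs with the lock held and must not sleep:

	write_lock_irqsave(&pool->pool_lock, flags);
	grp = rxe_pool_get_key_locked(pool, mgid);	/* takes a ref on hit */
	if (!grp)
		grp = create_grp(rxe, pool, mgid);	/* may return ERR_PTR */
	write_unlock_irqrestore(&pool->pool_lock, flags);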
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c4b06ced30a7..36d56163afac 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -152,10 +152,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
 static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct udphdr *udph;
+	struct rxe_dev *rxe;
 	struct net_device *ndev = skb->dev;
-	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 
+	/* takes a reference on rxe->ib_dev
+	 * drop when skb is freed
+	 */
+	rxe = rxe_get_dev_from_net(ndev);
 	if (!rxe)
 		goto drop;
 
@@ -174,12 +178,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
 	rxe_rcv(skb);
 
-	/*
-	 * FIXME: this is in the wrong place, it needs to be done when pkt is
-	 * destroyed
-	 */
-	ib_device_put(&rxe->ib_dev);
-
 	return 0;
 drop:
 	kfree_skb(skb);
@@ -408,6 +406,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
 
 void rxe_loopback(struct sk_buff *skb)
 {
+	if (skb->protocol == htons(ETH_P_IP))
+		skb_pull(skb, sizeof(struct iphdr));
+	else
+		skb_pull(skb, sizeof(struct ipv6hdr));
+
 	rxe_rcv(skb);
 }
 
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b374eb53e2fe..307d8986e7c9 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -15,21 +15,25 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_UC] = {
 		.name		= "rxe-uc",
 		.size		= sizeof(struct rxe_ucontext),
+		.elem_offset	= offsetof(struct rxe_ucontext, pelem),
 		.flags          = RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_PD] = {
 		.name		= "rxe-pd",
 		.size		= sizeof(struct rxe_pd),
+		.elem_offset	= offsetof(struct rxe_pd, pelem),
 		.flags		= RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_AH] = {
 		.name		= "rxe-ah",
 		.size		= sizeof(struct rxe_ah),
-		.flags		= RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
+		.elem_offset	= offsetof(struct rxe_ah, pelem),
+		.flags		= RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_SRQ] = {
 		.name		= "rxe-srq",
 		.size		= sizeof(struct rxe_srq),
+		.elem_offset	= offsetof(struct rxe_srq, pelem),
 		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
 		.min_index	= RXE_MIN_SRQ_INDEX,
 		.max_index	= RXE_MAX_SRQ_INDEX,
@@ -37,6 +41,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_QP] = {
 		.name		= "rxe-qp",
 		.size		= sizeof(struct rxe_qp),
+		.elem_offset	= offsetof(struct rxe_qp, pelem),
 		.cleanup	= rxe_qp_cleanup,
 		.flags		= RXE_POOL_INDEX,
 		.min_index	= RXE_MIN_QP_INDEX,
@@ -45,12 +50,14 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_CQ] = {
 		.name		= "rxe-cq",
 		.size		= sizeof(struct rxe_cq),
+		.elem_offset	= offsetof(struct rxe_cq, pelem),
 		.flags		= RXE_POOL_NO_ALLOC,
 		.cleanup	= rxe_cq_cleanup,
 	},
 	[RXE_TYPE_MR] = {
 		.name		= "rxe-mr",
 		.size		= sizeof(struct rxe_mem),
+		.elem_offset	= offsetof(struct rxe_mem, pelem),
 		.cleanup	= rxe_mem_cleanup,
 		.flags		= RXE_POOL_INDEX,
 		.max_index	= RXE_MAX_MR_INDEX,
@@ -59,6 +66,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_MW] = {
 		.name		= "rxe-mw",
 		.size		= sizeof(struct rxe_mem),
+		.elem_offset	= offsetof(struct rxe_mem, pelem),
 		.flags		= RXE_POOL_INDEX,
 		.max_index	= RXE_MAX_MW_INDEX,
 		.min_index	= RXE_MIN_MW_INDEX,
@@ -66,6 +74,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_MC_GRP] = {
 		.name		= "rxe-mc_grp",
 		.size		= sizeof(struct rxe_mc_grp),
+		.elem_offset	= offsetof(struct rxe_mc_grp, pelem),
 		.cleanup	= rxe_mc_cleanup,
 		.flags		= RXE_POOL_KEY,
 		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
@@ -74,7 +83,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_MC_ELEM] = {
 		.name		= "rxe-mc_elem",
 		.size		= sizeof(struct rxe_mc_elem),
-		.flags		= RXE_POOL_ATOMIC,
+		.elem_offset	= offsetof(struct rxe_mc_elem, pelem),
 	},
 };
 
@@ -94,18 +103,18 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 		goto out;
 	}
 
-	pool->max_index = max;
-	pool->min_index = min;
+	pool->index.max_index = max;
+	pool->index.min_index = min;
 
 	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
-	pool->table = kmalloc(size, GFP_KERNEL);
-	if (!pool->table) {
+	pool->index.table = kmalloc(size, GFP_KERNEL);
+	if (!pool->index.table) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	pool->table_size = size;
-	bitmap_zero(pool->table, max - min + 1);
+	pool->index.table_size = size;
+	bitmap_zero(pool->index.table, max - min + 1);
 
 out:
 	return err;
@@ -127,13 +136,12 @@ int rxe_pool_init(
 	pool->max_elem		= max_elem;
 	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
 	pool->flags		= rxe_type_info[type].flags;
-	pool->tree		= RB_ROOT;
+	pool->index.tree	= RB_ROOT;
+	pool->key.tree		= RB_ROOT;
 	pool->cleanup		= rxe_type_info[type].cleanup;
 
 	atomic_set(&pool->num_elem, 0);
 
-	kref_init(&pool->ref_cnt);
-
 	rwlock_init(&pool->pool_lock);
 
 	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
@@ -145,67 +153,47 @@ int rxe_pool_init(
 	}
 
 	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
-		pool->key_offset = rxe_type_info[type].key_offset;
-		pool->key_size = rxe_type_info[type].key_size;
+		pool->key.key_offset = rxe_type_info[type].key_offset;
+		pool->key.key_size = rxe_type_info[type].key_size;
 	}
 
-	pool->state = RXE_POOL_STATE_VALID;
-
 out:
 	return err;
 }
 
-static void rxe_pool_release(struct kref *kref)
-{
-	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
-
-	pool->state = RXE_POOL_STATE_INVALID;
-	kfree(pool->table);
-}
-
-static void rxe_pool_put(struct rxe_pool *pool)
-{
-	kref_put(&pool->ref_cnt, rxe_pool_release);
-}
-
 void rxe_pool_cleanup(struct rxe_pool *pool)
 {
-	unsigned long flags;
-
-	write_lock_irqsave(&pool->pool_lock, flags);
-	pool->state = RXE_POOL_STATE_INVALID;
 	if (atomic_read(&pool->num_elem) > 0)
 		pr_warn("%s pool destroyed with unfree'd elem\n",
 			pool_name(pool));
-	write_unlock_irqrestore(&pool->pool_lock, flags);
 
-	rxe_pool_put(pool);
+	kfree(pool->index.table);
 }
 
 static u32 alloc_index(struct rxe_pool *pool)
 {
 	u32 index;
-	u32 range = pool->max_index - pool->min_index + 1;
+	u32 range = pool->index.max_index - pool->index.min_index + 1;
 
-	index = find_next_zero_bit(pool->table, range, pool->last);
+	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
 	if (index >= range)
-		index = find_first_zero_bit(pool->table, range);
+		index = find_first_zero_bit(pool->index.table, range);
 
 	WARN_ON_ONCE(index >= range);
-	set_bit(index, pool->table);
-	pool->last = index;
-	return index + pool->min_index;
+	set_bit(index, pool->index.table);
+	pool->index.last = index;
+	return index + pool->index.min_index;
 }
 
 static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
 {
-	struct rb_node **link = &pool->tree.rb_node;
+	struct rb_node **link = &pool->index.tree.rb_node;
 	struct rb_node *parent = NULL;
 	struct rxe_pool_entry *elem;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, node);
+		elem = rb_entry(parent, struct rxe_pool_entry, index_node);
 
 		if (elem->index == new->index) {
 			pr_warn("element already exists!\n");
@@ -218,25 +206,25 @@ static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
 			link = &(*link)->rb_right;
 	}
 
-	rb_link_node(&new->node, parent, link);
-	rb_insert_color(&new->node, &pool->tree);
+	rb_link_node(&new->index_node, parent, link);
+	rb_insert_color(&new->index_node, &pool->index.tree);
 out:
 	return;
 }
 
 static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
 {
-	struct rb_node **link = &pool->tree.rb_node;
+	struct rb_node **link = &pool->key.tree.rb_node;
 	struct rb_node *parent = NULL;
 	struct rxe_pool_entry *elem;
 	int cmp;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, node);
+		elem = rb_entry(parent, struct rxe_pool_entry, key_node);
 
-		cmp = memcmp((u8 *)elem + pool->key_offset,
-			     (u8 *)new + pool->key_offset, pool->key_size);
+		cmp = memcmp((u8 *)elem + pool->key.key_offset,
+			     (u8 *)new + pool->key.key_offset, pool->key.key_size);
 
 		if (cmp == 0) {
 			pr_warn("key already exists!\n");
@@ -249,116 +237,135 @@ static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
 			link = &(*link)->rb_right;
 	}
 
-	rb_link_node(&new->node, parent, link);
-	rb_insert_color(&new->node, &pool->tree);
+	rb_link_node(&new->key_node, parent, link);
+	rb_insert_color(&new->key_node, &pool->key.tree);
 out:
 	return;
 }
 
-void rxe_add_key(void *arg, void *key)
+void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
 {
-	struct rxe_pool_entry *elem = arg;
 	struct rxe_pool *pool = elem->pool;
-	unsigned long flags;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
-	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
+	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
 	insert_key(pool, elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
-void rxe_drop_key(void *arg)
+void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
 {
-	struct rxe_pool_entry *elem = arg;
 	struct rxe_pool *pool = elem->pool;
 	unsigned long flags;
 
 	write_lock_irqsave(&pool->pool_lock, flags);
-	rb_erase(&elem->node, &pool->tree);
+	__rxe_add_key_locked(elem, key);
 	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
-void rxe_add_index(void *arg)
+void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+{
+	struct rxe_pool *pool = elem->pool;
+
+	rb_erase(&elem->key_node, &pool->key.tree);
+}
+
+void __rxe_drop_key(struct rxe_pool_entry *elem)
 {
-	struct rxe_pool_entry *elem = arg;
 	struct rxe_pool *pool = elem->pool;
 	unsigned long flags;
 
 	write_lock_irqsave(&pool->pool_lock, flags);
+	__rxe_drop_key_locked(elem);
+	write_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void __rxe_add_index_locked(struct rxe_pool_entry *elem)
+{
+	struct rxe_pool *pool = elem->pool;
+
 	elem->index = alloc_index(pool);
 	insert_index(pool, elem);
-	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
-void rxe_drop_index(void *arg)
+void __rxe_add_index(struct rxe_pool_entry *elem)
 {
-	struct rxe_pool_entry *elem = arg;
 	struct rxe_pool *pool = elem->pool;
 	unsigned long flags;
 
 	write_lock_irqsave(&pool->pool_lock, flags);
-	clear_bit(elem->index - pool->min_index, pool->table);
-	rb_erase(&elem->node, &pool->tree);
+	__rxe_add_index_locked(elem);
 	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
-void *rxe_alloc(struct rxe_pool *pool)
+void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
 {
-	struct rxe_pool_entry *elem;
-	unsigned long flags;
+	struct rxe_pool *pool = elem->pool;
 
-	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+	clear_bit(elem->index - pool->index.min_index, pool->index.table);
+	rb_erase(&elem->index_node, &pool->index.tree);
+}
 
-	read_lock_irqsave(&pool->pool_lock, flags);
-	if (pool->state != RXE_POOL_STATE_VALID) {
-		read_unlock_irqrestore(&pool->pool_lock, flags);
-		return NULL;
-	}
-	kref_get(&pool->ref_cnt);
-	read_unlock_irqrestore(&pool->pool_lock, flags);
+void __rxe_drop_index(struct rxe_pool_entry *elem)
+{
+	struct rxe_pool *pool = elem->pool;
+	unsigned long flags;
+
+	write_lock_irqsave(&pool->pool_lock, flags);
+	__rxe_drop_index_locked(elem);
+	write_unlock_irqrestore(&pool->pool_lock, flags);
+}
 
-	if (!ib_device_try_get(&pool->rxe->ib_dev))
-		goto out_put_pool;
+void *rxe_alloc_locked(struct rxe_pool *pool)
+{
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	struct rxe_pool_entry *elem;
+	u8 *obj;
 
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	elem = kzalloc(rxe_type_info[pool->type].size,
-				 (pool->flags & RXE_POOL_ATOMIC) ?
-				 GFP_ATOMIC : GFP_KERNEL);
-	if (!elem)
+	obj = kzalloc(info->size, GFP_ATOMIC);
+	if (!obj)
 		goto out_cnt;
 
+	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+
 	elem->pool = pool;
 	kref_init(&elem->ref_cnt);
 
-	return elem;
+	return obj;
 
 out_cnt:
 	atomic_dec(&pool->num_elem);
-	ib_device_put(&pool->rxe->ib_dev);
-out_put_pool:
-	rxe_pool_put(pool);
 	return NULL;
 }
 
-int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+void *rxe_alloc(struct rxe_pool *pool)
 {
-	unsigned long flags;
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	struct rxe_pool_entry *elem;
+	u8 *obj;
+
+	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+		goto out_cnt;
 
-	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+	obj = kzalloc(info->size, GFP_KERNEL);
+	if (!obj)
+		goto out_cnt;
 
-	read_lock_irqsave(&pool->pool_lock, flags);
-	if (pool->state != RXE_POOL_STATE_VALID) {
-		read_unlock_irqrestore(&pool->pool_lock, flags);
-		return -EINVAL;
-	}
-	kref_get(&pool->ref_cnt);
-	read_unlock_irqrestore(&pool->pool_lock, flags);
+	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
 
-	if (!ib_device_try_get(&pool->rxe->ib_dev))
-		goto out_put_pool;
+	elem->pool = pool;
+	kref_init(&elem->ref_cnt);
+
+	return obj;
+
+out_cnt:
+	atomic_dec(&pool->num_elem);
+	return NULL;
+}
 
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+{
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
@@ -369,9 +376,6 @@ int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
 
 out_cnt:
 	atomic_dec(&pool->num_elem);
-	ib_device_put(&pool->rxe->ib_dev);
-out_put_pool:
-	rxe_pool_put(pool);
 	return -EINVAL;
 }
 
@@ -380,67 +384,77 @@ void rxe_elem_release(struct kref *kref)
 	struct rxe_pool_entry *elem =
 		container_of(kref, struct rxe_pool_entry, ref_cnt);
 	struct rxe_pool *pool = elem->pool;
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	u8 *obj;
 
 	if (pool->cleanup)
 		pool->cleanup(elem);
 
-	if (!(pool->flags & RXE_POOL_NO_ALLOC))
-		kfree(elem);
+	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
+		obj = (u8 *)elem - info->elem_offset;
+		kfree(obj);
+	}
+
 	atomic_dec(&pool->num_elem);
-	ib_device_put(&pool->rxe->ib_dev);
-	rxe_pool_put(pool);
 }
 
-void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
+void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
 {
-	struct rb_node *node = NULL;
-	struct rxe_pool_entry *elem = NULL;
-	unsigned long flags;
-
-	read_lock_irqsave(&pool->pool_lock, flags);
-
-	if (pool->state != RXE_POOL_STATE_VALID)
-		goto out;
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	struct rb_node *node;
+	struct rxe_pool_entry *elem;
+	u8 *obj;
 
-	node = pool->tree.rb_node;
+	node = pool->index.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, node);
+		elem = rb_entry(node, struct rxe_pool_entry, index_node);
 
 		if (elem->index > index)
 			node = node->rb_left;
 		else if (elem->index < index)
 			node = node->rb_right;
-		else {
-			kref_get(&elem->ref_cnt);
+		else
 			break;
-		}
 	}
 
-out:
-	read_unlock_irqrestore(&pool->pool_lock, flags);
-	return node ? elem : NULL;
+	if (node) {
+		kref_get(&elem->ref_cnt);
+		obj = (u8 *)elem - info->elem_offset;
+	} else {
+		obj = NULL;
+	}
+
+	return obj;
 }
 
-void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 {
-	struct rb_node *node = NULL;
-	struct rxe_pool_entry *elem = NULL;
-	int cmp;
+	u8 *obj;
 	unsigned long flags;
 
 	read_lock_irqsave(&pool->pool_lock, flags);
+	obj = rxe_pool_get_index_locked(pool, index);
+	read_unlock_irqrestore(&pool->pool_lock, flags);
 
-	if (pool->state != RXE_POOL_STATE_VALID)
-		goto out;
+	return obj;
+}
 
-	node = pool->tree.rb_node;
+void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
+{
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	struct rb_node *node;
+	struct rxe_pool_entry *elem;
+	u8 *obj;
+	int cmp;
+
+	node = pool->key.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, node);
+		elem = rb_entry(node, struct rxe_pool_entry, key_node);
 
-		cmp = memcmp((u8 *)elem + pool->key_offset,
-			     key, pool->key_size);
+		cmp = memcmp((u8 *)elem + pool->key.key_offset,
+			     key, pool->key.key_size);
 
 		if (cmp > 0)
 			node = node->rb_left;
@@ -450,10 +464,24 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 			break;
 	}
 
-	if (node)
+	if (node) {
 		kref_get(&elem->ref_cnt);
+		obj = (u8 *)elem - info->elem_offset;
+	} else {
+		obj = NULL;
+	}
 
-out:
+	return obj;
+}
+
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+{
+	u8 *obj;
+	unsigned long flags;
+
+	read_lock_irqsave(&pool->pool_lock, flags);
+	obj = rxe_pool_get_key_locked(pool, key);
 	read_unlock_irqrestore(&pool->pool_lock, flags);
-	return node ? elem : NULL;
+
+	return obj;
 }
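The elem_offset arithmetic introduced in rxe_pool.c above is an open-coded container_of(): the pool now hands out and takes back the containing object rather than the embedded rxe_pool_entry, and elem_offset records where pelem sits inside each object type. A sketch of the equivalence, not part of the patch, using struct rxe_qp (whose rxe_type_info entry sets .elem_offset = offsetof(struct rxe_qp, pelem)):

	struct rxe_qp *qp = rxe_alloc(&rxe->qp_pool);	/* returns the object */
	struct rxe_pool_entry *elem = &qp->pelem;	/* == (u8 *)qp + elem_offset */

	/* what rxe_elem_release() computes by hand as (u8 *)elem - info->elem_offset: */
	struct rxe_qp *same = container_of(elem, struct rxe_qp, pelem);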
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 432745ffc8d4..61210b300a78 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -11,7 +11,6 @@
 #define RXE_POOL_CACHE_FLAGS	(0)
 
 enum rxe_pool_flags {
-	RXE_POOL_ATOMIC		= BIT(0),
 	RXE_POOL_INDEX		= BIT(1),
 	RXE_POOL_KEY		= BIT(2),
 	RXE_POOL_NO_ALLOC	= BIT(4),
@@ -36,6 +35,7 @@ struct rxe_pool_entry;
 struct rxe_type_info {
 	const char		*name;
 	size_t			size;
+	size_t			elem_offset;
 	void			(*cleanup)(struct rxe_pool_entry *obj);
 	enum rxe_pool_flags	flags;
 	u32			max_index;
@@ -46,18 +46,16 @@ struct rxe_type_info {
 
 extern struct rxe_type_info rxe_type_info[];
 
-enum rxe_pool_state {
-	RXE_POOL_STATE_INVALID,
-	RXE_POOL_STATE_VALID,
-};
-
 struct rxe_pool_entry {
 	struct rxe_pool		*pool;
 	struct kref		ref_cnt;
 	struct list_head	list;
 
-	/* only used if indexed or keyed */
-	struct rb_node		node;
+	/* only used if keyed */
+	struct rb_node		key_node;
+
+	/* only used if indexed */
+	struct rb_node		index_node;
 	u32			index;
 };
 
@@ -65,24 +63,29 @@ struct rxe_pool {
 	struct rxe_dev		*rxe;
 	rwlock_t		pool_lock; /* protects pool add/del/search */
 	size_t			elem_size;
-	struct kref		ref_cnt;
 	void			(*cleanup)(struct rxe_pool_entry *obj);
-	enum rxe_pool_state	state;
 	enum rxe_pool_flags	flags;
 	enum rxe_elem_type	type;
 
 	unsigned int		max_elem;
 	atomic_t		num_elem;
 
-	/* only used if indexed or keyed */
-	struct rb_root		tree;
-	unsigned long		*table;
-	size_t			table_size;
-	u32			max_index;
-	u32			min_index;
-	u32			last;
-	size_t			key_offset;
-	size_t			key_size;
+	/* only used if indexed */
+	struct {
+		struct rb_root		tree;
+		unsigned long		*table;
+		size_t			table_size;
+		u32			last;
+		u32			max_index;
+		u32			min_index;
+	} index;
+
+	/* only used if keyed */
+	struct {
+		struct rb_root		tree;
+		size_t			key_offset;
+		size_t			key_size;
+	} key;
 };
 
 /* initialize a pool of objects with given limit on
@@ -95,32 +98,70 @@ int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
 
 /* free resources from object pool */
 void rxe_pool_cleanup(struct rxe_pool *pool);
 
-/* allocate an object from pool */
+/* allocate an object from pool holding and not holding the pool lock */
+void *rxe_alloc_locked(struct rxe_pool *pool);
+
 void *rxe_alloc(struct rxe_pool *pool);
 
 /* connect already allocated object to pool */
-int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
 
 /* assign an index to an indexed object and insert object into
- * pool's rb tree
+ * pool's rb tree holding and not holding the pool_lock
 */
-void rxe_add_index(void *elem);
+void __rxe_add_index_locked(struct rxe_pool_entry *elem);
+
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
 
-/* drop an index and remove object from rb tree */
-void rxe_drop_index(void *elem);
+void __rxe_add_index(struct rxe_pool_entry *elem);
+
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+
+/* drop an index and remove object from rb tree
+ * holding and not holding the pool_lock
+ */
+void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+
+void __rxe_drop_index(struct rxe_pool_entry *elem);
+
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
 
 /* assign a key to a keyed object and insert object into
- * pool's rb tree
+ * pool's rb tree holding and not holding pool_lock
 */
-void rxe_add_key(void *elem, void *key);
+void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+
+void __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+
+/* remove elem from rb tree holding and not holding the pool_lock */
+void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
 
-/* remove elem from rb tree */
-void rxe_drop_key(void *elem);
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+
+void __rxe_drop_key(struct rxe_pool_entry *elem);
+
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+
+/* lookup an indexed object from index holding and not holding the pool_lock.
+ * takes a reference on object
+ */
+void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index);
 
-/* lookup an indexed object from index. takes a reference on object */
 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
 
-/* lookup keyed object from key. takes a reference on the object */
+/* lookup keyed object from key holding and not holding the pool_lock.
+ * takes a reference on the objecti
+ */
+void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key);
+
 void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
 
 /* cleanup an object when all references are dropped */
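The wrapper macros above move the &obj->pelem bookkeeping out of every call site: callers pass the containing object, and the macro reaches into its pelem member (which also fails to compile for a type without one). A before/after sketch of one call site, not part of the patch; the rxe_verbs.c hunks further down show the real conversions:

	err = rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);	/* old interface */
	err = rxe_add_to_pool(&rxe->pd_pool, pd);		/* new: expands to
								 * __rxe_add_to_pool(pool, &(pd)->pelem)
								 */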
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 656a5b4be847..34ae957a315c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -62,6 +62,17 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
 	struct rxe_port *port;
 	int port_num = init->port_num;
 
+	switch (init->qp_type) {
+	case IB_QPT_SMI:
+	case IB_QPT_GSI:
+	case IB_QPT_RC:
+	case IB_QPT_UC:
+	case IB_QPT_UD:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	if (!init->recv_cq || !init->send_cq) {
 		pr_warn("missing cq\n");
 		goto err1;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index c9984a28eecc..45d2f711bce2 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -9,21 +9,26 @@
 #include "rxe.h"
 #include "rxe_loc.h"
 
+/* check that QP matches packet opcode type and is in a valid state */
 static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 			    struct rxe_qp *qp)
 {
+	unsigned int pkt_type;
+
 	if (unlikely(!qp->valid))
 		goto err1;
 
+	pkt_type = pkt->opcode & 0xe0;
+
 	switch (qp_type(qp)) {
 	case IB_QPT_RC:
-		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
+		if (unlikely(pkt_type != IB_OPCODE_RC)) {
 			pr_warn_ratelimited("bad qp type\n");
 			goto err1;
 		}
 		break;
 	case IB_QPT_UC:
-		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
+		if (unlikely(pkt_type != IB_OPCODE_UC)) {
 			pr_warn_ratelimited("bad qp type\n");
 			goto err1;
 		}
@@ -31,7 +36,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 	case IB_QPT_UD:
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
-		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
+		if (unlikely(pkt_type != IB_OPCODE_UD)) {
 			pr_warn_ratelimited("bad qp type\n");
 			goto err1;
 		}
@@ -85,8 +90,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 		goto err1;
 	}
 
-	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
-	    pkt->mask) {
+	if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
 		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
 
 		if (unlikely(deth_qkey(pkt) != qkey)) {
@@ -252,7 +256,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 
 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
 		qp = mce->qp;
-		pkt = SKB_TO_PKT(skb);
 
 		/* validate qp for incoming packet */
 		err = check_type_state(rxe, pkt, qp);
@@ -264,12 +267,22 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 			continue;
 
 		/* for all but the last qp create a new clone of the
-		 * skb and pass to the qp.
+		 * skb and pass to the qp. If an error occurs in the
+		 * checks for the last qp in the list we need to
+		 * free the skb since it hasn't been passed on to
+		 * rxe_rcv_pkt() which would free it later.
 		 */
-		if (mce->qp_list.next != &mcg->qp_list)
+		if (mce->qp_list.next != &mcg->qp_list) {
 			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
-		else
+			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+				kfree_skb(per_qp_skb);
+				continue;
+			}
+		} else {
 			per_qp_skb = skb;
+			/* show we have consumed the skb */
+			skb = NULL;
+		}
 
 		if (unlikely(!per_qp_skb))
 			continue;
@@ -284,10 +297,10 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
-	return;
-
 err1:
+	/* free skb if not consumed */
 	kfree_skb(skb);
+	ib_device_put(&rxe->ib_dev);
 }
 
 /**
@@ -340,9 +353,7 @@ void rxe_rcv(struct sk_buff *skb)
 	__be32 *icrcp;
 	u32 calc_icrc, pack_icrc;
 
-	pkt->offset = 0;
-
-	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
+	if (unlikely(skb->len < RXE_BTH_BYTES))
 		goto drop;
 
 	if (rxe_chk_dgid(rxe, skb) < 0) {
@@ -397,4 +408,5 @@ drop:
 		rxe_drop_ref(pkt->qp);
 
 	kfree_skb(skb);
+	ib_device_put(&rxe->ib_dev);
 }
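On the check_type_state() hunks above: the BTH opcode carries the transport type in its top three bits (IB_OPCODE_RC = 0x00, IB_OPCODE_UC = 0x20, IB_OPCODE_RD = 0x40, IB_OPCODE_UD = 0x60), so the old bitwise tests misfired in both directions: (opcode & IB_OPCODE_RC) != 0 is always false and thus rejected nothing on RC QPs, while !(opcode & IB_OPCODE_UC) let UD opcodes through on UC QPs, since their 0x60 type field contains the 0x20 bit. Masking with 0xe0 and comparing for equality tests the type field exactly; a sketch, not part of the patch:

	pkt_type = pkt->opcode & 0xe0;	/* isolate the three transport-type bits */
	if (pkt_type != IB_OPCODE_RC)	/* exact compare, valid even for 0x00 */
		goto err1;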
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d4917646641a..889290793d75 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -375,7 +375,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	pkt->psn	= qp->req.psn;
 	pkt->mask	= rxe_opcode[opcode].mask;
 	pkt->paylen	= paylen;
-	pkt->offset	= 0;
 	pkt->wqe	= wqe;
 
 	/* init skb */
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5a098083a9d2..1ae94f2cb336 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -99,6 +99,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
 		while ((skb = skb_dequeue(&qp->req_pkts))) {
 			rxe_drop_ref(qp);
 			kfree_skb(skb);
+			ib_device_put(qp->ibqp.device);
 		}
 
 		/* go drain recv wr queue */
@@ -585,11 +586,10 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
 	ack->qp = qp;
 	ack->opcode = opcode;
 	ack->mask = rxe_opcode[opcode].mask;
-	ack->offset = pkt->offset;
 	ack->paylen = paylen;
 
 	/* fill in bth using the request packet headers */
-	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
+	memcpy(ack->hdr, pkt->hdr, RXE_BTH_BYTES);
 
 	bth_set_opcode(ack, opcode);
 	bth_set_qpn(ack, qp->attr.dest_qp_num);
@@ -1012,6 +1012,7 @@ static enum resp_states cleanup(struct rxe_qp *qp,
 		skb = skb_dequeue(&qp->req_pkts);
 		rxe_drop_ref(qp);
 		kfree_skb(skb);
+		ib_device_put(qp->ibqp.device);
 	}
 
 	if (qp->resp.mr) {
@@ -1176,6 +1177,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
 	while ((skb = skb_dequeue(&qp->req_pkts))) {
 		rxe_drop_ref(qp);
 		kfree_skb(skb);
+		ib_device_put(qp->ibqp.device);
 	}
 
 	if (notify)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index a031514e2f41..dee5e0e919d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -106,12 +106,12 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
 	return IB_LINK_LAYER_ETHERNET;
 }
 
-static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
 {
-	struct rxe_dev *rxe = to_rdev(uctx->device);
-	struct rxe_ucontext *uc = to_ruc(uctx);
+	struct rxe_dev *rxe = to_rdev(ibuc->device);
+	struct rxe_ucontext *uc = to_ruc(ibuc);
 
-	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
+	return rxe_add_to_pool(&rxe->uc_pool, uc);
 }
 
 static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
@@ -145,7 +145,7 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
 
-	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
+	return rxe_add_to_pool(&rxe->pd_pool, pd);
 }
 
 static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
@@ -169,7 +169,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
 	if (err)
 		return err;
 
-	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
+	err = rxe_add_to_pool(&rxe->ah_pool, ah);
 	if (err)
 		return err;
 
@@ -273,7 +273,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	if (err)
 		goto err1;
 
-	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+	err = rxe_add_to_pool(&rxe->srq_pool, srq);
 	if (err)
 		goto err1;
 
@@ -555,37 +555,42 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 	}
 }
 
-static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+				    const struct ib_send_wr *ibwr)
+{
+	struct ib_sge *sge = ibwr->sg_list;
+	u8 *p = wqe->dma.inline_data;
+	int i;
+
+	for (i = 0; i < ibwr->num_sge; i++, sge++) {
+		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		p += sge->length;
+	}
+}
+
+static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 			 unsigned int mask, unsigned int length,
 			 struct rxe_send_wqe *wqe)
 {
 	int num_sge = ibwr->num_sge;
-	struct ib_sge *sge;
-	int i;
-	u8 *p;
 
 	init_send_wr(qp, &wqe->wr, ibwr);
 
+	/* local operation */
+	if (unlikely(mask & WR_REG_MASK)) {
+		wqe->mask = mask;
+		wqe->state = wqe_state_posted;
+		return;
+	}
+
 	if (qp_type(qp) == IB_QPT_UD ||
 	    qp_type(qp) == IB_QPT_SMI ||
 	    qp_type(qp) == IB_QPT_GSI)
 		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
 
-	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
-		p = wqe->dma.inline_data;
-
-		sge = ibwr->sg_list;
-		for (i = 0; i < num_sge; i++, sge++) {
-			memcpy(p, (void *)(uintptr_t)sge->addr,
-			       sge->length);
-
-			p += sge->length;
-		}
-	} else if (mask & WR_REG_MASK) {
-		wqe->mask = mask;
-		wqe->state = wqe_state_posted;
-		return 0;
-	} else
+	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+		copy_inline_data_to_wqe(wqe, ibwr);
+	else
 		memcpy(wqe->dma.sge, ibwr->sg_list,
 		       num_sge * sizeof(struct ib_sge));
 
@@ -599,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	wqe->dma.sge_offset	= 0;
 	wqe->state		= wqe_state_posted;
 	wqe->ssn		= atomic_add_return(1, &qp->ssn);
-
-	return 0;
 }
 
 static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -623,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	}
 
 	send_wqe = producer_addr(sq->queue);
-
-	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
-	if (unlikely(err))
-		goto err1;
+	init_send_wqe(qp, ibwr, mask, length, send_wqe);
 
 	advance_producer(sq->queue);
 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -774,7 +774,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	if (err)
 		return err;
 
-	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
+	return rxe_add_to_pool(&rxe->cq_pool, cq);
 }
 
 static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -1118,7 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
 	struct ib_device *dev = &rxe->ib_dev;
 	struct crypto_shash *tfm;
 
-	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+	strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
 
 	dev->node_type = RDMA_NODE_IB_CA;
 	dev->phys_port_cnt = 1;