Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_cq.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c | 91
1 file changed, 23 insertions, 68 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index aef288f164fd..fffd144d509e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -14,25 +14,21 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
 	int count;
 
 	if (cqe <= 0) {
-		pr_warn("cqe(%d) <= 0\n", cqe);
+		rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
 		goto err1;
 	}
 
 	if (cqe > rxe->attr.max_cqe) {
-		pr_warn("cqe(%d) > max_cqe(%d)\n",
-			cqe, rxe->attr.max_cqe);
+		rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
+			    cqe, rxe->attr.max_cqe);
 		goto err1;
 	}
 
 	if (cq) {
-		if (cq->is_user)
-			count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
-		else
-			count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
-
+		count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
 		if (cqe < count) {
-			pr_warn("cqe(%d) < current # elements in queue (%d)",
-				cqe, count);
+			rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
+				   cqe, count);
 			goto err1;
 		}
 	}
@@ -43,21 +39,6 @@ err1:
 	return -EINVAL;
 }
 
-static void rxe_send_complete(struct tasklet_struct *t)
-{
-	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->cq_lock, flags);
-	if (cq->is_dying) {
-		spin_unlock_irqrestore(&cq->cq_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
-
-	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
-}
-
 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 		     int comp_vector, struct ib_udata *udata,
 		     struct rxe_create_cq_resp __user *uresp)
@@ -65,28 +46,20 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 	int err;
 	enum queue_type type;
 
-	type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
+	type = QUEUE_TYPE_TO_CLIENT;
 	cq->queue = rxe_queue_init(rxe, &cqe,
 			sizeof(struct rxe_cqe), type);
 	if (!cq->queue) {
-		pr_warn("unable to create cq\n");
+		rxe_dbg_dev(rxe, "unable to create cq\n");
 		return -ENOMEM;
 	}
 
 	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
 			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
-	if (err) {
-		vfree(cq->queue->buf);
-		kfree(cq->queue);
+	if (err)
 		return err;
-	}
 
-	if (uresp)
-		cq->is_user = 1;
-
-	cq->is_dying = false;
-
-	tasklet_setup(&cq->comp_task, rxe_send_complete);
+	cq->is_user = uresp;
 
 	spin_lock_init(&cq->cq_lock);
 	cq->ibcq.cqe = cqe;
@@ -108,21 +81,19 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
 	return err;
 }
 
+/* caller holds reference to cq */
 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 {
 	struct ib_event ev;
-	unsigned long flags;
 	int full;
 	void *addr;
+	unsigned long flags;
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
 
-	if (cq->is_user)
-		full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
-	else
-		full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);
-
+	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
 	if (unlikely(full)) {
+		rxe_err_cq(cq, "queue full\n");
 		spin_unlock_irqrestore(&cq->cq_lock, flags);
 		if (cq->ibcq.event_handler) {
 			ev.device = cq->ibcq.device;
@@ -134,41 +105,25 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 		return -EBUSY;
 	}
 
-	if (cq->is_user)
-		addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
-	else
-		addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);
-
+	addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
 	memcpy(addr, cqe, sizeof(*cqe));
 
-	if (cq->is_user)
-		advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
-	else
-		advance_producer(cq->queue, QUEUE_TYPE_KERNEL);
+	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
 
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
-
-	if ((cq->notify == IB_CQ_NEXT_COMP) ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+	if ((cq->notify & IB_CQ_NEXT_COMP) ||
+	    (cq->notify & IB_CQ_SOLICITED && solicited)) {
 		cq->notify = 0;
-		tasklet_schedule(&cq->comp_task);
+		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 	}
 
-	return 0;
-}
-
-void rxe_cq_disable(struct rxe_cq *cq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->cq_lock, flags);
-	cq->is_dying = true;
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	return 0;
 }
 
-void rxe_cq_cleanup(struct rxe_pool_entry *arg)
+void rxe_cq_cleanup(struct rxe_pool_elem *elem)
 {
-	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
+	struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
 
 	if (cq->queue)
 		rxe_queue_cleanup(cq->queue);
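The behavioral change worth noting in the diff above is the completion notification path: the tasklet indirection (rxe_send_complete() and the is_dying flag) is removed, the completion handler is now invoked directly while cq_lock is held, and the arm check switches from == to bitwise & because the IB CQ notify values are bit flags that a consumer may combine. A minimal standalone sketch of that dispatch decision, with flag values mirroring enum ib_cq_notify_flags from <rdma/ib_verbs.h> and the rxe structures deliberately left out:

#include <stdio.h>
#include <stdbool.h>

/* Values mirror enum ib_cq_notify_flags in <rdma/ib_verbs.h>. */
#define IB_CQ_SOLICITED	(1 << 0)
#define IB_CQ_NEXT_COMP	(1 << 1)

/* Models the post-patch check in rxe_cq_post(): run the completion
 * handler if the CQ was armed for the next completion, or armed for
 * solicited events and this completion is solicited.  With '&', an
 * armed value of IB_CQ_SOLICITED | IB_CQ_NEXT_COMP still matches,
 * which the old '==' comparison against a single flag would miss. */
static bool should_notify(int notify, bool solicited)
{
	return (notify & IB_CQ_NEXT_COMP) ||
	       ((notify & IB_CQ_SOLICITED) && solicited);
}

int main(void)
{
	int armed = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP;

	printf("%d\n", should_notify(armed, false));		/* 1 */
	printf("%d\n", should_notify(IB_CQ_SOLICITED, true));	/* 1 */
	printf("%d\n", should_notify(0, true));			/* 0 */
	return 0;
}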
