author		Weihang Li <liweihang@huawei.com>	2021-05-28 17:37:41 +0800
committer	Jason Gunthorpe <jgg@nvidia.com>	2021-06-08 14:58:51 -0300
commit		8f9513d89f0417d3ca0a99b5f63c84b4cf2ed5fa (patch)
tree		98dd4ec5b5709c4fc537ff127b02af7f7097a32e /drivers/infiniband/hw/hns/hns_roce_qp.c
parent		33649cd3f9497523c7110337b9c6f08ab43746c6 (diff)
RDMA/hns: Use refcount_t instead of atomic_t for QP reference counting
The refcount_t API will WARN on underflow and overflow of a reference
counter, and avoid use-after-free risks.

Link: https://lore.kernel.org/r/1622194663-2383-11-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
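A rough sketch of the conversion pattern the patch applies, using a hypothetical struct foo in place of the driver's struct hns_roce_qp (struct foo, foo_init(), foo_get(), and foo_put() are illustrative names, not driver code; the refcount_t and completion calls are the real kernel APIs):

#include <linux/refcount.h>
#include <linux/completion.h>

struct foo {
	refcount_t refcount;	/* WARNs and saturates instead of wrapping */
	struct completion free;	/* signalled when the last reference drops */
};

static void foo_init(struct foo *f)
{
	refcount_set(&f->refcount, 1);	/* initial reference held by the creator */
	init_completion(&f->free);
}

static void foo_get(struct foo *f)
{
	/* atomic_inc() would silently wrap past INT_MAX;
	 * refcount_inc() saturates the counter and WARNs instead. */
	refcount_inc(&f->refcount);
}

static void foo_put(struct foo *f)
{
	/* atomic_dec_and_test() would wrap below zero on a double put;
	 * refcount_dec_and_test() WARNs, limiting use-after-free damage. */
	if (refcount_dec_and_test(&f->refcount))
		complete(&f->free);
}

The design point: a buggy extra foo_put() now trips a WARN and the counter saturates, rather than wrapping and letting the completion fire while live references remain.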
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_qp.c')
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_qp.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9203cf189dd5..3a018a308a60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -65,7 +65,7 @@ static void flush_work_handle(struct work_struct *work)
 	 * make sure we signal QP destroy leg that flush QP was completed
 	 * so that it can safely proceed ahead now and destroy QP
 	 */
-	if (atomic_dec_and_test(&hr_qp->refcount))
+	if (refcount_dec_and_test(&hr_qp->refcount))
 		complete(&hr_qp->free);
 }
@@ -75,7 +75,7 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
flush_work->hr_dev = hr_dev;
INIT_WORK(&flush_work->work, flush_work_handle);
- atomic_inc(&hr_qp->refcount);
+ refcount_inc(&hr_qp->refcount);
queue_work(hr_dev->irq_workq, &flush_work->work);
}
@@ -87,7 +87,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 	xa_lock(&hr_dev->qp_table_xa);
 	qp = __hns_roce_qp_lookup(hr_dev, qpn);
 	if (qp)
-		atomic_inc(&qp->refcount);
+		refcount_inc(&qp->refcount);
 	xa_unlock(&hr_dev->qp_table_xa);

 	if (!qp) {
@@ -108,7 +108,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 	qp->event(qp, (enum hns_roce_event)event_type);

-	if (atomic_dec_and_test(&qp->refcount))
+	if (refcount_dec_and_test(&qp->refcount))
 		complete(&qp->free);
 }
@@ -1076,7 +1076,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	hr_qp->ibqp.qp_num = hr_qp->qpn;
 	hr_qp->event = hns_roce_ib_qp_event;
-	atomic_set(&hr_qp->refcount, 1);
+	refcount_set(&hr_qp->refcount, 1);
 	init_completion(&hr_qp->free);

 	return 0;
@@ -1099,7 +1099,7 @@ err_buf:
 void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			  struct ib_udata *udata)
 {
-	if (atomic_dec_and_test(&hr_qp->refcount))
+	if (refcount_dec_and_test(&hr_qp->refcount))
 		complete(&hr_qp->free);

 	wait_for_completion(&hr_qp->free);
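The destroy path above keeps the driver's drop-then-wait teardown: the initial reference set at create time (refcount_set(..., 1)) is dropped, and the caller blocks until every remaining holder, such as the event handler or the flush work, has done its decrement and signalled the completion. Sketched with the same hypothetical struct foo as above (foo_destroy() is an illustrative name, not driver code):

static void foo_destroy(struct foo *f)
{
	/* Drop the initial reference taken by foo_init(). */
	if (refcount_dec_and_test(&f->refcount))
		complete(&f->free);

	/* Block until the last holder's foo_put() signals &f->free;
	 * after this, no concurrent user can still dereference f. */
	wait_for_completion(&f->free);

	/* Now it is safe to free f's resources. */
}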