Diffstat (limited to 'drivers/infiniband/hw/ionic')
-rw-r--r-- | drivers/infiniband/hw/ionic/Kconfig             |   15
-rw-r--r-- | drivers/infiniband/hw/ionic/Makefile            |    9
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_admin.c       | 1229
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_controlpath.c | 2679
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_datapath.c    | 1399
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_fw.h          | 1029
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_hw_stats.c    |  484
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_ibdev.c       |  440
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_ibdev.h       |  517
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_lif_cfg.c     |  111
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_lif_cfg.h     |   66
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_pgtbl.c       |  143
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_queue.c       |   52
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_queue.h       |  234
-rw-r--r-- | drivers/infiniband/hw/ionic/ionic_res.h         |  154
15 files changed, 8561 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ionic/Kconfig b/drivers/infiniband/hw/ionic/Kconfig
new file mode 100644
index 000000000000..de6f10e9b6e9
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018-2025, Advanced Micro Devices, Inc.
+
+config INFINIBAND_IONIC
+	tristate "AMD Pensando DSC RDMA/RoCE Support"
+	depends on NETDEVICES && ETHERNET && PCI && INET && IONIC
+	help
+	  This enables RDMA/RoCE support for the AMD Pensando family of
+	  Distributed Services Cards (DSCs).
+
+	  To learn more, visit our website at
+	  <https://www.amd.com/en/products/accelerators/pensando.html>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ionic_rdma.
diff --git a/drivers/infiniband/hw/ionic/Makefile b/drivers/infiniband/hw/ionic/Makefile
new file mode 100644
index 000000000000..957973742820
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/pensando/ionic
+
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic_rdma.o
+
+ionic_rdma-y := \
+	ionic_ibdev.o ionic_lif_cfg.o ionic_queue.o ionic_pgtbl.o ionic_admin.o \
+	ionic_controlpath.o ionic_datapath.o ionic_hw_stats.o
diff --git a/drivers/infiniband/hw/ionic/ionic_admin.c b/drivers/infiniband/hw/ionic/ionic_admin.c
new file mode 100644
index 000000000000..2537aa55d12d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_admin.c
@@ -0,0 +1,1229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_EQ_COUNT_MIN	4
+#define IONIC_AQ_COUNT_MIN	1
+
+/* not a valid queue position or negative error status */
+#define IONIC_ADMIN_POSTED	0x10000
+
+/* cpu can be held with irq disabled for COUNT * MS (for create/destroy_ah) */
+#define IONIC_ADMIN_BUSY_RETRY_COUNT	2000
+#define IONIC_ADMIN_BUSY_RETRY_MS	1
+
+/* admin queue will be considered failed if a command takes longer */
+#define IONIC_ADMIN_TIMEOUT	(HZ * 2)
+#define IONIC_ADMIN_WARN	(HZ / 8)
+
+/* will poll for admin cq to tolerate and report from missed event */
+#define IONIC_ADMIN_DELAY	(HZ / 8)
+
+/* work queue for polling the event queue and admin cq */
+struct workqueue_struct *ionic_evt_workq;
+
+static void ionic_admin_timedout(struct ionic_aq *aq)
+{
+	struct ionic_ibdev *dev = aq->dev;
+	unsigned long irqflags;
+	u16 pos;
+
+	spin_lock_irqsave(&aq->lock, irqflags);
+	if (ionic_queue_empty(&aq->q))
+		goto out;
+
+	/* Reset ALL adminq if any one times out */
+	if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+		queue_work(ionic_evt_workq, &dev->reset_work);
+
+	ibdev_err(&dev->ibdev, "admin command timed out, aq %d after: %ums\n",
+		  aq->aqid, (u32)jiffies_to_msecs(jiffies - aq->stamp));
+
+	pos = (aq->q.prod - 1) & aq->q.mask;
+	if (pos == aq->q.cons)
+		goto out;
+
+	ibdev_warn(&dev->ibdev, "admin pos %u (last posted)\n", pos);
+	print_hex_dump(KERN_WARNING, "cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+		       ionic_queue_at(&aq->q, pos),
+		       BIT(aq->q.stride_log2), true);
+
+out:
+	spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_reset_dwork(struct ionic_ibdev *dev)
+{
+	if (atomic_read(&dev->admin_state) == IONIC_ADMIN_KILLED)
+		return;
+
+	queue_delayed_work(ionic_evt_workq, &dev->admin_dwork,
+			   IONIC_ADMIN_DELAY);
+}
+
+static void ionic_admin_reset_wdog(struct ionic_aq *aq)
+{
+	if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED)
+		return;
+
+	aq->stamp = jiffies;
+	ionic_admin_reset_dwork(aq->dev);
+}
+
+static bool ionic_admin_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+				 struct ionic_v1_cqe **cqe)
+{
+	struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+	if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+		return false;
+
+	/* Prevent out-of-order reads of the CQE */
+	dma_rmb();
+	*cqe = qcqe;
+
+	return true;
+}
+
+static void ionic_admin_poll_locked(struct ionic_aq *aq)
+{
+	struct ionic_cq *cq = &aq->vcq->cq[0];
+	struct ionic_admin_wr *wr, *wr_next;
+	struct ionic_ibdev *dev = aq->dev;
+	u32 wr_strides, avlbl_strides;
+	struct ionic_v1_cqe *cqe;
+	u32 qtf, qid;
+	u16 old_prod;
+	u8 type;
+
+	lockdep_assert_held(&aq->lock);
+
+	if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED) {
+		list_for_each_entry_safe(wr, wr_next, &aq->wr_prod, aq_ent) {
+			INIT_LIST_HEAD(&wr->aq_ent);
+			aq->q_wr[wr->status].wr = NULL;
+			wr->status = atomic_read(&aq->admin_state);
+			complete_all(&wr->work);
+		}
+		INIT_LIST_HEAD(&aq->wr_prod);
+
+		list_for_each_entry_safe(wr, wr_next, &aq->wr_post, aq_ent) {
+			INIT_LIST_HEAD(&wr->aq_ent);
+			wr->status = atomic_read(&aq->admin_state);
+			complete_all(&wr->work);
+		}
+		INIT_LIST_HEAD(&aq->wr_post);
+
+		return;
+	}
+
+	old_prod = cq->q.prod;
+
+	while (ionic_admin_next_cqe(dev, cq, &cqe)) {
+		qtf = ionic_v1_cqe_qtf(cqe);
+		qid = ionic_v1_cqe_qtf_qid(qtf);
+		type = ionic_v1_cqe_qtf_type(qtf);
+
+		if (unlikely(type != IONIC_V1_CQE_TYPE_ADMIN)) {
+			ibdev_warn_ratelimited(&dev->ibdev,
+					       "bad cqe type %u\n", type);
+			goto cq_next;
+		}
+
+		if (unlikely(qid != aq->aqid)) {
+			ibdev_warn_ratelimited(&dev->ibdev,
+					       "bad cqe qid %u\n", qid);
+			goto cq_next;
+		}
+
+		if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
+			ibdev_warn_ratelimited(&dev->ibdev,
+					       "bad idx %u cons %u qid %u\n",
+					       be16_to_cpu(cqe->admin.cmd_idx),
+					       aq->q.cons, qid);
+			goto cq_next;
+		}
+
+		if (unlikely(ionic_queue_empty(&aq->q))) {
+			ibdev_warn_ratelimited(&dev->ibdev,
+					       "bad cqe for empty adminq\n");
+			goto cq_next;
+		}
+
+		wr = aq->q_wr[aq->q.cons].wr;
+		if (wr) {
+			aq->q_wr[aq->q.cons].wr = NULL;
+			list_del_init(&wr->aq_ent);
+
+			wr->cqe = *cqe;
+			wr->status = atomic_read(&aq->admin_state);
+			complete_all(&wr->work);
+		}
+
+		ionic_queue_consume_entries(&aq->q,
+					    aq->q_wr[aq->q.cons].wqe_strides);
+
+cq_next:
+		ionic_queue_produce(&cq->q);
+		cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+	}
+
+	if (old_prod != cq->q.prod) {
+		ionic_admin_reset_wdog(aq);
+		cq->q.cons = cq->q.prod;
+		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+				 ionic_queue_dbell_val(&cq->q));
+		queue_work(ionic_evt_workq, &aq->work);
+	} else if (!aq->armed) {
+		aq->armed = true;
+		cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+				 cq->q.dbell | IONIC_CQ_RING_ARM |
+				 cq->arm_any_prod);
+		queue_work(ionic_evt_workq, &aq->work);
+	}
+
+	if (atomic_read(&aq->admin_state) != IONIC_ADMIN_ACTIVE)
+		return;
+
+	old_prod = aq->q.prod;
+
+	if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
+		ionic_admin_reset_wdog(aq);
+
+	if (list_empty(&aq->wr_post))
+		return;
+
+	do {
+		u8 *src;
+		int i, src_len;
+		size_t stride_len;
+
+		wr = list_first_entry(&aq->wr_post, struct ionic_admin_wr,
+				      aq_ent);
+		wr_strides = (le16_to_cpu(wr->wqe.len) + ADMIN_WQE_HDR_LEN +
+			      (ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
+		avlbl_strides = ionic_queue_length_remaining(&aq->q);
+
+		if
(wr_strides > avlbl_strides) + break; + + list_move(&wr->aq_ent, &aq->wr_prod); + wr->status = aq->q.prod; + aq->q_wr[aq->q.prod].wr = wr; + aq->q_wr[aq->q.prod].wqe_strides = wr_strides; + + src_len = le16_to_cpu(wr->wqe.len); + src = (uint8_t *)&wr->wqe.cmd; + + /* First stride */ + memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe, + ADMIN_WQE_HDR_LEN); + stride_len = ADMIN_WQE_STRIDE - ADMIN_WQE_HDR_LEN; + if (stride_len > src_len) + stride_len = src_len; + memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN, + src, stride_len); + ibdev_dbg(&dev->ibdev, "post admin prod %u (%u strides)\n", + aq->q.prod, wr_strides); + print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1, + ionic_queue_at_prod(&aq->q), + BIT(aq->q.stride_log2), true); + ionic_queue_produce(&aq->q); + + /* Remaining strides */ + for (i = stride_len; i < src_len; i += stride_len) { + stride_len = ADMIN_WQE_STRIDE; + + if (i + stride_len > src_len) + stride_len = src_len - i; + + memcpy(ionic_queue_at_prod(&aq->q), src + i, + stride_len); + print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1, + ionic_queue_at_prod(&aq->q), + BIT(aq->q.stride_log2), true); + ionic_queue_produce(&aq->q); + } + } while (!list_empty(&aq->wr_post)); + + if (old_prod != aq->q.prod) + ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.aq_qtype, + ionic_queue_dbell_val(&aq->q)); +} + +static void ionic_admin_dwork(struct work_struct *ws) +{ + struct ionic_ibdev *dev = + container_of(ws, struct ionic_ibdev, admin_dwork.work); + struct ionic_aq *aq, *bad_aq = NULL; + bool do_reschedule = false; + unsigned long irqflags; + bool do_reset = false; + u16 pos; + int i; + + for (i = 0; i < dev->lif_cfg.aq_count; i++) { + aq = dev->aq_vec[i]; + + spin_lock_irqsave(&aq->lock, irqflags); + + if (ionic_queue_empty(&aq->q)) + goto next_aq; + + /* Reschedule if any queue has outstanding work */ + do_reschedule = true; + + if (time_is_after_eq_jiffies(aq->stamp + IONIC_ADMIN_WARN)) + /* Warning threshold not met, nothing to do */ + goto next_aq; + + /* See if polling now makes some progress */ + pos = aq->q.cons; + ionic_admin_poll_locked(aq); + if (pos != aq->q.cons) { + ibdev_dbg(&dev->ibdev, + "missed event for acq %d\n", aq->cqid); + goto next_aq; + } + + if (time_is_after_eq_jiffies(aq->stamp + + IONIC_ADMIN_TIMEOUT)) { + /* Timeout threshold not met */ + ibdev_dbg(&dev->ibdev, "no progress after %ums\n", + (u32)jiffies_to_msecs(jiffies - aq->stamp)); + goto next_aq; + } + + /* Queue timed out */ + bad_aq = aq; + do_reset = true; +next_aq: + spin_unlock_irqrestore(&aq->lock, irqflags); + } + + if (do_reset) + /* Reset RDMA lif on a timeout */ + ionic_admin_timedout(bad_aq); + else if (do_reschedule) + /* Try to poll again later */ + ionic_admin_reset_dwork(dev); +} + +static void ionic_admin_work(struct work_struct *ws) +{ + struct ionic_aq *aq = container_of(ws, struct ionic_aq, work); + unsigned long irqflags; + + spin_lock_irqsave(&aq->lock, irqflags); + ionic_admin_poll_locked(aq); + spin_unlock_irqrestore(&aq->lock, irqflags); +} + +static void ionic_admin_post_aq(struct ionic_aq *aq, struct ionic_admin_wr *wr) +{ + unsigned long irqflags; + bool poll; + + wr->status = IONIC_ADMIN_POSTED; + wr->aq = aq; + + spin_lock_irqsave(&aq->lock, irqflags); + poll = list_empty(&aq->wr_post); + list_add(&wr->aq_ent, &aq->wr_post); + if (poll) + ionic_admin_poll_locked(aq); + spin_unlock_irqrestore(&aq->lock, irqflags); +} + +void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr) +{ + int aq_idx; + + /* Use cpu id for the adminq selection */ + 
aq_idx = raw_smp_processor_id() % dev->lif_cfg.aq_count; + ionic_admin_post_aq(dev->aq_vec[aq_idx], wr); +} + +static void ionic_admin_cancel(struct ionic_admin_wr *wr) +{ + struct ionic_aq *aq = wr->aq; + unsigned long irqflags; + + spin_lock_irqsave(&aq->lock, irqflags); + + if (!list_empty(&wr->aq_ent)) { + list_del(&wr->aq_ent); + if (wr->status != IONIC_ADMIN_POSTED) + aq->q_wr[wr->status].wr = NULL; + } + + spin_unlock_irqrestore(&aq->lock, irqflags); +} + +static int ionic_admin_busy_wait(struct ionic_admin_wr *wr) +{ + struct ionic_aq *aq = wr->aq; + unsigned long irqflags; + int try_i; + + for (try_i = 0; try_i < IONIC_ADMIN_BUSY_RETRY_COUNT; ++try_i) { + if (completion_done(&wr->work)) + return 0; + + mdelay(IONIC_ADMIN_BUSY_RETRY_MS); + + spin_lock_irqsave(&aq->lock, irqflags); + ionic_admin_poll_locked(aq); + spin_unlock_irqrestore(&aq->lock, irqflags); + } + + /* + * we timed out. Initiate RDMA LIF reset and indicate + * error to caller. + */ + ionic_admin_timedout(aq); + return -ETIMEDOUT; +} + +int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr, + enum ionic_admin_flags flags) +{ + int rc, timo; + + if (flags & IONIC_ADMIN_F_BUSYWAIT) { + /* Spin */ + rc = ionic_admin_busy_wait(wr); + } else if (flags & IONIC_ADMIN_F_INTERRUPT) { + /* + * Interruptible sleep, 1s timeout + * This is used for commands which are safe for the caller + * to clean up without killing and resetting the adminq. + */ + timo = wait_for_completion_interruptible_timeout(&wr->work, + HZ); + if (timo > 0) + rc = 0; + else if (timo == 0) + rc = -ETIMEDOUT; + else + rc = timo; + } else { + /* + * Uninterruptible sleep + * This is used for commands which are NOT safe for the + * caller to clean up. Cleanup must be handled by the + * adminq kill and reset process so that host memory is + * not corrupted by the device. + */ + wait_for_completion(&wr->work); + rc = 0; + } + + if (rc) { + ibdev_warn(&dev->ibdev, "wait status %d\n", rc); + ionic_admin_cancel(wr); + } else if (wr->status == IONIC_ADMIN_KILLED) { + ibdev_dbg(&dev->ibdev, "admin killed\n"); + + /* No error if admin already killed during teardown */ + rc = (flags & IONIC_ADMIN_F_TEARDOWN) ? 
0 : -ENODEV; + } else if (ionic_v1_cqe_error(&wr->cqe)) { + ibdev_warn(&dev->ibdev, "opcode %u error %u\n", + wr->wqe.op, + be32_to_cpu(wr->cqe.status_length)); + rc = -EINVAL; + } + return rc; +} + +static int ionic_rdma_devcmd(struct ionic_ibdev *dev, + struct ionic_admin_ctx *admin) +{ + int rc; + + rc = ionic_adminq_post_wait(dev->lif_cfg.lif, admin); + if (rc) + return rc; + + return ionic_error_to_errno(admin->comp.comp.status); +} + +int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev) +{ + struct ionic_admin_ctx admin = { + .work = COMPLETION_INITIALIZER_ONSTACK(admin.work), + .cmd.rdma_reset = { + .opcode = IONIC_CMD_RDMA_RESET_LIF, + .lif_index = cpu_to_le16(dev->lif_cfg.lif_index), + }, + }; + + return ionic_rdma_devcmd(dev, &admin); +} + +static int ionic_rdma_queue_devcmd(struct ionic_ibdev *dev, + struct ionic_queue *q, + u32 qid, u32 cid, u16 opcode) +{ + struct ionic_admin_ctx admin = { + .work = COMPLETION_INITIALIZER_ONSTACK(admin.work), + .cmd.rdma_queue = { + .opcode = opcode, + .lif_index = cpu_to_le16(dev->lif_cfg.lif_index), + .qid_ver = cpu_to_le32(qid), + .cid = cpu_to_le32(cid), + .dbid = cpu_to_le16(dev->lif_cfg.dbid), + .depth_log2 = q->depth_log2, + .stride_log2 = q->stride_log2, + .dma_addr = cpu_to_le64(q->dma), + }, + }; + + return ionic_rdma_devcmd(dev, &admin); +} + +static void ionic_rdma_admincq_comp(struct ib_cq *ibcq, void *cq_context) +{ + struct ionic_aq *aq = cq_context; + unsigned long irqflags; + + spin_lock_irqsave(&aq->lock, irqflags); + aq->armed = false; + if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED) + queue_work(ionic_evt_workq, &aq->work); + spin_unlock_irqrestore(&aq->lock, irqflags); +} + +static void ionic_rdma_admincq_event(struct ib_event *event, void *cq_context) +{ + struct ionic_aq *aq = cq_context; + + ibdev_err(&aq->dev->ibdev, "admincq event %d\n", event->event); +} + +static struct ionic_vcq *ionic_create_rdma_admincq(struct ionic_ibdev *dev, + int comp_vector) +{ + struct ib_cq_init_attr attr = { + .cqe = IONIC_AQ_DEPTH, + .comp_vector = comp_vector, + }; + struct ionic_tbl_buf buf = {}; + struct ionic_vcq *vcq; + struct ionic_cq *cq; + int rc; + + vcq = kzalloc(sizeof(*vcq), GFP_KERNEL); + if (!vcq) + return ERR_PTR(-ENOMEM); + + vcq->ibcq.device = &dev->ibdev; + vcq->ibcq.comp_handler = ionic_rdma_admincq_comp; + vcq->ibcq.event_handler = ionic_rdma_admincq_event; + atomic_set(&vcq->ibcq.usecnt, 0); + + vcq->udma_mask = 1; + cq = &vcq->cq[0]; + + rc = ionic_create_cq_common(vcq, &buf, &attr, NULL, NULL, + NULL, NULL, 0); + if (rc) + goto err_init; + + rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid, + IONIC_CMD_RDMA_CREATE_CQ); + if (rc) + goto err_cmd; + + return vcq; + +err_cmd: + ionic_destroy_cq_common(dev, cq); +err_init: + kfree(vcq); + + return ERR_PTR(rc); +} + +static struct ionic_aq *__ionic_create_rdma_adminq(struct ionic_ibdev *dev, + u32 aqid, u32 cqid) +{ + struct ionic_aq *aq; + int rc; + + aq = kzalloc(sizeof(*aq), GFP_KERNEL); + if (!aq) + return ERR_PTR(-ENOMEM); + + atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED); + aq->dev = dev; + aq->aqid = aqid; + aq->cqid = cqid; + spin_lock_init(&aq->lock); + + rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH, + ADMIN_WQE_STRIDE); + if (rc) + goto err_q; + + ionic_queue_dbell_init(&aq->q, aq->aqid); + + aq->q_wr = kcalloc((u32)aq->q.mask + 1, sizeof(*aq->q_wr), GFP_KERNEL); + if (!aq->q_wr) { + rc = -ENOMEM; + goto err_wr; + } + + INIT_LIST_HEAD(&aq->wr_prod); + INIT_LIST_HEAD(&aq->wr_post); + + INIT_WORK(&aq->work, 
ionic_admin_work); + aq->armed = false; + + return aq; + +err_wr: + ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev); +err_q: + kfree(aq); + + return ERR_PTR(rc); +} + +static void __ionic_destroy_rdma_adminq(struct ionic_ibdev *dev, + struct ionic_aq *aq) +{ + kfree(aq->q_wr); + ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev); + kfree(aq); +} + +static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev, + u32 aqid, u32 cqid) +{ + struct ionic_aq *aq; + int rc; + + aq = __ionic_create_rdma_adminq(dev, aqid, cqid); + if (IS_ERR(aq)) + return aq; + + rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid, + IONIC_CMD_RDMA_CREATE_ADMINQ); + if (rc) + goto err_cmd; + + return aq; + +err_cmd: + __ionic_destroy_rdma_adminq(dev, aq); + + return ERR_PTR(rc); +} + +static void ionic_flush_qs(struct ionic_ibdev *dev) +{ + struct ionic_qp *qp, *qp_tmp; + struct ionic_cq *cq, *cq_tmp; + LIST_HEAD(flush_list); + unsigned long index; + + WARN_ON(!irqs_disabled()); + + /* Flush qp send and recv */ + xa_lock(&dev->qp_tbl); + xa_for_each(&dev->qp_tbl, index, qp) { + kref_get(&qp->qp_kref); + list_add_tail(&qp->ibkill_flush_ent, &flush_list); + } + xa_unlock(&dev->qp_tbl); + + list_for_each_entry_safe(qp, qp_tmp, &flush_list, ibkill_flush_ent) { + ionic_flush_qp(dev, qp); + kref_put(&qp->qp_kref, ionic_qp_complete); + list_del(&qp->ibkill_flush_ent); + } + + /* Notify completions */ + xa_lock(&dev->cq_tbl); + xa_for_each(&dev->cq_tbl, index, cq) { + kref_get(&cq->cq_kref); + list_add_tail(&cq->ibkill_flush_ent, &flush_list); + } + xa_unlock(&dev->cq_tbl); + + list_for_each_entry_safe(cq, cq_tmp, &flush_list, ibkill_flush_ent) { + ionic_notify_flush_cq(cq); + kref_put(&cq->cq_kref, ionic_cq_complete); + list_del(&cq->ibkill_flush_ent); + } +} + +static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path) +{ + unsigned long irqflags; + bool do_flush = false; + int i; + + /* Mark AQs for drain and flush the QPs while irq is disabled */ + local_irq_save(irqflags); + + /* Mark the admin queue, flushing at most once */ + for (i = 0; i < dev->lif_cfg.aq_count; i++) { + struct ionic_aq *aq = dev->aq_vec[i]; + + spin_lock(&aq->lock); + if (atomic_read(&aq->admin_state) != IONIC_ADMIN_KILLED) { + atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED); + /* Flush incomplete admin commands */ + ionic_admin_poll_locked(aq); + do_flush = true; + } + spin_unlock(&aq->lock); + } + + if (do_flush) + ionic_flush_qs(dev); + + local_irq_restore(irqflags); + + /* Post a fatal event if requested */ + if (fatal_path) { + struct ib_event ev; + + ev.device = &dev->ibdev; + ev.element.port_num = 1; + ev.event = IB_EVENT_DEVICE_FATAL; + + ib_dispatch_event(&ev); + } + + atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED); +} + +void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path) +{ + enum ionic_admin_state old_state; + unsigned long irqflags = 0; + int i, rc; + + if (!dev->aq_vec) + return; + + /* + * Admin queues are transitioned from active to paused to killed state. + * When in paused state, no new commands are issued to the device, + * nor are any completed locally. After resetting the lif, it will be + * safe to resume the rdma admin queues in the killed state. Commands + * will not be issued to the device, but will complete locally with status + * IONIC_ADMIN_KILLED. Handling completion will ensure that creating or + * modifying resources fails, but destroying resources succeeds. + * If there was a failure resetting the lif using this strategy, + * then the state of the device is unknown. 
+ */ + old_state = atomic_cmpxchg(&dev->admin_state, IONIC_ADMIN_ACTIVE, + IONIC_ADMIN_PAUSED); + if (old_state != IONIC_ADMIN_ACTIVE) + return; + + /* Pause all the AQs */ + local_irq_save(irqflags); + for (i = 0; i < dev->lif_cfg.aq_count; i++) { + struct ionic_aq *aq = dev->aq_vec[i]; + + spin_lock(&aq->lock); + /* pause rdma admin queues to reset lif */ + if (atomic_read(&aq->admin_state) == IONIC_ADMIN_ACTIVE) + atomic_set(&aq->admin_state, IONIC_ADMIN_PAUSED); + spin_unlock(&aq->lock); + } + local_irq_restore(irqflags); + + rc = ionic_rdma_reset_devcmd(dev); + if (unlikely(rc)) { + ibdev_err(&dev->ibdev, "failed to reset rdma %d\n", rc); + ionic_request_rdma_reset(dev->lif_cfg.lif); + } + + ionic_kill_ibdev(dev, fatal_path); +} + +static void ionic_reset_work(struct work_struct *ws) +{ + struct ionic_ibdev *dev = + container_of(ws, struct ionic_ibdev, reset_work); + + ionic_kill_rdma_admin(dev, true); +} + +static bool ionic_next_eqe(struct ionic_eq *eq, struct ionic_v1_eqe *eqe) +{ + struct ionic_v1_eqe *qeqe; + bool color; + + qeqe = ionic_queue_at_prod(&eq->q); + color = ionic_v1_eqe_color(qeqe); + + /* cons is color for eq */ + if (eq->q.cons != color) + return false; + + /* Prevent out-of-order reads of the EQE */ + dma_rmb(); + + ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod); + print_hex_dump_debug("eqe ", DUMP_PREFIX_OFFSET, 16, 1, + qeqe, BIT(eq->q.stride_log2), true); + *eqe = *qeqe; + + return true; +} + +static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code) +{ + unsigned long irqflags; + struct ib_event ibev; + struct ionic_cq *cq; + + xa_lock_irqsave(&dev->cq_tbl, irqflags); + cq = xa_load(&dev->cq_tbl, cqid); + if (cq) + kref_get(&cq->cq_kref); + xa_unlock_irqrestore(&dev->cq_tbl, irqflags); + + if (!cq) { + ibdev_dbg(&dev->ibdev, + "missing cqid %#x code %u\n", cqid, code); + return; + } + + switch (code) { + case IONIC_V1_EQE_CQ_NOTIFY: + if (cq->vcq->ibcq.comp_handler) + cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq, + cq->vcq->ibcq.cq_context); + break; + + case IONIC_V1_EQE_CQ_ERR: + if (cq->vcq->ibcq.event_handler) { + ibev.event = IB_EVENT_CQ_ERR; + ibev.device = &dev->ibdev; + ibev.element.cq = &cq->vcq->ibcq; + + cq->vcq->ibcq.event_handler(&ibev, + cq->vcq->ibcq.cq_context); + } + break; + + default: + ibdev_dbg(&dev->ibdev, + "unrecognized cqid %#x code %u\n", cqid, code); + break; + } + + kref_put(&cq->cq_kref, ionic_cq_complete); +} + +static void ionic_qp_event(struct ionic_ibdev *dev, u32 qpid, u8 code) +{ + unsigned long irqflags; + struct ib_event ibev; + struct ionic_qp *qp; + + xa_lock_irqsave(&dev->qp_tbl, irqflags); + qp = xa_load(&dev->qp_tbl, qpid); + if (qp) + kref_get(&qp->qp_kref); + xa_unlock_irqrestore(&dev->qp_tbl, irqflags); + + if (!qp) { + ibdev_dbg(&dev->ibdev, + "missing qpid %#x code %u\n", qpid, code); + return; + } + + ibev.device = &dev->ibdev; + ibev.element.qp = &qp->ibqp; + + switch (code) { + case IONIC_V1_EQE_SQ_DRAIN: + ibev.event = IB_EVENT_SQ_DRAINED; + break; + + case IONIC_V1_EQE_QP_COMM_EST: + ibev.event = IB_EVENT_COMM_EST; + break; + + case IONIC_V1_EQE_QP_LAST_WQE: + ibev.event = IB_EVENT_QP_LAST_WQE_REACHED; + break; + + case IONIC_V1_EQE_QP_ERR: + ibev.event = IB_EVENT_QP_FATAL; + break; + + case IONIC_V1_EQE_QP_ERR_REQUEST: + ibev.event = IB_EVENT_QP_REQ_ERR; + break; + + case IONIC_V1_EQE_QP_ERR_ACCESS: + ibev.event = IB_EVENT_QP_ACCESS_ERR; + break; + + default: + ibdev_dbg(&dev->ibdev, + "unrecognized qpid %#x code %u\n", qpid, code); + goto out; + } + + if (qp->ibqp.event_handler) + 
qp->ibqp.event_handler(&ibev, qp->ibqp.qp_context); + +out: + kref_put(&qp->qp_kref, ionic_qp_complete); +} + +static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget) +{ + struct ionic_ibdev *dev = eq->dev; + struct ionic_v1_eqe eqe; + u16 npolled = 0; + u8 type, code; + u32 evt, qid; + + while (npolled < budget) { + if (!ionic_next_eqe(eq, &eqe)) + break; + + ionic_queue_produce(&eq->q); + + /* cons is color for eq */ + eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons); + + ++npolled; + + evt = ionic_v1_eqe_evt(&eqe); + type = ionic_v1_eqe_evt_type(evt); + code = ionic_v1_eqe_evt_code(evt); + qid = ionic_v1_eqe_evt_qid(evt); + + switch (type) { + case IONIC_V1_EQE_TYPE_CQ: + ionic_cq_event(dev, qid, code); + break; + + case IONIC_V1_EQE_TYPE_QP: + ionic_qp_event(dev, qid, code); + break; + + default: + ibdev_dbg(&dev->ibdev, + "unknown event %#x type %u\n", evt, type); + } + } + + return npolled; +} + +static void ionic_poll_eq_work(struct work_struct *work) +{ + struct ionic_eq *eq = container_of(work, struct ionic_eq, work); + u32 npolled; + + if (unlikely(!eq->enable) || WARN_ON(eq->armed)) + return; + + npolled = ionic_poll_eq(eq, IONIC_EQ_WORK_BUDGET); + if (npolled == IONIC_EQ_WORK_BUDGET) { + ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr, + npolled, 0); + queue_work(ionic_evt_workq, &eq->work); + } else { + xchg(&eq->armed, 1); + ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr, + 0, IONIC_INTR_CRED_UNMASK); + } +} + +static irqreturn_t ionic_poll_eq_isr(int irq, void *eqptr) +{ + struct ionic_eq *eq = eqptr; + int was_armed; + u32 npolled; + + was_armed = xchg(&eq->armed, 0); + + if (unlikely(!eq->enable) || !was_armed) + return IRQ_HANDLED; + + npolled = ionic_poll_eq(eq, IONIC_EQ_ISR_BUDGET); + if (npolled == IONIC_EQ_ISR_BUDGET) { + ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr, + npolled, 0); + queue_work(ionic_evt_workq, &eq->work); + } else { + xchg(&eq->armed, 1); + ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr, + 0, IONIC_INTR_CRED_UNMASK); + } + + return IRQ_HANDLED; +} + +static struct ionic_eq *ionic_create_eq(struct ionic_ibdev *dev, int eqid) +{ + struct ionic_intr_info intr_obj = { }; + struct ionic_eq *eq; + int rc; + + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) + return ERR_PTR(-ENOMEM); + + eq->dev = dev; + + rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH, + sizeof(struct ionic_v1_eqe)); + if (rc) + goto err_q; + + eq->eqid = eqid; + + eq->armed = true; + eq->enable = false; + INIT_WORK(&eq->work, ionic_poll_eq_work); + + rc = ionic_intr_alloc(dev->lif_cfg.lif, &intr_obj); + if (rc < 0) + goto err_intr; + + eq->irq = intr_obj.vector; + eq->intr = intr_obj.index; + + ionic_queue_dbell_init(&eq->q, eq->eqid); + + /* cons is color for eq */ + eq->q.cons = true; + + snprintf(eq->name, sizeof(eq->name), "%s-%d-%d-eq", + "ionr", dev->lif_cfg.lif_index, eq->eqid); + + ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET); + ionic_intr_mask_assert(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET); + ionic_intr_coal_init(dev->lif_cfg.intr_ctrl, eq->intr, 0); + ionic_intr_clean(dev->lif_cfg.intr_ctrl, eq->intr); + + eq->enable = true; + + rc = request_irq(eq->irq, ionic_poll_eq_isr, 0, eq->name, eq); + if (rc) + goto err_irq; + + rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr, + IONIC_CMD_RDMA_CREATE_EQ); + if (rc) + goto err_cmd; + + ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_CLEAR); + + return eq; + +err_cmd: + eq->enable = false; + 
free_irq(eq->irq, eq); + flush_work(&eq->work); +err_irq: + ionic_intr_free(dev->lif_cfg.lif, eq->intr); +err_intr: + ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev); +err_q: + kfree(eq); + + return ERR_PTR(rc); +} + +static void ionic_destroy_eq(struct ionic_eq *eq) +{ + struct ionic_ibdev *dev = eq->dev; + + eq->enable = false; + free_irq(eq->irq, eq); + flush_work(&eq->work); + + ionic_intr_free(dev->lif_cfg.lif, eq->intr); + ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev); + kfree(eq); +} + +int ionic_create_rdma_admin(struct ionic_ibdev *dev) +{ + int eq_i = 0, aq_i = 0, rc = 0; + struct ionic_vcq *vcq; + struct ionic_aq *aq; + struct ionic_eq *eq; + + dev->eq_vec = NULL; + dev->aq_vec = NULL; + + INIT_WORK(&dev->reset_work, ionic_reset_work); + INIT_DELAYED_WORK(&dev->admin_dwork, ionic_admin_dwork); + atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED); + + if (dev->lif_cfg.aq_count > IONIC_AQ_COUNT) { + ibdev_dbg(&dev->ibdev, "limiting adminq count to %d\n", + IONIC_AQ_COUNT); + dev->lif_cfg.aq_count = IONIC_AQ_COUNT; + } + + if (dev->lif_cfg.eq_count > IONIC_EQ_COUNT) { + dev_dbg(&dev->ibdev.dev, "limiting eventq count to %d\n", + IONIC_EQ_COUNT); + dev->lif_cfg.eq_count = IONIC_EQ_COUNT; + } + + /* need at least two eq and one aq */ + if (dev->lif_cfg.eq_count < IONIC_EQ_COUNT_MIN || + dev->lif_cfg.aq_count < IONIC_AQ_COUNT_MIN) { + rc = -EINVAL; + goto out; + } + + dev->eq_vec = kmalloc_array(dev->lif_cfg.eq_count, sizeof(*dev->eq_vec), + GFP_KERNEL); + if (!dev->eq_vec) { + rc = -ENOMEM; + goto out; + } + + for (eq_i = 0; eq_i < dev->lif_cfg.eq_count; ++eq_i) { + eq = ionic_create_eq(dev, eq_i + dev->lif_cfg.eq_base); + if (IS_ERR(eq)) { + rc = PTR_ERR(eq); + + if (eq_i < IONIC_EQ_COUNT_MIN) { + ibdev_err(&dev->ibdev, + "fail create eq %pe\n", eq); + goto out; + } + + /* ok, just fewer eq than device supports */ + ibdev_dbg(&dev->ibdev, "eq count %d want %d rc %pe\n", + eq_i, dev->lif_cfg.eq_count, eq); + + rc = 0; + break; + } + + dev->eq_vec[eq_i] = eq; + } + + dev->lif_cfg.eq_count = eq_i; + + dev->aq_vec = kmalloc_array(dev->lif_cfg.aq_count, sizeof(*dev->aq_vec), + GFP_KERNEL); + if (!dev->aq_vec) { + rc = -ENOMEM; + goto out; + } + + /* Create one CQ per AQ */ + for (aq_i = 0; aq_i < dev->lif_cfg.aq_count; ++aq_i) { + vcq = ionic_create_rdma_admincq(dev, aq_i % eq_i); + if (IS_ERR(vcq)) { + rc = PTR_ERR(vcq); + + if (!aq_i) { + ibdev_err(&dev->ibdev, + "failed to create acq %pe\n", vcq); + goto out; + } + + /* ok, just fewer adminq than device supports */ + ibdev_dbg(&dev->ibdev, "acq count %d want %d rc %pe\n", + aq_i, dev->lif_cfg.aq_count, vcq); + break; + } + + aq = ionic_create_rdma_adminq(dev, aq_i + dev->lif_cfg.aq_base, + vcq->cq[0].cqid); + if (IS_ERR(aq)) { + /* Clean up the dangling CQ */ + ionic_destroy_cq_common(dev, &vcq->cq[0]); + kfree(vcq); + + rc = PTR_ERR(aq); + + if (!aq_i) { + ibdev_err(&dev->ibdev, + "failed to create aq %pe\n", aq); + goto out; + } + + /* ok, just fewer adminq than device supports */ + ibdev_dbg(&dev->ibdev, "aq count %d want %d rc %pe\n", + aq_i, dev->lif_cfg.aq_count, aq); + break; + } + + vcq->ibcq.cq_context = aq; + aq->vcq = vcq; + + atomic_set(&aq->admin_state, IONIC_ADMIN_ACTIVE); + dev->aq_vec[aq_i] = aq; + } + + atomic_set(&dev->admin_state, IONIC_ADMIN_ACTIVE); +out: + dev->lif_cfg.eq_count = eq_i; + dev->lif_cfg.aq_count = aq_i; + + return rc; +} + +void ionic_destroy_rdma_admin(struct ionic_ibdev *dev) +{ + struct ionic_vcq *vcq; + struct ionic_aq *aq; + struct ionic_eq *eq; + + /* + * Killing the admin before destroy 
makes sure all admin and + * completions are flushed. admin_state = IONIC_ADMIN_KILLED + * stops queueing up further works. + */ + cancel_delayed_work_sync(&dev->admin_dwork); + cancel_work_sync(&dev->reset_work); + + if (dev->aq_vec) { + while (dev->lif_cfg.aq_count > 0) { + aq = dev->aq_vec[--dev->lif_cfg.aq_count]; + vcq = aq->vcq; + + cancel_work_sync(&aq->work); + + __ionic_destroy_rdma_adminq(dev, aq); + if (vcq) { + ionic_destroy_cq_common(dev, &vcq->cq[0]); + kfree(vcq); + } + } + + kfree(dev->aq_vec); + } + + if (dev->eq_vec) { + while (dev->lif_cfg.eq_count > 0) { + eq = dev->eq_vec[--dev->lif_cfg.eq_count]; + ionic_destroy_eq(eq); + } + + kfree(dev->eq_vec); + } +} diff --git a/drivers/infiniband/hw/ionic/ionic_controlpath.c b/drivers/infiniband/hw/ionic/ionic_controlpath.c new file mode 100644 index 000000000000..ea12d9b8e125 --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_controlpath.c @@ -0,0 +1,2679 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#include <linux/module.h> +#include <linux/printk.h> +#include <rdma/ib_addr.h> +#include <rdma/ib_cache.h> +#include <rdma/ib_user_verbs.h> +#include <ionic_api.h> + +#include "ionic_fw.h" +#include "ionic_ibdev.h" + +#define ionic_set_ecn(tos) (((tos) | 2u) & ~1u) +#define ionic_clear_ecn(tos) ((tos) & ~3u) + +static int ionic_validate_qdesc(struct ionic_qdesc *q) +{ + if (!q->addr || !q->size || !q->mask || + !q->depth_log2 || !q->stride_log2) + return -EINVAL; + + if (q->addr & (PAGE_SIZE - 1)) + return -EINVAL; + + if (q->mask != BIT(q->depth_log2) - 1) + return -EINVAL; + + if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2)) + return -EINVAL; + + return 0; +} + +static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx) +{ + /* EQ per vector per udma, and the first eqs reserved for async events. + * The rest of the vectors can be requested for completions. 
+ */ + u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1; + + return (comp_vector % comp_vec_count + 1) * dev->lif_cfg.udma_count + udma_idx; +} + +static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx) +{ + unsigned int size, base, bound; + int rc; + + size = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count; + base = size * udma_idx; + bound = base + size; + + rc = ionic_resid_get_shared(&dev->inuse_cqid, base, bound); + if (rc >= 0) { + /* cq_base is zero or a multiple of two queue groups */ + *cqid = dev->lif_cfg.cq_base + + ionic_bitid_to_qid(rc, dev->lif_cfg.udma_qgrp_shift, + dev->half_cqid_udma_shift); + + rc = 0; + } + + return rc; +} + +static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid) +{ + u32 bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base, + dev->lif_cfg.udma_qgrp_shift, + dev->half_cqid_udma_shift); + + ionic_resid_put(&dev->inuse_cqid, bitid); +} + +int ionic_create_cq_common(struct ionic_vcq *vcq, + struct ionic_tbl_buf *buf, + const struct ib_cq_init_attr *attr, + struct ionic_ctx *ctx, + struct ib_udata *udata, + struct ionic_qdesc *req_cq, + __u32 *resp_cqid, + int udma_idx) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device); + struct ionic_cq *cq = &vcq->cq[udma_idx]; + void *entry; + int rc; + + cq->vcq = vcq; + + if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) { + rc = -EINVAL; + goto err_args; + } + + rc = ionic_get_cqid(dev, &cq->cqid, udma_idx); + if (rc) + goto err_args; + + cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx); + + spin_lock_init(&cq->lock); + INIT_LIST_HEAD(&cq->poll_sq); + INIT_LIST_HEAD(&cq->flush_sq); + INIT_LIST_HEAD(&cq->flush_rq); + + if (udata) { + rc = ionic_validate_qdesc(req_cq); + if (rc) + goto err_qdesc; + + cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(cq->umem)) { + rc = PTR_ERR(cq->umem); + goto err_qdesc; + } + + cq->q.ptr = NULL; + cq->q.size = req_cq->size; + cq->q.mask = req_cq->mask; + cq->q.depth_log2 = req_cq->depth_log2; + cq->q.stride_log2 = req_cq->stride_log2; + + *resp_cqid = cq->cqid; + } else { + rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev, + attr->cqe + IONIC_CQ_GRACE, + sizeof(struct ionic_v1_cqe)); + if (rc) + goto err_q_init; + + ionic_queue_dbell_init(&cq->q, cq->cqid); + cq->color = true; + cq->credit = cq->q.mask; + } + + rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE); + if (rc) + goto err_pgtbl_init; + + init_completion(&cq->cq_rel_comp); + kref_init(&cq->cq_kref); + + entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL); + if (entry) { + if (!xa_is_err(entry)) + rc = -EINVAL; + else + rc = xa_err(entry); + + goto err_xa; + } + + return 0; + +err_xa: + ionic_pgtbl_unbuf(dev, buf); +err_pgtbl_init: + if (!udata) + ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev); +err_q_init: + if (cq->umem) + ib_umem_release(cq->umem); +err_qdesc: + ionic_put_cqid(dev, cq->cqid); +err_args: + cq->vcq = NULL; + + return rc; +} + +void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq) +{ + if (!cq->vcq) + return; + + xa_erase_irq(&dev->cq_tbl, cq->cqid); + + kref_put(&cq->cq_kref, ionic_cq_complete); + wait_for_completion(&cq->cq_rel_comp); + + if (cq->umem) + ib_umem_release(cq->umem); + else + ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev); + + ionic_put_cqid(dev, cq->cqid); + + cq->vcq = NULL; +} + +static int ionic_validate_qdesc_zero(struct ionic_qdesc *q) +{ + if (q->addr || q->size || q->mask || q->depth_log2 || 
q->stride_log2) + return -EINVAL; + + return 0; +} + +static int ionic_get_pdid(struct ionic_ibdev *dev, u32 *pdid) +{ + int rc; + + rc = ionic_resid_get(&dev->inuse_pdid); + if (rc < 0) + return rc; + + *pdid = rc; + return 0; +} + +static int ionic_get_ahid(struct ionic_ibdev *dev, u32 *ahid) +{ + int rc; + + rc = ionic_resid_get(&dev->inuse_ahid); + if (rc < 0) + return rc; + + *ahid = rc; + return 0; +} + +static int ionic_get_mrid(struct ionic_ibdev *dev, u32 *mrid) +{ + int rc; + + /* wrap to 1, skip reserved lkey */ + rc = ionic_resid_get_shared(&dev->inuse_mrid, 1, + dev->inuse_mrid.inuse_size); + if (rc < 0) + return rc; + + *mrid = ionic_mrid(rc, dev->next_mrkey++); + return 0; +} + +static int ionic_get_gsi_qpid(struct ionic_ibdev *dev, u32 *qpid) +{ + int rc = 0; + + rc = ionic_resid_get_shared(&dev->inuse_qpid, IB_QPT_GSI, IB_QPT_GSI + 1); + if (rc < 0) + return rc; + + *qpid = IB_QPT_GSI; + return 0; +} + +static int ionic_get_qpid(struct ionic_ibdev *dev, u32 *qpid, + u8 *udma_idx, u8 udma_mask) +{ + unsigned int size, base, bound; + int udma_i, udma_x, udma_ix; + int rc = -EINVAL; + + udma_x = dev->next_qpid_udma_idx; + + dev->next_qpid_udma_idx ^= dev->lif_cfg.udma_count - 1; + + for (udma_i = 0; udma_i < dev->lif_cfg.udma_count; ++udma_i) { + udma_ix = udma_i ^ udma_x; + + if (!(udma_mask & BIT(udma_ix))) + continue; + + size = dev->lif_cfg.qp_count / dev->lif_cfg.udma_count; + base = size * udma_ix; + bound = base + size; + + /* skip reserved SMI and GSI qpids in group zero */ + if (!base) + base = 2; + + rc = ionic_resid_get_shared(&dev->inuse_qpid, base, bound); + if (rc >= 0) { + *qpid = ionic_bitid_to_qid(rc, + dev->lif_cfg.udma_qgrp_shift, + dev->half_qpid_udma_shift); + *udma_idx = udma_ix; + + rc = 0; + break; + } + } + + return rc; +} + +static int ionic_get_dbid(struct ionic_ibdev *dev, u32 *dbid, phys_addr_t *addr) +{ + int rc, dbpage_num; + + /* wrap to 1, skip kernel reserved */ + rc = ionic_resid_get_shared(&dev->inuse_dbid, 1, + dev->inuse_dbid.inuse_size); + if (rc < 0) + return rc; + + dbpage_num = (dev->lif_cfg.lif_hw_index * dev->lif_cfg.dbid_count) + rc; + *addr = dev->lif_cfg.db_phys + ((phys_addr_t)dbpage_num << PAGE_SHIFT); + + *dbid = rc; + + return 0; +} + +static void ionic_put_pdid(struct ionic_ibdev *dev, u32 pdid) +{ + ionic_resid_put(&dev->inuse_pdid, pdid); +} + +static void ionic_put_ahid(struct ionic_ibdev *dev, u32 ahid) +{ + ionic_resid_put(&dev->inuse_ahid, ahid); +} + +static void ionic_put_mrid(struct ionic_ibdev *dev, u32 mrid) +{ + ionic_resid_put(&dev->inuse_mrid, ionic_mrid_index(mrid)); +} + +static void ionic_put_qpid(struct ionic_ibdev *dev, u32 qpid) +{ + u32 bitid = ionic_qid_to_bitid(qpid, + dev->lif_cfg.udma_qgrp_shift, + dev->half_qpid_udma_shift); + + ionic_resid_put(&dev->inuse_qpid, bitid); +} + +static void ionic_put_dbid(struct ionic_ibdev *dev, u32 dbid) +{ + ionic_resid_put(&dev->inuse_dbid, dbid); +} + +static struct rdma_user_mmap_entry* +ionic_mmap_entry_insert(struct ionic_ctx *ctx, unsigned long size, + unsigned long pfn, u8 mmap_flags, u64 *offset) +{ + struct ionic_mmap_entry *entry; + int rc; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + entry->size = size; + entry->pfn = pfn; + entry->mmap_flags = mmap_flags; + + rc = rdma_user_mmap_entry_insert(&ctx->ibctx, &entry->rdma_entry, + entry->size); + if (rc) { + kfree(entry); + return NULL; + } + + if (offset) + *offset = rdma_user_mmap_get_offset(&entry->rdma_entry); + + return &entry->rdma_entry; +} + +int 
ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device); + struct ionic_ctx *ctx = to_ionic_ctx(ibctx); + struct ionic_ctx_resp resp = {}; + struct ionic_ctx_req req; + phys_addr_t db_phys = 0; + int rc; + + rc = ib_copy_from_udata(&req, udata, sizeof(req)); + if (rc) + return rc; + + /* try to allocate dbid for user ctx */ + rc = ionic_get_dbid(dev, &ctx->dbid, &db_phys); + if (rc < 0) + return rc; + + ibdev_dbg(&dev->ibdev, "user space dbid %u\n", ctx->dbid); + + ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE, + PHYS_PFN(db_phys), 0, NULL); + if (!ctx->mmap_dbell) { + rc = -ENOMEM; + goto err_mmap_dbell; + } + + resp.page_shift = PAGE_SHIFT; + + resp.dbell_offset = db_phys & ~PAGE_MASK; + + resp.version = dev->lif_cfg.rdma_version; + resp.qp_opcodes = dev->lif_cfg.qp_opcodes; + resp.admin_opcodes = dev->lif_cfg.admin_opcodes; + + resp.sq_qtype = dev->lif_cfg.sq_qtype; + resp.rq_qtype = dev->lif_cfg.rq_qtype; + resp.cq_qtype = dev->lif_cfg.cq_qtype; + resp.admin_qtype = dev->lif_cfg.aq_qtype; + resp.max_stride = dev->lif_cfg.max_stride; + resp.max_spec = IONIC_SPEC_HIGH; + + resp.udma_count = dev->lif_cfg.udma_count; + resp.expdb_mask = dev->lif_cfg.expdb_mask; + + if (dev->lif_cfg.sq_expdb) + resp.expdb_qtypes |= IONIC_EXPDB_SQ; + if (dev->lif_cfg.rq_expdb) + resp.expdb_qtypes |= IONIC_EXPDB_RQ; + + rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (rc) + goto err_resp; + + return 0; + +err_resp: + rdma_user_mmap_entry_remove(ctx->mmap_dbell); +err_mmap_dbell: + ionic_put_dbid(dev, ctx->dbid); + + return rc; +} + +void ionic_dealloc_ucontext(struct ib_ucontext *ibctx) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device); + struct ionic_ctx *ctx = to_ionic_ctx(ibctx); + + rdma_user_mmap_entry_remove(ctx->mmap_dbell); + ionic_put_dbid(dev, ctx->dbid); +} + +int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device); + struct ionic_ctx *ctx = to_ionic_ctx(ibctx); + struct rdma_user_mmap_entry *rdma_entry; + struct ionic_mmap_entry *ionic_entry; + int rc = 0; + + rdma_entry = rdma_user_mmap_entry_get(&ctx->ibctx, vma); + if (!rdma_entry) { + ibdev_dbg(&dev->ibdev, "not found %#lx\n", + vma->vm_pgoff << PAGE_SHIFT); + return -EINVAL; + } + + ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry, + rdma_entry); + + ibdev_dbg(&dev->ibdev, "writecombine? 
%d\n", + ionic_entry->mmap_flags & IONIC_MMAP_WC); + if (ionic_entry->mmap_flags & IONIC_MMAP_WC) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + else + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + ibdev_dbg(&dev->ibdev, "remap st %#lx pf %#lx sz %#lx\n", + vma->vm_start, ionic_entry->pfn, ionic_entry->size); + rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn, + ionic_entry->size, vma->vm_page_prot, + rdma_entry); + if (rc) + ibdev_dbg(&dev->ibdev, "remap failed %d\n", rc); + + rdma_user_mmap_entry_put(rdma_entry); + return rc; +} + +void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry) +{ + struct ionic_mmap_entry *ionic_entry; + + ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry, + rdma_entry); + kfree(ionic_entry); +} + +int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device); + struct ionic_pd *pd = to_ionic_pd(ibpd); + + return ionic_get_pdid(dev, &pd->pdid); +} + +int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device); + struct ionic_pd *pd = to_ionic_pd(ibpd); + + ionic_put_pdid(dev, pd->pdid); + + return 0; +} + +static int ionic_build_hdr(struct ionic_ibdev *dev, + struct ib_ud_header *hdr, + const struct rdma_ah_attr *attr, + u16 sport, bool want_ecn) +{ + const struct ib_global_route *grh; + enum rdma_network_type net; + u16 vlan; + int rc; + + if (attr->ah_flags != IB_AH_GRH) + return -EINVAL; + if (attr->type != RDMA_AH_ATTR_TYPE_ROCE) + return -EINVAL; + + grh = rdma_ah_read_grh(attr); + + rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, &hdr->eth.smac_h[0]); + if (rc) + return rc; + + net = rdma_gid_attr_network_type(grh->sgid_attr); + + rc = ib_ud_header_init(0, /* no payload */ + 0, /* no lrh */ + 1, /* yes eth */ + vlan != 0xffff, + 0, /* no grh */ + net == RDMA_NETWORK_IPV4 ? 
4 : 6, + 1, /* yes udp */ + 0, /* no imm */ + hdr); + if (rc) + return rc; + + ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac); + + if (net == RDMA_NETWORK_IPV4) { + hdr->eth.type = cpu_to_be16(ETH_P_IP); + hdr->ip4.frag_off = cpu_to_be16(0x4000); /* don't fragment */ + hdr->ip4.ttl = grh->hop_limit; + hdr->ip4.tot_len = cpu_to_be16(0xffff); + hdr->ip4.saddr = + *(const __be32 *)(grh->sgid_attr->gid.raw + 12); + hdr->ip4.daddr = *(const __be32 *)(grh->dgid.raw + 12); + + if (want_ecn) + hdr->ip4.tos = ionic_set_ecn(grh->traffic_class); + else + hdr->ip4.tos = ionic_clear_ecn(grh->traffic_class); + } else { + hdr->eth.type = cpu_to_be16(ETH_P_IPV6); + hdr->grh.flow_label = cpu_to_be32(grh->flow_label); + hdr->grh.hop_limit = grh->hop_limit; + hdr->grh.source_gid = grh->sgid_attr->gid; + hdr->grh.destination_gid = grh->dgid; + + if (want_ecn) + hdr->grh.traffic_class = + ionic_set_ecn(grh->traffic_class); + else + hdr->grh.traffic_class = + ionic_clear_ecn(grh->traffic_class); + } + + if (vlan != 0xffff) { + vlan |= rdma_ah_get_sl(attr) << VLAN_PRIO_SHIFT; + hdr->vlan.tag = cpu_to_be16(vlan); + hdr->vlan.type = hdr->eth.type; + hdr->eth.type = cpu_to_be16(ETH_P_8021Q); + } + + hdr->udp.sport = cpu_to_be16(sport); + hdr->udp.dport = cpu_to_be16(ROCE_V2_UDP_DPORT); + + return 0; +} + +static void ionic_set_ah_attr(struct ionic_ibdev *dev, + struct rdma_ah_attr *ah_attr, + struct ib_ud_header *hdr, + int sgid_index) +{ + u32 flow_label; + u16 vlan = 0; + u8 tos, ttl; + + if (hdr->vlan_present) + vlan = be16_to_cpu(hdr->vlan.tag); + + if (hdr->ipv4_present) { + flow_label = 0; + ttl = hdr->ip4.ttl; + tos = hdr->ip4.tos; + *(__be16 *)(hdr->grh.destination_gid.raw + 10) = cpu_to_be16(0xffff); + *(__be32 *)(hdr->grh.destination_gid.raw + 12) = hdr->ip4.daddr; + } else { + flow_label = be32_to_cpu(hdr->grh.flow_label); + ttl = hdr->grh.hop_limit; + tos = hdr->grh.traffic_class; + } + + memset(ah_attr, 0, sizeof(*ah_attr)); + ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE; + if (hdr->eth_present) + ether_addr_copy(ah_attr->roce.dmac, hdr->eth.dmac_h); + rdma_ah_set_sl(ah_attr, vlan >> VLAN_PRIO_SHIFT); + rdma_ah_set_port_num(ah_attr, 1); + rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index, ttl, tos); + rdma_ah_set_dgid_raw(ah_attr, &hdr->grh.destination_gid); +} + +static int ionic_create_ah_cmd(struct ionic_ibdev *dev, + struct ionic_ah *ah, + struct ionic_pd *pd, + struct rdma_ah_attr *attr, + u32 flags) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_CREATE_AH, + .len = cpu_to_le16(IONIC_ADMIN_CREATE_AH_IN_V1_LEN), + .cmd.create_ah = { + .pd_id = cpu_to_le32(pd->pdid), + .dbid_flags = cpu_to_le16(dev->lif_cfg.dbid), + .id_ver = cpu_to_le32(ah->ahid), + } + } + }; + enum ionic_admin_flags admin_flags = 0; + dma_addr_t hdr_dma = 0; + void *hdr_buf; + gfp_t gfp = GFP_ATOMIC; + int rc, hdr_len = 0; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_AH) + return -EBADRQC; + + if (flags & RDMA_CREATE_AH_SLEEPABLE) + gfp = GFP_KERNEL; + else + admin_flags |= IONIC_ADMIN_F_BUSYWAIT; + + rc = ionic_build_hdr(dev, &ah->hdr, attr, IONIC_ROCE_UDP_SPORT, false); + if (rc) + return rc; + + if (ah->hdr.eth.type == cpu_to_be16(ETH_P_8021Q)) { + if (ah->hdr.vlan.type == cpu_to_be16(ETH_P_IP)) + wr.wqe.cmd.create_ah.csum_profile = + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP; + else + wr.wqe.cmd.create_ah.csum_profile = + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP; + } else { + if (ah->hdr.eth.type == cpu_to_be16(ETH_P_IP)) + 
wr.wqe.cmd.create_ah.csum_profile = + IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP; + else + wr.wqe.cmd.create_ah.csum_profile = + IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP; + } + + ah->sgid_index = rdma_ah_read_grh(attr)->sgid_index; + + hdr_buf = kmalloc(PAGE_SIZE, gfp); + if (!hdr_buf) + return -ENOMEM; + + hdr_len = ib_ud_header_pack(&ah->hdr, hdr_buf); + hdr_len -= IB_BTH_BYTES; + hdr_len -= IB_DETH_BYTES; + ibdev_dbg(&dev->ibdev, "roce packet header template\n"); + print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1, + hdr_buf, hdr_len, true); + + hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len, + DMA_TO_DEVICE); + + rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma); + if (rc) + goto err_dma; + + wr.wqe.cmd.create_ah.dma_addr = cpu_to_le64(hdr_dma); + wr.wqe.cmd.create_ah.length = cpu_to_le32(hdr_len); + + ionic_admin_post(dev, &wr); + rc = ionic_admin_wait(dev, &wr, admin_flags); + + dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len, + DMA_TO_DEVICE); +err_dma: + kfree(hdr_buf); + + return rc; +} + +static int ionic_destroy_ah_cmd(struct ionic_ibdev *dev, u32 ahid, u32 flags) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_DESTROY_AH, + .len = cpu_to_le16(IONIC_ADMIN_DESTROY_AH_IN_V1_LEN), + .cmd.destroy_ah = { + .ah_id = cpu_to_le32(ahid), + }, + } + }; + enum ionic_admin_flags admin_flags = IONIC_ADMIN_F_TEARDOWN; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_AH) + return -EBADRQC; + + if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) + admin_flags |= IONIC_ADMIN_F_BUSYWAIT; + + ionic_admin_post(dev, &wr); + ionic_admin_wait(dev, &wr, admin_flags); + + /* No host-memory resource is associated with ah, so it is ok + * to "succeed" and complete this destroy ah on the host. 
+ */ + return 0; +} + +int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, + struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device); + struct rdma_ah_attr *attr = init_attr->ah_attr; + struct ionic_pd *pd = to_ionic_pd(ibah->pd); + struct ionic_ah *ah = to_ionic_ah(ibah); + struct ionic_ah_resp resp = {}; + u32 flags = init_attr->flags; + int rc; + + rc = ionic_get_ahid(dev, &ah->ahid); + if (rc) + return rc; + + rc = ionic_create_ah_cmd(dev, ah, pd, attr, flags); + if (rc) + goto err_cmd; + + if (udata) { + resp.ahid = ah->ahid; + + rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (rc) + goto err_resp; + } + + return 0; + +err_resp: + ionic_destroy_ah_cmd(dev, ah->ahid, flags); +err_cmd: + ionic_put_ahid(dev, ah->ahid); + return rc; +} + +int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device); + struct ionic_ah *ah = to_ionic_ah(ibah); + + ionic_set_ah_attr(dev, ah_attr, &ah->hdr, ah->sgid_index); + + return 0; +} + +int ionic_destroy_ah(struct ib_ah *ibah, u32 flags) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device); + struct ionic_ah *ah = to_ionic_ah(ibah); + int rc; + + rc = ionic_destroy_ah_cmd(dev, ah->ahid, flags); + if (rc) + return rc; + + ionic_put_ahid(dev, ah->ahid); + + return 0; +} + +static int ionic_create_mr_cmd(struct ionic_ibdev *dev, + struct ionic_pd *pd, + struct ionic_mr *mr, + u64 addr, + u64 length) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_CREATE_MR, + .len = cpu_to_le16(IONIC_ADMIN_CREATE_MR_IN_V1_LEN), + .cmd.create_mr = { + .va = cpu_to_le64(addr), + .length = cpu_to_le64(length), + .pd_id = cpu_to_le32(pd->pdid), + .page_size_log2 = mr->buf.page_size_log2, + .tbl_index = cpu_to_le32(~0), + .map_count = cpu_to_le32(mr->buf.tbl_pages), + .dma_addr = ionic_pgtbl_dma(&mr->buf, addr), + .dbid_flags = cpu_to_le16(mr->flags), + .id_ver = cpu_to_le32(mr->mrid), + } + } + }; + int rc; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_MR) + return -EBADRQC; + + ionic_admin_post(dev, &wr); + rc = ionic_admin_wait(dev, &wr, 0); + if (!rc) + mr->created = true; + + return rc; +} + +static int ionic_destroy_mr_cmd(struct ionic_ibdev *dev, u32 mrid) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_DESTROY_MR, + .len = cpu_to_le16(IONIC_ADMIN_DESTROY_MR_IN_V1_LEN), + .cmd.destroy_mr = { + .mr_id = cpu_to_le32(mrid), + }, + } + }; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_MR) + return -EBADRQC; + + ionic_admin_post(dev, &wr); + + return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN); +} + +struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access) +{ + struct ionic_pd *pd = to_ionic_pd(ibpd); + struct ionic_mr *mr; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->ibmr.lkey = IONIC_DMA_LKEY; + mr->ibmr.rkey = IONIC_DMA_RKEY; + + if (pd) + pd->flags |= IONIC_QPF_PRIVILEGED; + + return &mr->ibmr; +} + +struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, + u64 addr, int access, struct ib_dmah *dmah, + struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device); + struct ionic_pd *pd = to_ionic_pd(ibpd); + struct ionic_mr *mr; + unsigned long pg_sz; + int rc; + + if (dmah) + return ERR_PTR(-EOPNOTSUPP); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return 
ERR_PTR(-ENOMEM); + + rc = ionic_get_mrid(dev, &mr->mrid); + if (rc) + goto err_mrid; + + mr->ibmr.lkey = mr->mrid; + mr->ibmr.rkey = mr->mrid; + mr->ibmr.iova = addr; + mr->ibmr.length = length; + + mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access); + + mr->umem = ib_umem_get(&dev->ibdev, start, length, access); + if (IS_ERR(mr->umem)) { + rc = PTR_ERR(mr->umem); + goto err_umem; + } + + pg_sz = ib_umem_find_best_pgsz(mr->umem, + dev->lif_cfg.page_size_supported, + addr); + if (!pg_sz) { + rc = -EINVAL; + goto err_pgtbl; + } + + rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz); + if (rc) + goto err_pgtbl; + + rc = ionic_create_mr_cmd(dev, pd, mr, addr, length); + if (rc) + goto err_cmd; + + ionic_pgtbl_unbuf(dev, &mr->buf); + + return &mr->ibmr; + +err_cmd: + ionic_pgtbl_unbuf(dev, &mr->buf); +err_pgtbl: + ib_umem_release(mr->umem); +err_umem: + ionic_put_mrid(dev, mr->mrid); +err_mrid: + kfree(mr); + return ERR_PTR(rc); +} + +struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset, + u64 length, u64 addr, int fd, int access, + struct ib_dmah *dmah, + struct uverbs_attr_bundle *attrs) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device); + struct ionic_pd *pd = to_ionic_pd(ibpd); + struct ib_umem_dmabuf *umem_dmabuf; + struct ionic_mr *mr; + u64 pg_sz; + int rc; + + if (dmah) + return ERR_PTR(-EOPNOTSUPP); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + rc = ionic_get_mrid(dev, &mr->mrid); + if (rc) + goto err_mrid; + + mr->ibmr.lkey = mr->mrid; + mr->ibmr.rkey = mr->mrid; + mr->ibmr.iova = addr; + mr->ibmr.length = length; + + mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access); + + umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ibdev, offset, length, + fd, access); + if (IS_ERR(umem_dmabuf)) { + rc = PTR_ERR(umem_dmabuf); + goto err_umem; + } + + mr->umem = &umem_dmabuf->umem; + + pg_sz = ib_umem_find_best_pgsz(mr->umem, + dev->lif_cfg.page_size_supported, + addr); + if (!pg_sz) { + rc = -EINVAL; + goto err_pgtbl; + } + + rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz); + if (rc) + goto err_pgtbl; + + rc = ionic_create_mr_cmd(dev, pd, mr, addr, length); + if (rc) + goto err_cmd; + + ionic_pgtbl_unbuf(dev, &mr->buf); + + return &mr->ibmr; + +err_cmd: + ionic_pgtbl_unbuf(dev, &mr->buf); +err_pgtbl: + ib_umem_release(mr->umem); +err_umem: + ionic_put_mrid(dev, mr->mrid); +err_mrid: + kfree(mr); + return ERR_PTR(rc); +} + +int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device); + struct ionic_mr *mr = to_ionic_mr(ibmr); + int rc; + + if (!mr->ibmr.lkey) + goto out; + + if (mr->created) { + rc = ionic_destroy_mr_cmd(dev, mr->mrid); + if (rc) + return rc; + } + + ionic_pgtbl_unbuf(dev, &mr->buf); + + if (mr->umem) + ib_umem_release(mr->umem); + + ionic_put_mrid(dev, mr->mrid); + +out: + kfree(mr); + + return 0; +} + +struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type, + u32 max_sg) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device); + struct ionic_pd *pd = to_ionic_pd(ibpd); + struct ionic_mr *mr; + int rc; + + if (type != IB_MR_TYPE_MEM_REG) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + rc = ionic_get_mrid(dev, &mr->mrid); + if (rc) + goto err_mrid; + + mr->ibmr.lkey = mr->mrid; + mr->ibmr.rkey = mr->mrid; + + mr->flags = IONIC_MRF_PHYS_MR; + + rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE); + if (rc) + goto err_pgtbl; 
+ + mr->buf.tbl_pages = 0; + + rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0); + if (rc) + goto err_cmd; + + return &mr->ibmr; + +err_cmd: + ionic_pgtbl_unbuf(dev, &mr->buf); +err_pgtbl: + ionic_put_mrid(dev, mr->mrid); +err_mrid: + kfree(mr); + return ERR_PTR(rc); +} + +static int ionic_map_mr_page(struct ib_mr *ibmr, u64 dma) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device); + struct ionic_mr *mr = to_ionic_mr(ibmr); + + ibdev_dbg(&dev->ibdev, "dma %p\n", (void *)dma); + return ionic_pgtbl_page(&mr->buf, dma); +} + +int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device); + struct ionic_mr *mr = to_ionic_mr(ibmr); + int rc; + + /* mr must be allocated using ib_alloc_mr() */ + if (unlikely(!mr->buf.tbl_limit)) + return -EINVAL; + + mr->buf.tbl_pages = 0; + + if (mr->buf.tbl_buf) + dma_sync_single_for_cpu(dev->lif_cfg.hwdev, mr->buf.tbl_dma, + mr->buf.tbl_size, DMA_TO_DEVICE); + + ibdev_dbg(&dev->ibdev, "sg %p nent %d\n", sg, sg_nents); + rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ionic_map_mr_page); + + mr->buf.page_size_log2 = order_base_2(ibmr->page_size); + + if (mr->buf.tbl_buf) + dma_sync_single_for_device(dev->lif_cfg.hwdev, mr->buf.tbl_dma, + mr->buf.tbl_size, DMA_TO_DEVICE); + + return rc; +} + +int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device); + struct ionic_pd *pd = to_ionic_pd(ibmw->pd); + struct ionic_mr *mr = to_ionic_mw(ibmw); + int rc; + + rc = ionic_get_mrid(dev, &mr->mrid); + if (rc) + return rc; + + mr->ibmw.rkey = mr->mrid; + + if (mr->ibmw.type == IB_MW_TYPE_1) + mr->flags = IONIC_MRF_MW_1; + else + mr->flags = IONIC_MRF_MW_2; + + rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0); + if (rc) + goto err_cmd; + + return 0; + +err_cmd: + ionic_put_mrid(dev, mr->mrid); + return rc; +} + +int ionic_dealloc_mw(struct ib_mw *ibmw) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device); + struct ionic_mr *mr = to_ionic_mw(ibmw); + int rc; + + rc = ionic_destroy_mr_cmd(dev, mr->mrid); + if (rc) + return rc; + + ionic_put_mrid(dev, mr->mrid); + + return 0; +} + +static int ionic_create_cq_cmd(struct ionic_ibdev *dev, + struct ionic_ctx *ctx, + struct ionic_cq *cq, + struct ionic_tbl_buf *buf) +{ + const u16 dbid = ionic_ctx_dbid(dev, ctx); + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_CREATE_CQ, + .len = cpu_to_le16(IONIC_ADMIN_CREATE_CQ_IN_V1_LEN), + .cmd.create_cq = { + .eq_id = cpu_to_le32(cq->eqid), + .depth_log2 = cq->q.depth_log2, + .stride_log2 = cq->q.stride_log2, + .page_size_log2 = buf->page_size_log2, + .tbl_index = cpu_to_le32(~0), + .map_count = cpu_to_le32(buf->tbl_pages), + .dma_addr = ionic_pgtbl_dma(buf, 0), + .dbid_flags = cpu_to_le16(dbid), + .id_ver = cpu_to_le32(cq->cqid), + } + } + }; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_CQ) + return -EBADRQC; + + ionic_admin_post(dev, &wr); + + return ionic_admin_wait(dev, &wr, 0); +} + +static int ionic_destroy_cq_cmd(struct ionic_ibdev *dev, u32 cqid) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_DESTROY_CQ, + .len = cpu_to_le16(IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN), + .cmd.destroy_cq = { + .cq_id = cpu_to_le32(cqid), + }, + } + }; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_CQ) + return -EBADRQC; + + ionic_admin_post(dev, &wr); + + return 
ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN); +} + +int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct uverbs_attr_bundle *attrs) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device); + struct ib_udata *udata = &attrs->driver_udata; + struct ionic_ctx *ctx = + rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx); + struct ionic_vcq *vcq = to_ionic_vcq(ibcq); + struct ionic_tbl_buf buf = {}; + struct ionic_cq_resp resp; + struct ionic_cq_req req; + int udma_idx = 0, rc; + + if (udata) { + rc = ib_copy_from_udata(&req, udata, sizeof(req)); + if (rc) + return rc; + } + + vcq->udma_mask = BIT(dev->lif_cfg.udma_count) - 1; + + if (udata) + vcq->udma_mask &= req.udma_mask; + + if (!vcq->udma_mask) { + rc = -EINVAL; + goto err_init; + } + + for (; udma_idx < dev->lif_cfg.udma_count; ++udma_idx) { + if (!(vcq->udma_mask & BIT(udma_idx))) + continue; + + rc = ionic_create_cq_common(vcq, &buf, attr, ctx, udata, + &req.cq[udma_idx], + &resp.cqid[udma_idx], + udma_idx); + if (rc) + goto err_init; + + rc = ionic_create_cq_cmd(dev, ctx, &vcq->cq[udma_idx], &buf); + if (rc) + goto err_cmd; + + ionic_pgtbl_unbuf(dev, &buf); + } + + vcq->ibcq.cqe = attr->cqe; + + if (udata) { + resp.udma_mask = vcq->udma_mask; + + rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (rc) + goto err_resp; + } + + return 0; + +err_resp: + while (udma_idx) { + --udma_idx; + if (!(vcq->udma_mask & BIT(udma_idx))) + continue; + ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid); +err_cmd: + ionic_pgtbl_unbuf(dev, &buf); + ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]); +err_init: + ; + } + + return rc; +} + +int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device); + struct ionic_vcq *vcq = to_ionic_vcq(ibcq); + int udma_idx, rc_tmp, rc = 0; + + for (udma_idx = dev->lif_cfg.udma_count; udma_idx; ) { + --udma_idx; + + if (!(vcq->udma_mask & BIT(udma_idx))) + continue; + + rc_tmp = ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid); + if (rc_tmp) { + if (!rc) + rc = rc_tmp; + + continue; + } + + ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]); + } + + return rc; +} + +static bool pd_remote_privileged(struct ib_pd *pd) +{ + return pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; +} + +static int ionic_create_qp_cmd(struct ionic_ibdev *dev, + struct ionic_pd *pd, + struct ionic_cq *send_cq, + struct ionic_cq *recv_cq, + struct ionic_qp *qp, + struct ionic_tbl_buf *sq_buf, + struct ionic_tbl_buf *rq_buf, + struct ib_qp_init_attr *attr) +{ + const u16 dbid = ionic_obj_dbid(dev, pd->ibpd.uobject); + const u32 flags = to_ionic_qp_flags(0, 0, + qp->sq_cmb & IONIC_CMB_ENABLE, + qp->rq_cmb & IONIC_CMB_ENABLE, + qp->sq_spec, qp->rq_spec, + pd->flags & IONIC_QPF_PRIVILEGED, + pd_remote_privileged(&pd->ibpd)); + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_CREATE_QP, + .len = cpu_to_le16(IONIC_ADMIN_CREATE_QP_IN_V1_LEN), + .cmd.create_qp = { + .pd_id = cpu_to_le32(pd->pdid), + .priv_flags = cpu_to_be32(flags), + .type_state = to_ionic_qp_type(attr->qp_type), + .dbid_flags = cpu_to_le16(dbid), + .id_ver = cpu_to_le32(qp->qpid), + } + } + }; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_QP) + return -EBADRQC; + + if (qp->has_sq) { + wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid); + wr.wqe.cmd.create_qp.sq_depth_log2 = qp->sq.depth_log2; + wr.wqe.cmd.create_qp.sq_stride_log2 = qp->sq.stride_log2; + wr.wqe.cmd.create_qp.sq_page_size_log2 = 
sq_buf->page_size_log2; + wr.wqe.cmd.create_qp.sq_tbl_index_xrcd_id = cpu_to_le32(~0); + wr.wqe.cmd.create_qp.sq_map_count = + cpu_to_le32(sq_buf->tbl_pages); + wr.wqe.cmd.create_qp.sq_dma_addr = ionic_pgtbl_dma(sq_buf, 0); + } + + if (qp->has_rq) { + wr.wqe.cmd.create_qp.rq_cq_id = cpu_to_le32(recv_cq->cqid); + wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2; + wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2; + wr.wqe.cmd.create_qp.rq_page_size_log2 = rq_buf->page_size_log2; + wr.wqe.cmd.create_qp.rq_tbl_index_srq_id = cpu_to_le32(~0); + wr.wqe.cmd.create_qp.rq_map_count = + cpu_to_le32(rq_buf->tbl_pages); + wr.wqe.cmd.create_qp.rq_dma_addr = ionic_pgtbl_dma(rq_buf, 0); + } + + ionic_admin_post(dev, &wr); + + return ionic_admin_wait(dev, &wr, 0); +} + +static int ionic_modify_qp_cmd(struct ionic_ibdev *dev, + struct ionic_pd *pd, + struct ionic_qp *qp, + struct ib_qp_attr *attr, + int mask) +{ + const u32 flags = to_ionic_qp_flags(attr->qp_access_flags, + attr->en_sqd_async_notify, + qp->sq_cmb & IONIC_CMB_ENABLE, + qp->rq_cmb & IONIC_CMB_ENABLE, + qp->sq_spec, qp->rq_spec, + pd->flags & IONIC_QPF_PRIVILEGED, + pd_remote_privileged(qp->ibqp.pd)); + const u8 state = to_ionic_qp_modify_state(attr->qp_state, + attr->cur_qp_state); + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_MODIFY_QP, + .len = cpu_to_le16(IONIC_ADMIN_MODIFY_QP_IN_V1_LEN), + .cmd.mod_qp = { + .attr_mask = cpu_to_be32(mask), + .access_flags = cpu_to_be16(flags), + .rq_psn = cpu_to_le32(attr->rq_psn), + .sq_psn = cpu_to_le32(attr->sq_psn), + .rate_limit_kbps = + cpu_to_le32(attr->rate_limit), + .pmtu = (attr->path_mtu + 7), + .retry = (attr->retry_cnt | + (attr->rnr_retry << 4)), + .rnr_timer = attr->min_rnr_timer, + .retry_timeout = attr->timeout, + .type_state = state, + .id_ver = cpu_to_le32(qp->qpid), + } + } + }; + const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); + void *hdr_buf = NULL; + dma_addr_t hdr_dma = 0; + int rc, hdr_len = 0; + u16 sport; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_MODIFY_QP) + return -EBADRQC; + + if ((mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) { + /* Note, round up/down was already done for allocating + * resources on the device. The allocation order is in cache + * line size. We can't use the order of the resource + * allocation to determine the order wqes here, because for + * queue length <= one cache line it is not distinct. + * + * Therefore, order wqes is computed again here. + * + * Account for hole and round up to the next order. 
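+		 *
+		 * For example, max_dest_rd_atomic = 4 gives an rsq depth of
+		 * order_base_2(4 + 1) = 3, i.e. eight slots.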
+ */ + wr.wqe.cmd.mod_qp.rsq_depth = + order_base_2(attr->max_dest_rd_atomic + 1); + wr.wqe.cmd.mod_qp.rsq_index = cpu_to_le32(~0); + } + + if ((mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { + /* Account for hole and round down to the next order */ + wr.wqe.cmd.mod_qp.rrq_depth = + order_base_2(attr->max_rd_atomic + 2) - 1; + wr.wqe.cmd.mod_qp.rrq_index = cpu_to_le32(~0); + } + + if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) + wr.wqe.cmd.mod_qp.qkey_dest_qpn = + cpu_to_le32(attr->dest_qp_num); + else + wr.wqe.cmd.mod_qp.qkey_dest_qpn = cpu_to_le32(attr->qkey); + + if (mask & IB_QP_AV) { + if (!qp->hdr) + return -ENOMEM; + + sport = rdma_get_udp_sport(grh->flow_label, + qp->qpid, + attr->dest_qp_num); + + rc = ionic_build_hdr(dev, qp->hdr, &attr->ah_attr, sport, true); + if (rc) + return rc; + + qp->sgid_index = grh->sgid_index; + + hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!hdr_buf) + return -ENOMEM; + + hdr_len = ib_ud_header_pack(qp->hdr, hdr_buf); + hdr_len -= IB_BTH_BYTES; + hdr_len -= IB_DETH_BYTES; + ibdev_dbg(&dev->ibdev, "roce packet header template\n"); + print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1, + hdr_buf, hdr_len, true); + + hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len, + DMA_TO_DEVICE); + + rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma); + if (rc) + goto err_dma; + + if (qp->hdr->ipv4_present) { + wr.wqe.cmd.mod_qp.tfp_csum_profile = + qp->hdr->vlan_present ? + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP : + IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP; + } else { + wr.wqe.cmd.mod_qp.tfp_csum_profile = + qp->hdr->vlan_present ? + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP : + IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP; + } + + wr.wqe.cmd.mod_qp.ah_id_len = + cpu_to_le32(qp->ahid | (hdr_len << 24)); + wr.wqe.cmd.mod_qp.dma_addr = cpu_to_le64(hdr_dma); + + wr.wqe.cmd.mod_qp.en_pcp = attr->ah_attr.sl; + wr.wqe.cmd.mod_qp.ip_dscp = grh->traffic_class >> 2; + } + + ionic_admin_post(dev, &wr); + + rc = ionic_admin_wait(dev, &wr, 0); + + if (mask & IB_QP_AV) + dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len, + DMA_TO_DEVICE); +err_dma: + if (mask & IB_QP_AV) + kfree(hdr_buf); + + return rc; +} + +static int ionic_query_qp_cmd(struct ionic_ibdev *dev, + struct ionic_qp *qp, + struct ib_qp_attr *attr, + int mask) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_QUERY_QP, + .len = cpu_to_le16(IONIC_ADMIN_QUERY_QP_IN_V1_LEN), + .cmd.query_qp = { + .id_ver = cpu_to_le32(qp->qpid), + }, + } + }; + struct ionic_v1_admin_query_qp_sq *query_sqbuf; + struct ionic_v1_admin_query_qp_rq *query_rqbuf; + dma_addr_t query_sqdma; + dma_addr_t query_rqdma; + dma_addr_t hdr_dma = 0; + void *hdr_buf = NULL; + int flags, rc; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_QUERY_QP) + return -EBADRQC; + + if (qp->has_sq) { + bool expdb = !!(qp->sq_cmb & IONIC_CMB_EXPDB); + + attr->cap.max_send_sge = + ionic_v1_send_wqe_max_sge(qp->sq.stride_log2, + qp->sq_spec, + expdb); + attr->cap.max_inline_data = + ionic_v1_send_wqe_max_data(qp->sq.stride_log2, expdb); + } + + if (qp->has_rq) { + attr->cap.max_recv_sge = + ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, + qp->rq_spec, + qp->rq_cmb & IONIC_CMB_EXPDB); + } + + query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!query_sqbuf) + return -ENOMEM; + + query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!query_rqbuf) { + rc = -ENOMEM; + goto err_rqbuf; + } + + query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE, + 
DMA_FROM_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, query_sqdma); + if (rc) + goto err_sqdma; + + query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE, + DMA_FROM_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, query_rqdma); + if (rc) + goto err_rqdma; + + if (mask & IB_QP_AV) { + hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!hdr_buf) { + rc = -ENOMEM; + goto err_hdrbuf; + } + + hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, + PAGE_SIZE, DMA_FROM_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma); + if (rc) + goto err_hdrdma; + } + + wr.wqe.cmd.query_qp.sq_dma_addr = cpu_to_le64(query_sqdma); + wr.wqe.cmd.query_qp.rq_dma_addr = cpu_to_le64(query_rqdma); + wr.wqe.cmd.query_qp.hdr_dma_addr = cpu_to_le64(hdr_dma); + wr.wqe.cmd.query_qp.ah_id = cpu_to_le32(qp->ahid); + + ionic_admin_post(dev, &wr); + + rc = ionic_admin_wait(dev, &wr, 0); + + if (rc) + goto err_hdrdma; + + flags = be16_to_cpu(query_sqbuf->access_perms_flags | + query_rqbuf->access_perms_flags); + + print_hex_dump_debug("sqbuf ", DUMP_PREFIX_OFFSET, 16, 1, + query_sqbuf, sizeof(*query_sqbuf), true); + print_hex_dump_debug("rqbuf ", DUMP_PREFIX_OFFSET, 16, 1, + query_rqbuf, sizeof(*query_rqbuf), true); + ibdev_dbg(&dev->ibdev, "query qp %u state_pmtu %#x flags %#x", + qp->qpid, query_rqbuf->state_pmtu, flags); + + attr->qp_state = from_ionic_qp_state(query_rqbuf->state_pmtu >> 4); + attr->cur_qp_state = attr->qp_state; + attr->path_mtu = (query_rqbuf->state_pmtu & 0xf) - 7; + attr->path_mig_state = IB_MIG_MIGRATED; + attr->qkey = be32_to_cpu(query_sqbuf->qkey_dest_qpn); + attr->rq_psn = be32_to_cpu(query_sqbuf->rq_psn); + attr->sq_psn = be32_to_cpu(query_rqbuf->sq_psn); + attr->dest_qp_num = attr->qkey; + attr->qp_access_flags = from_ionic_qp_flags(flags); + attr->pkey_index = 0; + attr->alt_pkey_index = 0; + attr->en_sqd_async_notify = !!(flags & IONIC_QPF_SQD_NOTIFY); + attr->sq_draining = !!(flags & IONIC_QPF_SQ_DRAINING); + attr->max_rd_atomic = BIT(query_rqbuf->rrq_depth) - 1; + attr->max_dest_rd_atomic = BIT(query_rqbuf->rsq_depth) - 1; + attr->min_rnr_timer = query_sqbuf->rnr_timer; + attr->port_num = 0; + attr->timeout = query_sqbuf->retry_timeout; + attr->retry_cnt = query_rqbuf->retry_rnrtry & 0xf; + attr->rnr_retry = query_rqbuf->retry_rnrtry >> 4; + attr->alt_port_num = 0; + attr->alt_timeout = 0; + attr->rate_limit = be32_to_cpu(query_sqbuf->rate_limit_kbps); + + if (mask & IB_QP_AV) + ionic_set_ah_attr(dev, &attr->ah_attr, + qp->hdr, qp->sgid_index); + +err_hdrdma: + if (mask & IB_QP_AV) { + dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, + PAGE_SIZE, DMA_FROM_DEVICE); + kfree(hdr_buf); + } +err_hdrbuf: + dma_unmap_single(dev->lif_cfg.hwdev, query_rqdma, sizeof(*query_rqbuf), + DMA_FROM_DEVICE); +err_rqdma: + dma_unmap_single(dev->lif_cfg.hwdev, query_sqdma, sizeof(*query_sqbuf), + DMA_FROM_DEVICE); +err_sqdma: + kfree(query_rqbuf); +err_rqbuf: + kfree(query_sqbuf); + + return rc; +} + +static int ionic_destroy_qp_cmd(struct ionic_ibdev *dev, u32 qpid) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = IONIC_V1_ADMIN_DESTROY_QP, + .len = cpu_to_le16(IONIC_ADMIN_DESTROY_QP_IN_V1_LEN), + .cmd.destroy_qp = { + .qp_id = cpu_to_le32(qpid), + }, + } + }; + + if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_QP) + return -EBADRQC; + + ionic_admin_post(dev, &wr); + + return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN); +} + +static bool ionic_expdb_wqe_size_supported(struct ionic_ibdev *dev, + uint32_t 
wqe_size) +{ + switch (wqe_size) { + case 64: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_64; + case 128: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_128; + case 256: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_256; + case 512: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_512; + } + + return false; +} + +static void ionic_qp_sq_init_cmb(struct ionic_ibdev *dev, + struct ionic_qp *qp, + struct ib_udata *udata, + int max_data) +{ + u8 expdb_stride_log2 = 0; + bool expdb; + int rc; + + if (!(qp->sq_cmb & IONIC_CMB_ENABLE)) + goto not_in_cmb; + + if (qp->sq_cmb & ~IONIC_CMB_SUPPORTED) { + if (qp->sq_cmb & IONIC_CMB_REQUIRE) + goto not_in_cmb; + + qp->sq_cmb &= IONIC_CMB_SUPPORTED; + } + + if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.sq_expdb) { + if (qp->sq_cmb & IONIC_CMB_REQUIRE) + goto not_in_cmb; + + qp->sq_cmb &= ~IONIC_CMB_EXPDB; + } + + qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE); + + if (qp->sq_cmb_order >= IONIC_SQCMB_ORDER) + goto not_in_cmb; + + if (qp->sq_cmb & IONIC_CMB_EXPDB) + expdb_stride_log2 = qp->sq.stride_log2; + + rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->sq_cmb_pgid, + &qp->sq_cmb_addr, qp->sq_cmb_order, + expdb_stride_log2, &expdb); + if (rc) + goto not_in_cmb; + + if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !expdb) { + if (qp->sq_cmb & IONIC_CMB_REQUIRE) + goto err_map; + + qp->sq_cmb &= ~IONIC_CMB_EXPDB; + } + + return; + +err_map: + ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order); +not_in_cmb: + if (qp->sq_cmb & IONIC_CMB_REQUIRE) + ibdev_dbg(&dev->ibdev, "could not place sq in cmb as required\n"); + + qp->sq_cmb = 0; + qp->sq_cmb_order = IONIC_RES_INVALID; + qp->sq_cmb_pgid = 0; + qp->sq_cmb_addr = 0; +} + +static void ionic_qp_sq_destroy_cmb(struct ionic_ibdev *dev, + struct ionic_ctx *ctx, + struct ionic_qp *qp) +{ + if (!(qp->sq_cmb & IONIC_CMB_ENABLE)) + return; + + if (ctx) + rdma_user_mmap_entry_remove(qp->mmap_sq_cmb); + + ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order); +} + +static int ionic_qp_sq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx, + struct ionic_qp *qp, struct ionic_qdesc *sq, + struct ionic_tbl_buf *buf, int max_wr, int max_sge, + int max_data, int sq_spec, struct ib_udata *udata) +{ + u32 wqe_size; + int rc = 0; + + qp->sq_msn_prod = 0; + qp->sq_msn_cons = 0; + + if (!qp->has_sq) { + if (buf) { + buf->tbl_buf = NULL; + buf->tbl_limit = 0; + buf->tbl_pages = 0; + } + if (udata) + rc = ionic_validate_qdesc_zero(sq); + + return rc; + } + + rc = -EINVAL; + + if (max_wr < 0 || max_wr > 0xffff) + return rc; + + if (max_sge < 1) + return rc; + + if (max_sge > min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, + qp->sq_cmb & + IONIC_CMB_EXPDB), + IONIC_SPEC_HIGH)) + return rc; + + if (max_data < 0) + return rc; + + if (max_data > ionic_v1_send_wqe_max_data(dev->lif_cfg.max_stride, + qp->sq_cmb & IONIC_CMB_EXPDB)) + return rc; + + if (udata) { + rc = ionic_validate_qdesc(sq); + if (rc) + return rc; + + qp->sq_spec = sq_spec; + + qp->sq.ptr = NULL; + qp->sq.size = sq->size; + qp->sq.mask = sq->mask; + qp->sq.depth_log2 = sq->depth_log2; + qp->sq.stride_log2 = sq->stride_log2; + + qp->sq_meta = NULL; + qp->sq_msn_idx = NULL; + + qp->sq_umem = ib_umem_get(&dev->ibdev, sq->addr, sq->size, 0); + if (IS_ERR(qp->sq_umem)) + return PTR_ERR(qp->sq_umem); + } else { + qp->sq_umem = NULL; + + qp->sq_spec = ionic_v1_use_spec_sge(max_sge, sq_spec); + if (sq_spec && !qp->sq_spec) + ibdev_dbg(&dev->ibdev, + "init sq: max_sge %u disables spec\n", + max_sge); + + if (qp->sq_cmb & IONIC_CMB_EXPDB) { + 
wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data, + qp->sq_spec, + true); + + if (!ionic_expdb_wqe_size_supported(dev, wqe_size)) + qp->sq_cmb &= ~IONIC_CMB_EXPDB; + } + + if (!(qp->sq_cmb & IONIC_CMB_EXPDB)) + wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data, + qp->sq_spec, + false); + + rc = ionic_queue_init(&qp->sq, dev->lif_cfg.hwdev, + max_wr, wqe_size); + if (rc) + return rc; + + ionic_queue_dbell_init(&qp->sq, qp->qpid); + + qp->sq_meta = kmalloc_array((u32)qp->sq.mask + 1, + sizeof(*qp->sq_meta), + GFP_KERNEL); + if (!qp->sq_meta) { + rc = -ENOMEM; + goto err_sq_meta; + } + + qp->sq_msn_idx = kmalloc_array((u32)qp->sq.mask + 1, + sizeof(*qp->sq_msn_idx), + GFP_KERNEL); + if (!qp->sq_msn_idx) { + rc = -ENOMEM; + goto err_sq_msn; + } + } + + ionic_qp_sq_init_cmb(dev, qp, udata, max_data); + + if (qp->sq_cmb & IONIC_CMB_ENABLE) + rc = ionic_pgtbl_init(dev, buf, NULL, + (u64)qp->sq_cmb_pgid << PAGE_SHIFT, + 1, PAGE_SIZE); + else + rc = ionic_pgtbl_init(dev, buf, + qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE); + if (rc) + goto err_sq_tbl; + + return 0; + +err_sq_tbl: + ionic_qp_sq_destroy_cmb(dev, ctx, qp); + kfree(qp->sq_msn_idx); +err_sq_msn: + kfree(qp->sq_meta); +err_sq_meta: + if (qp->sq_umem) + ib_umem_release(qp->sq_umem); + else + ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev); + return rc; +} + +static void ionic_qp_sq_destroy(struct ionic_ibdev *dev, + struct ionic_ctx *ctx, + struct ionic_qp *qp) +{ + if (!qp->has_sq) + return; + + ionic_qp_sq_destroy_cmb(dev, ctx, qp); + + kfree(qp->sq_msn_idx); + kfree(qp->sq_meta); + + if (qp->sq_umem) + ib_umem_release(qp->sq_umem); + else + ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev); +} + +static void ionic_qp_rq_init_cmb(struct ionic_ibdev *dev, + struct ionic_qp *qp, + struct ib_udata *udata) +{ + u8 expdb_stride_log2 = 0; + bool expdb; + int rc; + + if (!(qp->rq_cmb & IONIC_CMB_ENABLE)) + goto not_in_cmb; + + if (qp->rq_cmb & ~IONIC_CMB_SUPPORTED) { + if (qp->rq_cmb & IONIC_CMB_REQUIRE) + goto not_in_cmb; + + qp->rq_cmb &= IONIC_CMB_SUPPORTED; + } + + if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.rq_expdb) { + if (qp->rq_cmb & IONIC_CMB_REQUIRE) + goto not_in_cmb; + + qp->rq_cmb &= ~IONIC_CMB_EXPDB; + } + + qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE); + + if (qp->rq_cmb_order >= IONIC_RQCMB_ORDER) + goto not_in_cmb; + + if (qp->rq_cmb & IONIC_CMB_EXPDB) + expdb_stride_log2 = qp->rq.stride_log2; + + rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->rq_cmb_pgid, + &qp->rq_cmb_addr, qp->rq_cmb_order, + expdb_stride_log2, &expdb); + if (rc) + goto not_in_cmb; + + if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !expdb) { + if (qp->rq_cmb & IONIC_CMB_REQUIRE) + goto err_map; + + qp->rq_cmb &= ~IONIC_CMB_EXPDB; + } + + return; + +err_map: + ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order); +not_in_cmb: + if (qp->rq_cmb & IONIC_CMB_REQUIRE) + ibdev_dbg(&dev->ibdev, "could not place rq in cmb as required\n"); + + qp->rq_cmb = 0; + qp->rq_cmb_order = IONIC_RES_INVALID; + qp->rq_cmb_pgid = 0; + qp->rq_cmb_addr = 0; +} + +static void ionic_qp_rq_destroy_cmb(struct ionic_ibdev *dev, + struct ionic_ctx *ctx, + struct ionic_qp *qp) +{ + if (!(qp->rq_cmb & IONIC_CMB_ENABLE)) + return; + + if (ctx) + rdma_user_mmap_entry_remove(qp->mmap_rq_cmb); + + ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order); +} + +static int ionic_qp_rq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx, + struct ionic_qp *qp, struct ionic_qdesc *rq, + struct ionic_tbl_buf *buf, int max_wr, int max_sge, + int rq_spec, struct 
ib_udata *udata) +{ + int rc = 0, i; + u32 wqe_size; + + if (!qp->has_rq) { + if (buf) { + buf->tbl_buf = NULL; + buf->tbl_limit = 0; + buf->tbl_pages = 0; + } + if (udata) + rc = ionic_validate_qdesc_zero(rq); + + return rc; + } + + rc = -EINVAL; + + if (max_wr < 0 || max_wr > 0xffff) + return rc; + + if (max_sge < 1) + return rc; + + if (max_sge > min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false), + IONIC_SPEC_HIGH)) + return rc; + + if (udata) { + rc = ionic_validate_qdesc(rq); + if (rc) + return rc; + + qp->rq_spec = rq_spec; + + qp->rq.ptr = NULL; + qp->rq.size = rq->size; + qp->rq.mask = rq->mask; + qp->rq.depth_log2 = rq->depth_log2; + qp->rq.stride_log2 = rq->stride_log2; + + qp->rq_meta = NULL; + + qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0); + if (IS_ERR(qp->rq_umem)) + return PTR_ERR(qp->rq_umem); + } else { + qp->rq_umem = NULL; + + qp->rq_spec = ionic_v1_use_spec_sge(max_sge, rq_spec); + if (rq_spec && !qp->rq_spec) + ibdev_dbg(&dev->ibdev, + "init rq: max_sge %u disables spec\n", + max_sge); + + if (qp->rq_cmb & IONIC_CMB_EXPDB) { + wqe_size = ionic_v1_recv_wqe_min_size(max_sge, + qp->rq_spec, + true); + + if (!ionic_expdb_wqe_size_supported(dev, wqe_size)) + qp->rq_cmb &= ~IONIC_CMB_EXPDB; + } + + if (!(qp->rq_cmb & IONIC_CMB_EXPDB)) + wqe_size = ionic_v1_recv_wqe_min_size(max_sge, + qp->rq_spec, + false); + + rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev, + max_wr, wqe_size); + if (rc) + return rc; + + ionic_queue_dbell_init(&qp->rq, qp->qpid); + + qp->rq_meta = kmalloc_array((u32)qp->rq.mask + 1, + sizeof(*qp->rq_meta), + GFP_KERNEL); + if (!qp->rq_meta) { + rc = -ENOMEM; + goto err_rq_meta; + } + + for (i = 0; i < qp->rq.mask; ++i) + qp->rq_meta[i].next = &qp->rq_meta[i + 1]; + qp->rq_meta[i].next = IONIC_META_LAST; + qp->rq_meta_head = &qp->rq_meta[0]; + } + + ionic_qp_rq_init_cmb(dev, qp, udata); + + if (qp->rq_cmb & IONIC_CMB_ENABLE) + rc = ionic_pgtbl_init(dev, buf, NULL, + (u64)qp->rq_cmb_pgid << PAGE_SHIFT, + 1, PAGE_SIZE); + else + rc = ionic_pgtbl_init(dev, buf, + qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE); + if (rc) + goto err_rq_tbl; + + return 0; + +err_rq_tbl: + ionic_qp_rq_destroy_cmb(dev, ctx, qp); + kfree(qp->rq_meta); +err_rq_meta: + if (qp->rq_umem) + ib_umem_release(qp->rq_umem); + else + ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev); + return rc; +} + +static void ionic_qp_rq_destroy(struct ionic_ibdev *dev, + struct ionic_ctx *ctx, + struct ionic_qp *qp) +{ + if (!qp->has_rq) + return; + + ionic_qp_rq_destroy_cmb(dev, ctx, qp); + + kfree(qp->rq_meta); + + if (qp->rq_umem) + ib_umem_release(qp->rq_umem); + else + ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev); +} + +int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, + struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device); + struct ionic_tbl_buf sq_buf = {}, rq_buf = {}; + struct ionic_pd *pd = to_ionic_pd(ibqp->pd); + struct ionic_qp *qp = to_ionic_qp(ibqp); + struct ionic_ctx *ctx = + rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx); + struct ionic_qp_resp resp = {}; + struct ionic_qp_req req = {}; + struct ionic_cq *cq; + u8 udma_mask; + void *entry; + int rc; + + if (udata) { + rc = ib_copy_from_udata(&req, udata, sizeof(req)); + if (rc) + return rc; + } else { + req.sq_spec = IONIC_SPEC_HIGH; + req.rq_spec = IONIC_SPEC_HIGH; + } + + if (attr->qp_type == IB_QPT_SMI || attr->qp_type > IB_QPT_UD) + return -EOPNOTSUPP; + + qp->state = IB_QPS_RESET; + + INIT_LIST_HEAD(&qp->cq_poll_sq); + 
INIT_LIST_HEAD(&qp->cq_flush_sq); + INIT_LIST_HEAD(&qp->cq_flush_rq); + + spin_lock_init(&qp->sq_lock); + spin_lock_init(&qp->rq_lock); + + qp->has_sq = 1; + qp->has_rq = 1; + + if (attr->qp_type == IB_QPT_GSI) { + rc = ionic_get_gsi_qpid(dev, &qp->qpid); + } else { + udma_mask = BIT(dev->lif_cfg.udma_count) - 1; + + if (qp->has_sq) + udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask; + + if (qp->has_rq) + udma_mask &= to_ionic_vcq(attr->recv_cq)->udma_mask; + + if (udata && req.udma_mask) + udma_mask &= req.udma_mask; + + if (!udma_mask) + return -EINVAL; + + rc = ionic_get_qpid(dev, &qp->qpid, &qp->udma_idx, udma_mask); + } + if (rc) + return rc; + + qp->sig_all = attr->sq_sig_type == IB_SIGNAL_ALL_WR; + qp->has_ah = attr->qp_type == IB_QPT_RC; + + if (qp->has_ah) { + qp->hdr = kzalloc(sizeof(*qp->hdr), GFP_KERNEL); + if (!qp->hdr) { + rc = -ENOMEM; + goto err_ah_alloc; + } + + rc = ionic_get_ahid(dev, &qp->ahid); + if (rc) + goto err_ahid; + } + + if (udata) { + if (req.rq_cmb & IONIC_CMB_ENABLE) + qp->rq_cmb = req.rq_cmb; + + if (req.sq_cmb & IONIC_CMB_ENABLE) + qp->sq_cmb = req.sq_cmb; + } + + rc = ionic_qp_sq_init(dev, ctx, qp, &req.sq, &sq_buf, + attr->cap.max_send_wr, attr->cap.max_send_sge, + attr->cap.max_inline_data, req.sq_spec, udata); + if (rc) + goto err_sq; + + rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf, + attr->cap.max_recv_wr, attr->cap.max_recv_sge, + req.rq_spec, udata); + if (rc) + goto err_rq; + + rc = ionic_create_qp_cmd(dev, pd, + to_ionic_vcq_cq(attr->send_cq, qp->udma_idx), + to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx), + qp, &sq_buf, &rq_buf, attr); + if (rc) + goto err_cmd; + + if (udata) { + resp.qpid = qp->qpid; + resp.udma_idx = qp->udma_idx; + + if (qp->sq_cmb & IONIC_CMB_ENABLE) { + bool wc; + + if ((qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) == + (IONIC_CMB_WC | IONIC_CMB_UC)) { + ibdev_dbg(&dev->ibdev, + "Both sq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n"); + qp->sq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC); + } + + wc = (qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) + != IONIC_CMB_UC; + + /* let userspace know the mapping */ + if (wc) + qp->sq_cmb |= IONIC_CMB_WC; + else + qp->sq_cmb |= IONIC_CMB_UC; + + qp->mmap_sq_cmb = + ionic_mmap_entry_insert(ctx, + qp->sq.size, + PHYS_PFN(qp->sq_cmb_addr), + wc ? IONIC_MMAP_WC : 0, + &resp.sq_cmb_offset); + if (!qp->mmap_sq_cmb) { + rc = -ENOMEM; + goto err_mmap_sq; + } + + resp.sq_cmb = qp->sq_cmb; + } + + if (qp->rq_cmb & IONIC_CMB_ENABLE) { + bool wc; + + if ((qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) == + (IONIC_CMB_WC | IONIC_CMB_UC)) { + ibdev_dbg(&dev->ibdev, + "Both rq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n"); + qp->rq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC); + } + + if (qp->rq_cmb & IONIC_CMB_EXPDB) + wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) + == IONIC_CMB_WC; + else + wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) + != IONIC_CMB_UC; + + /* let userspace know the mapping */ + if (wc) + qp->rq_cmb |= IONIC_CMB_WC; + else + qp->rq_cmb |= IONIC_CMB_UC; + + qp->mmap_rq_cmb = + ionic_mmap_entry_insert(ctx, + qp->rq.size, + PHYS_PFN(qp->rq_cmb_addr), + wc ? 
IONIC_MMAP_WC : 0, + &resp.rq_cmb_offset); + if (!qp->mmap_rq_cmb) { + rc = -ENOMEM; + goto err_mmap_rq; + } + + resp.rq_cmb = qp->rq_cmb; + } + + rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (rc) + goto err_resp; + } + + ionic_pgtbl_unbuf(dev, &rq_buf); + ionic_pgtbl_unbuf(dev, &sq_buf); + + qp->ibqp.qp_num = qp->qpid; + + init_completion(&qp->qp_rel_comp); + kref_init(&qp->qp_kref); + + entry = xa_store_irq(&dev->qp_tbl, qp->qpid, qp, GFP_KERNEL); + if (entry) { + if (!xa_is_err(entry)) + rc = -EINVAL; + else + rc = xa_err(entry); + + goto err_resp; + } + + if (qp->has_sq) { + cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx); + + attr->cap.max_send_wr = qp->sq.mask; + attr->cap.max_send_sge = + ionic_v1_send_wqe_max_sge(qp->sq.stride_log2, + qp->sq_spec, + qp->sq_cmb & IONIC_CMB_EXPDB); + attr->cap.max_inline_data = + ionic_v1_send_wqe_max_data(qp->sq.stride_log2, + qp->sq_cmb & + IONIC_CMB_EXPDB); + qp->sq_cqid = cq->cqid; + } + + if (qp->has_rq) { + cq = to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx); + + attr->cap.max_recv_wr = qp->rq.mask; + attr->cap.max_recv_sge = + ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, + qp->rq_spec, + qp->rq_cmb & IONIC_CMB_EXPDB); + qp->rq_cqid = cq->cqid; + } + + return 0; + +err_resp: + if (udata && (qp->rq_cmb & IONIC_CMB_ENABLE)) + rdma_user_mmap_entry_remove(qp->mmap_rq_cmb); +err_mmap_rq: + if (udata && (qp->sq_cmb & IONIC_CMB_ENABLE)) + rdma_user_mmap_entry_remove(qp->mmap_sq_cmb); +err_mmap_sq: + ionic_destroy_qp_cmd(dev, qp->qpid); +err_cmd: + ionic_pgtbl_unbuf(dev, &rq_buf); + ionic_qp_rq_destroy(dev, ctx, qp); +err_rq: + ionic_pgtbl_unbuf(dev, &sq_buf); + ionic_qp_sq_destroy(dev, ctx, qp); +err_sq: + if (qp->has_ah) + ionic_put_ahid(dev, qp->ahid); +err_ahid: + kfree(qp->hdr); +err_ah_alloc: + ionic_put_qpid(dev, qp->qpid); + return rc; +} + +void ionic_notify_flush_cq(struct ionic_cq *cq) +{ + if (cq->flush && cq->vcq->ibcq.comp_handler) + cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq, + cq->vcq->ibcq.cq_context); +} + +static void ionic_notify_qp_cqs(struct ionic_ibdev *dev, struct ionic_qp *qp) +{ + if (qp->ibqp.send_cq) + ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq, + qp->udma_idx)); + if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq) + ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.recv_cq, + qp->udma_idx)); +} + +void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp) +{ + unsigned long irqflags; + struct ionic_cq *cq; + + if (qp->ibqp.send_cq) { + cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx); + + /* Hold the CQ lock and QP sq_lock to set up flush */ + spin_lock_irqsave(&cq->lock, irqflags); + spin_lock(&qp->sq_lock); + qp->sq_flush = true; + if (!ionic_queue_empty(&qp->sq)) { + cq->flush = true; + list_move_tail(&qp->cq_flush_sq, &cq->flush_sq); + } + spin_unlock(&qp->sq_lock); + spin_unlock_irqrestore(&cq->lock, irqflags); + } + + if (qp->ibqp.recv_cq) { + cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx); + + /* Hold the CQ lock and QP rq_lock to set up flush */ + spin_lock_irqsave(&cq->lock, irqflags); + spin_lock(&qp->rq_lock); + qp->rq_flush = true; + if (!ionic_queue_empty(&qp->rq)) { + cq->flush = true; + list_move_tail(&qp->cq_flush_rq, &cq->flush_rq); + } + spin_unlock(&qp->rq_lock); + spin_unlock_irqrestore(&cq->lock, irqflags); + } +} + +static void ionic_clean_cq(struct ionic_cq *cq, u32 qpid) +{ + struct ionic_v1_cqe *qcqe; + int prod, qtf, qid, type; + bool color; + + if (!cq->q.ptr) + return; + + color = cq->color; + prod = cq->q.prod; + qcqe = ionic_queue_at(&cq->q, 
prod); + + while (color == ionic_v1_cqe_color(qcqe)) { + qtf = ionic_v1_cqe_qtf(qcqe); + qid = ionic_v1_cqe_qtf_qid(qtf); + type = ionic_v1_cqe_qtf_type(qtf); + + if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN) + ionic_v1_cqe_clean(qcqe); + + prod = ionic_queue_next(&cq->q, prod); + qcqe = ionic_queue_at(&cq->q, prod); + color = ionic_color_wrap(prod, color); + } +} + +static void ionic_reset_qp(struct ionic_ibdev *dev, struct ionic_qp *qp) +{ + unsigned long irqflags; + struct ionic_cq *cq; + int i; + + local_irq_save(irqflags); + + if (qp->ibqp.send_cq) { + cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx); + spin_lock(&cq->lock); + ionic_clean_cq(cq, qp->qpid); + spin_unlock(&cq->lock); + } + + if (qp->ibqp.recv_cq) { + cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx); + spin_lock(&cq->lock); + ionic_clean_cq(cq, qp->qpid); + spin_unlock(&cq->lock); + } + + if (qp->has_sq) { + spin_lock(&qp->sq_lock); + qp->sq_flush = false; + qp->sq_flush_rcvd = false; + qp->sq_msn_prod = 0; + qp->sq_msn_cons = 0; + qp->sq.prod = 0; + qp->sq.cons = 0; + spin_unlock(&qp->sq_lock); + } + + if (qp->has_rq) { + spin_lock(&qp->rq_lock); + qp->rq_flush = false; + qp->rq.prod = 0; + qp->rq.cons = 0; + if (qp->rq_meta) { + for (i = 0; i < qp->rq.mask; ++i) + qp->rq_meta[i].next = &qp->rq_meta[i + 1]; + qp->rq_meta[i].next = IONIC_META_LAST; + } + qp->rq_meta_head = &qp->rq_meta[0]; + spin_unlock(&qp->rq_lock); + } + + local_irq_restore(irqflags); +} + +static bool ionic_qp_cur_state_is_ok(enum ib_qp_state q_state, + enum ib_qp_state attr_state) +{ + if (q_state == attr_state) + return true; + + if (attr_state == IB_QPS_ERR) + return true; + + if (attr_state == IB_QPS_SQE) + return q_state == IB_QPS_RTS || q_state == IB_QPS_SQD; + + return false; +} + +static int ionic_check_modify_qp(struct ionic_qp *qp, struct ib_qp_attr *attr, + int mask) +{ + enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ? + attr->cur_qp_state : qp->state; + enum ib_qp_state next_state = (mask & IB_QP_STATE) ? 
+ attr->qp_state : cur_state; + + if ((mask & IB_QP_CUR_STATE) && + !ionic_qp_cur_state_is_ok(qp->state, attr->cur_qp_state)) + return -EINVAL; + + if (!ib_modify_qp_is_ok(cur_state, next_state, qp->ibqp.qp_type, mask)) + return -EINVAL; + + /* unprivileged qp not allowed privileged qkey */ + if ((mask & IB_QP_QKEY) && (attr->qkey & 0x80000000) && + qp->ibqp.uobject) + return -EPERM; + + return 0; +} + +int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, + struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device); + struct ionic_pd *pd = to_ionic_pd(ibqp->pd); + struct ionic_qp *qp = to_ionic_qp(ibqp); + int rc; + + rc = ionic_check_modify_qp(qp, attr, mask); + if (rc) + return rc; + + if (mask & IB_QP_CAP) + return -EINVAL; + + rc = ionic_modify_qp_cmd(dev, pd, qp, attr, mask); + if (rc) + return rc; + + if (mask & IB_QP_STATE) { + qp->state = attr->qp_state; + + if (attr->qp_state == IB_QPS_ERR) { + ionic_flush_qp(dev, qp); + ionic_notify_qp_cqs(dev, qp); + } else if (attr->qp_state == IB_QPS_RESET) { + ionic_reset_qp(dev, qp); + } + } + + return 0; +} + +int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int mask, struct ib_qp_init_attr *init_attr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device); + struct ionic_qp *qp = to_ionic_qp(ibqp); + int rc; + + memset(attr, 0, sizeof(*attr)); + memset(init_attr, 0, sizeof(*init_attr)); + + rc = ionic_query_qp_cmd(dev, qp, attr, mask); + if (rc) + return rc; + + if (qp->has_sq) + attr->cap.max_send_wr = qp->sq.mask; + + if (qp->has_rq) + attr->cap.max_recv_wr = qp->rq.mask; + + init_attr->event_handler = ibqp->event_handler; + init_attr->qp_context = ibqp->qp_context; + init_attr->send_cq = ibqp->send_cq; + init_attr->recv_cq = ibqp->recv_cq; + init_attr->srq = ibqp->srq; + init_attr->xrcd = ibqp->xrcd; + init_attr->cap = attr->cap; + init_attr->sq_sig_type = qp->sig_all ? 
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; + init_attr->qp_type = ibqp->qp_type; + init_attr->create_flags = 0; + init_attr->port_num = 0; + init_attr->rwq_ind_tbl = ibqp->rwq_ind_tbl; + init_attr->source_qpn = 0; + + return rc; +} + +int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) +{ + struct ionic_ctx *ctx = + rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx); + struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device); + struct ionic_qp *qp = to_ionic_qp(ibqp); + unsigned long irqflags; + struct ionic_cq *cq; + int rc; + + rc = ionic_destroy_qp_cmd(dev, qp->qpid); + if (rc) + return rc; + + xa_erase_irq(&dev->qp_tbl, qp->qpid); + + kref_put(&qp->qp_kref, ionic_qp_complete); + wait_for_completion(&qp->qp_rel_comp); + + if (qp->ibqp.send_cq) { + cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx); + spin_lock_irqsave(&cq->lock, irqflags); + ionic_clean_cq(cq, qp->qpid); + list_del(&qp->cq_poll_sq); + list_del(&qp->cq_flush_sq); + spin_unlock_irqrestore(&cq->lock, irqflags); + } + + if (qp->ibqp.recv_cq) { + cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx); + spin_lock_irqsave(&cq->lock, irqflags); + ionic_clean_cq(cq, qp->qpid); + list_del(&qp->cq_flush_rq); + spin_unlock_irqrestore(&cq->lock, irqflags); + } + + ionic_qp_rq_destroy(dev, ctx, qp); + ionic_qp_sq_destroy(dev, ctx, qp); + if (qp->has_ah) { + ionic_put_ahid(dev, qp->ahid); + kfree(qp->hdr); + } + ionic_put_qpid(dev, qp->qpid); + + return 0; +} diff --git a/drivers/infiniband/hw/ionic/ionic_datapath.c b/drivers/infiniband/hw/ionic/ionic_datapath.c new file mode 100644 index 000000000000..aa2944887f23 --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_datapath.c @@ -0,0 +1,1399 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#include <linux/module.h> +#include <linux/printk.h> +#include <rdma/ib_addr.h> +#include <rdma/ib_user_verbs.h> + +#include "ionic_fw.h" +#include "ionic_ibdev.h" + +#define IONIC_OP(version, opname) \ + ((version) < 2 ? 
IONIC_V1_OP_##opname : IONIC_V2_OP_##opname) + +static bool ionic_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq, + struct ionic_v1_cqe **cqe) +{ + struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q); + + if (unlikely(cq->color != ionic_v1_cqe_color(qcqe))) + return false; + + /* Prevent out-of-order reads of the CQE */ + dma_rmb(); + + *cqe = qcqe; + + return true; +} + +static int ionic_flush_recv(struct ionic_qp *qp, struct ib_wc *wc) +{ + struct ionic_rq_meta *meta; + struct ionic_v1_wqe *wqe; + + if (!qp->rq_flush) + return 0; + + if (ionic_queue_empty(&qp->rq)) + return 0; + + wqe = ionic_queue_at_cons(&qp->rq); + + /* wqe_id must be a valid queue index */ + if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) { + ibdev_warn(qp->ibqp.device, + "flush qp %u recv index %llu invalid\n", + qp->qpid, (unsigned long long)wqe->base.wqe_id); + return -EIO; + } + + /* wqe_id must indicate a request that is outstanding */ + meta = &qp->rq_meta[wqe->base.wqe_id]; + if (unlikely(meta->next != IONIC_META_POSTED)) { + ibdev_warn(qp->ibqp.device, + "flush qp %u recv index %llu not posted\n", + qp->qpid, (unsigned long long)wqe->base.wqe_id); + return -EIO; + } + + ionic_queue_consume(&qp->rq); + + memset(wc, 0, sizeof(*wc)); + + wc->status = IB_WC_WR_FLUSH_ERR; + wc->wr_id = meta->wrid; + wc->qp = &qp->ibqp; + + meta->next = qp->rq_meta_head; + qp->rq_meta_head = meta; + + return 1; +} + +static int ionic_flush_recv_many(struct ionic_qp *qp, + struct ib_wc *wc, int nwc) +{ + int rc = 0, npolled = 0; + + while (npolled < nwc) { + rc = ionic_flush_recv(qp, wc + npolled); + if (rc <= 0) + break; + + npolled += rc; + } + + return npolled ?: rc; +} + +static int ionic_flush_send(struct ionic_qp *qp, struct ib_wc *wc) +{ + struct ionic_sq_meta *meta; + + if (!qp->sq_flush) + return 0; + + if (ionic_queue_empty(&qp->sq)) + return 0; + + meta = &qp->sq_meta[qp->sq.cons]; + + ionic_queue_consume(&qp->sq); + + memset(wc, 0, sizeof(*wc)); + + wc->status = IB_WC_WR_FLUSH_ERR; + wc->wr_id = meta->wrid; + wc->qp = &qp->ibqp; + + return 1; +} + +static int ionic_flush_send_many(struct ionic_qp *qp, + struct ib_wc *wc, int nwc) +{ + int rc = 0, npolled = 0; + + while (npolled < nwc) { + rc = ionic_flush_send(qp, wc + npolled); + if (rc <= 0) + break; + + npolled += rc; + } + + return npolled ?: rc; +} + +static int ionic_poll_recv(struct ionic_ibdev *dev, struct ionic_cq *cq, + struct ionic_qp *cqe_qp, struct ionic_v1_cqe *cqe, + struct ib_wc *wc) +{ + struct ionic_qp *qp = NULL; + struct ionic_rq_meta *meta; + u32 src_qpn, st_len; + u16 vlan_tag; + u8 op; + + if (cqe_qp->rq_flush) + return 0; + + qp = cqe_qp; + + st_len = be32_to_cpu(cqe->status_length); + + /* ignore wqe_id in case of flush error */ + if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) { + cqe_qp->rq_flush = true; + cq->flush = true; + list_move_tail(&qp->cq_flush_rq, &cq->flush_rq); + + /* posted recvs (if any) flushed by ionic_flush_recv */ + return 0; + } + + /* there had better be something in the recv queue to complete */ + if (ionic_queue_empty(&qp->rq)) { + ibdev_warn(&dev->ibdev, "qp %u is empty\n", qp->qpid); + return -EIO; + } + + /* wqe_id must be a valid queue index */ + if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) { + ibdev_warn(&dev->ibdev, + "qp %u recv index %llu invalid\n", + qp->qpid, (unsigned long long)cqe->recv.wqe_id); + return -EIO; + } + + /* wqe_id must indicate a request that is outstanding */ + meta = &qp->rq_meta[cqe->recv.wqe_id]; + if (unlikely(meta->next != IONIC_META_POSTED)) { + 
ibdev_warn(&dev->ibdev, + "qp %u recv index %llu not posted\n", + qp->qpid, (unsigned long long)cqe->recv.wqe_id); + return -EIO; + } + + meta->next = qp->rq_meta_head; + qp->rq_meta_head = meta; + + memset(wc, 0, sizeof(*wc)); + + wc->wr_id = meta->wrid; + + wc->qp = &cqe_qp->ibqp; + + if (ionic_v1_cqe_error(cqe)) { + wc->vendor_err = st_len; + wc->status = ionic_to_ib_status(st_len); + + cqe_qp->rq_flush = true; + cq->flush = true; + list_move_tail(&qp->cq_flush_rq, &cq->flush_rq); + + ibdev_warn(&dev->ibdev, + "qp %d recv cqe with error\n", qp->qpid); + print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1, + cqe, BIT(cq->q.stride_log2), true); + goto out; + } + + wc->vendor_err = 0; + wc->status = IB_WC_SUCCESS; + + src_qpn = be32_to_cpu(cqe->recv.src_qpn_op); + op = src_qpn >> IONIC_V1_CQE_RECV_OP_SHIFT; + + src_qpn &= IONIC_V1_CQE_RECV_QPN_MASK; + op &= IONIC_V1_CQE_RECV_OP_MASK; + + wc->opcode = IB_WC_RECV; + switch (op) { + case IONIC_V1_CQE_RECV_OP_RDMA_IMM: + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; + wc->wc_flags |= IB_WC_WITH_IMM; + wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */ + break; + case IONIC_V1_CQE_RECV_OP_SEND_IMM: + wc->wc_flags |= IB_WC_WITH_IMM; + wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */ + break; + case IONIC_V1_CQE_RECV_OP_SEND_INV: + wc->wc_flags |= IB_WC_WITH_INVALIDATE; + wc->ex.invalidate_rkey = be32_to_cpu(cqe->recv.imm_data_rkey); + break; + } + + wc->byte_len = st_len; + wc->src_qp = src_qpn; + + if (qp->ibqp.qp_type == IB_QPT_UD || + qp->ibqp.qp_type == IB_QPT_GSI) { + wc->wc_flags |= IB_WC_GRH | IB_WC_WITH_SMAC; + ether_addr_copy(wc->smac, cqe->recv.src_mac); + + wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; + if (ionic_v1_cqe_recv_is_ipv4(cqe)) + wc->network_hdr_type = RDMA_NETWORK_IPV4; + else + wc->network_hdr_type = RDMA_NETWORK_IPV6; + + if (ionic_v1_cqe_recv_is_vlan(cqe)) + wc->wc_flags |= IB_WC_WITH_VLAN; + + /* vlan_tag in cqe will be valid from dpath even if no vlan */ + vlan_tag = be16_to_cpu(cqe->recv.vlan_tag); + wc->vlan_id = vlan_tag & 0xfff; /* 802.1q VID */ + wc->sl = vlan_tag >> VLAN_PRIO_SHIFT; /* 802.1q PCP */ + } + + wc->pkey_index = 0; + wc->port_num = 1; + +out: + ionic_queue_consume(&qp->rq); + + return 1; +} + +static bool ionic_peek_send(struct ionic_qp *qp) +{ + struct ionic_sq_meta *meta; + + if (qp->sq_flush) + return false; + + /* completed all send queue requests */ + if (ionic_queue_empty(&qp->sq)) + return false; + + meta = &qp->sq_meta[qp->sq.cons]; + + /* waiting for remote completion */ + if (meta->remote && meta->seq == qp->sq_msn_cons) + return false; + + /* waiting for local completion */ + if (!meta->remote && !meta->local_comp) + return false; + + return true; +} + +static int ionic_poll_send(struct ionic_ibdev *dev, struct ionic_cq *cq, + struct ionic_qp *qp, struct ib_wc *wc) +{ + struct ionic_sq_meta *meta; + + if (qp->sq_flush) + return 0; + + do { + /* completed all send queue requests */ + if (ionic_queue_empty(&qp->sq)) + goto out_empty; + + meta = &qp->sq_meta[qp->sq.cons]; + + /* waiting for remote completion */ + if (meta->remote && meta->seq == qp->sq_msn_cons) + goto out_empty; + + /* waiting for local completion */ + if (!meta->remote && !meta->local_comp) + goto out_empty; + + ionic_queue_consume(&qp->sq); + + /* produce wc only if signaled or error status */ + } while (!meta->signal && meta->ibsts == IB_WC_SUCCESS); + + memset(wc, 0, sizeof(*wc)); + + wc->status = meta->ibsts; + wc->wr_id = meta->wrid; + wc->qp = &qp->ibqp; + + if (meta->ibsts == IB_WC_SUCCESS) { 
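+		/* meta->len is the payload length captured at post time */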
+ wc->byte_len = meta->len; + wc->opcode = meta->ibop; + } else { + wc->vendor_err = meta->len; + + qp->sq_flush = true; + cq->flush = true; + list_move_tail(&qp->cq_flush_sq, &cq->flush_sq); + } + + return 1; + +out_empty: + if (qp->sq_flush_rcvd) { + qp->sq_flush = true; + cq->flush = true; + list_move_tail(&qp->cq_flush_sq, &cq->flush_sq); + } + return 0; +} + +static int ionic_poll_send_many(struct ionic_ibdev *dev, struct ionic_cq *cq, + struct ionic_qp *qp, struct ib_wc *wc, int nwc) +{ + int rc = 0, npolled = 0; + + while (npolled < nwc) { + rc = ionic_poll_send(dev, cq, qp, wc + npolled); + if (rc <= 0) + break; + + npolled += rc; + } + + return npolled ?: rc; +} + +static int ionic_validate_cons(u16 prod, u16 cons, + u16 comp, u16 mask) +{ + if (((prod - cons) & mask) <= ((comp - cons) & mask)) + return -EIO; + + return 0; +} + +static int ionic_comp_msn(struct ionic_qp *qp, struct ionic_v1_cqe *cqe) +{ + struct ionic_sq_meta *meta; + u16 cqe_seq, cqe_idx; + int rc; + + if (qp->sq_flush) + return 0; + + cqe_seq = be32_to_cpu(cqe->send.msg_msn) & qp->sq.mask; + + rc = ionic_validate_cons(qp->sq_msn_prod, + qp->sq_msn_cons, + cqe_seq - 1, + qp->sq.mask); + if (rc) { + ibdev_warn(qp->ibqp.device, + "qp %u bad msn %#x seq %u for prod %u cons %u\n", + qp->qpid, be32_to_cpu(cqe->send.msg_msn), + cqe_seq, qp->sq_msn_prod, qp->sq_msn_cons); + return rc; + } + + qp->sq_msn_cons = cqe_seq; + + if (ionic_v1_cqe_error(cqe)) { + cqe_idx = qp->sq_msn_idx[(cqe_seq - 1) & qp->sq.mask]; + + meta = &qp->sq_meta[cqe_idx]; + meta->len = be32_to_cpu(cqe->status_length); + meta->ibsts = ionic_to_ib_status(meta->len); + + ibdev_warn(qp->ibqp.device, + "qp %d msn cqe with error\n", qp->qpid); + print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1, + cqe, sizeof(*cqe), true); + } + + return 0; +} + +static int ionic_comp_npg(struct ionic_qp *qp, struct ionic_v1_cqe *cqe) +{ + struct ionic_sq_meta *meta; + u16 cqe_idx; + u32 st_len; + + if (qp->sq_flush) + return 0; + + st_len = be32_to_cpu(cqe->status_length); + + if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) { + /* + * Flush cqe does not consume a wqe on the device, and maybe + * no such work request is posted. + * + * The driver should begin flushing after the last indicated + * normal or error completion. Here, only set a hint that the + * flush request was indicated. In poll_send, if nothing more + * can be polled normally, then begin flushing. 
+ */ + qp->sq_flush_rcvd = true; + return 0; + } + + cqe_idx = cqe->send.npg_wqe_id & qp->sq.mask; + meta = &qp->sq_meta[cqe_idx]; + meta->local_comp = true; + + if (ionic_v1_cqe_error(cqe)) { + meta->len = st_len; + meta->ibsts = ionic_to_ib_status(st_len); + meta->remote = false; + ibdev_warn(qp->ibqp.device, + "qp %d npg cqe with error\n", qp->qpid); + print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1, + cqe, sizeof(*cqe), true); + } + + return 0; +} + +static void ionic_reserve_sync_cq(struct ionic_ibdev *dev, struct ionic_cq *cq) +{ + if (!ionic_queue_empty(&cq->q)) { + cq->credit += ionic_queue_length(&cq->q); + cq->q.cons = cq->q.prod; + + ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, + ionic_queue_dbell_val(&cq->q)); + } +} + +static void ionic_reserve_cq(struct ionic_ibdev *dev, struct ionic_cq *cq, + int spend) +{ + cq->credit -= spend; + + if (cq->credit <= 0) + ionic_reserve_sync_cq(dev, cq); +} + +static int ionic_poll_vcq_cq(struct ionic_ibdev *dev, + struct ionic_cq *cq, + int nwc, struct ib_wc *wc) +{ + struct ionic_qp *qp, *qp_next; + struct ionic_v1_cqe *cqe; + int rc = 0, npolled = 0; + unsigned long irqflags; + u32 qtf, qid; + bool peek; + u8 type; + + if (nwc < 1) + return 0; + + spin_lock_irqsave(&cq->lock, irqflags); + + /* poll already indicated work completions for send queue */ + list_for_each_entry_safe(qp, qp_next, &cq->poll_sq, cq_poll_sq) { + if (npolled == nwc) + goto out; + + spin_lock(&qp->sq_lock); + rc = ionic_poll_send_many(dev, cq, qp, wc + npolled, + nwc - npolled); + spin_unlock(&qp->sq_lock); + + if (rc > 0) + npolled += rc; + + if (npolled < nwc) + list_del_init(&qp->cq_poll_sq); + } + + /* poll for more work completions */ + while (likely(ionic_next_cqe(dev, cq, &cqe))) { + if (npolled == nwc) + goto out; + + qtf = ionic_v1_cqe_qtf(cqe); + qid = ionic_v1_cqe_qtf_qid(qtf); + type = ionic_v1_cqe_qtf_type(qtf); + + /* + * Safe to access QP without additional reference here as, + * 1. We hold cq->lock throughout + * 2. ionic_destroy_qp() acquires the same cq->lock before cleanup + * 3. QP is removed from qp_tbl before any cleanup begins + * This ensures no concurrent access between polling and destruction. 
+ */ + qp = xa_load(&dev->qp_tbl, qid); + if (unlikely(!qp)) { + ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid); + goto cq_next; + } + + switch (type) { + case IONIC_V1_CQE_TYPE_RECV: + spin_lock(&qp->rq_lock); + rc = ionic_poll_recv(dev, cq, qp, cqe, wc + npolled); + spin_unlock(&qp->rq_lock); + + if (rc < 0) + goto out; + + npolled += rc; + + break; + + case IONIC_V1_CQE_TYPE_SEND_MSN: + spin_lock(&qp->sq_lock); + rc = ionic_comp_msn(qp, cqe); + if (!rc) { + rc = ionic_poll_send_many(dev, cq, qp, + wc + npolled, + nwc - npolled); + peek = ionic_peek_send(qp); + } + spin_unlock(&qp->sq_lock); + + if (rc < 0) + goto out; + + npolled += rc; + + if (peek) + list_move_tail(&qp->cq_poll_sq, &cq->poll_sq); + break; + + case IONIC_V1_CQE_TYPE_SEND_NPG: + spin_lock(&qp->sq_lock); + rc = ionic_comp_npg(qp, cqe); + if (!rc) { + rc = ionic_poll_send_many(dev, cq, qp, + wc + npolled, + nwc - npolled); + peek = ionic_peek_send(qp); + } + spin_unlock(&qp->sq_lock); + + if (rc < 0) + goto out; + + npolled += rc; + + if (peek) + list_move_tail(&qp->cq_poll_sq, &cq->poll_sq); + break; + + default: + ibdev_warn(&dev->ibdev, + "unexpected cqe type %u\n", type); + rc = -EIO; + goto out; + } + +cq_next: + ionic_queue_produce(&cq->q); + cq->color = ionic_color_wrap(cq->q.prod, cq->color); + } + + /* lastly, flush send and recv queues */ + if (likely(!cq->flush)) + goto out; + + cq->flush = false; + + list_for_each_entry_safe(qp, qp_next, &cq->flush_sq, cq_flush_sq) { + if (npolled == nwc) + goto out; + + spin_lock(&qp->sq_lock); + rc = ionic_flush_send_many(qp, wc + npolled, nwc - npolled); + spin_unlock(&qp->sq_lock); + + if (rc > 0) + npolled += rc; + + if (npolled < nwc) + list_del_init(&qp->cq_flush_sq); + else + cq->flush = true; + } + + list_for_each_entry_safe(qp, qp_next, &cq->flush_rq, cq_flush_rq) { + if (npolled == nwc) + goto out; + + spin_lock(&qp->rq_lock); + rc = ionic_flush_recv_many(qp, wc + npolled, nwc - npolled); + spin_unlock(&qp->rq_lock); + + if (rc > 0) + npolled += rc; + + if (npolled < nwc) + list_del_init(&qp->cq_flush_rq); + else + cq->flush = true; + } + +out: + /* in case credit was depleted (more work posted than cq depth) */ + if (cq->credit <= 0) + ionic_reserve_sync_cq(dev, cq); + + spin_unlock_irqrestore(&cq->lock, irqflags); + + return npolled ?: rc; +} + +int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device); + struct ionic_vcq *vcq = to_ionic_vcq(ibcq); + int rc_tmp, rc = 0, npolled = 0; + int cq_i, cq_x, cq_ix; + + cq_x = vcq->poll_idx; + vcq->poll_idx ^= dev->lif_cfg.udma_count - 1; + + for (cq_i = 0; npolled < nwc && cq_i < dev->lif_cfg.udma_count; ++cq_i) { + cq_ix = cq_i ^ cq_x; + + if (!(vcq->udma_mask & BIT(cq_ix))) + continue; + + rc_tmp = ionic_poll_vcq_cq(dev, &vcq->cq[cq_ix], + nwc - npolled, + wc + npolled); + + if (rc_tmp >= 0) + npolled += rc_tmp; + else if (!rc) + rc = rc_tmp; + } + + return npolled ?: rc; +} + +static int ionic_req_notify_vcq_cq(struct ionic_ibdev *dev, struct ionic_cq *cq, + enum ib_cq_notify_flags flags) +{ + u64 dbell_val = cq->q.dbell; + + if (flags & IB_CQ_SOLICITED) { + cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod); + dbell_val |= cq->arm_sol_prod | IONIC_CQ_RING_SOL; + } else { + cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod); + dbell_val |= cq->arm_any_prod | IONIC_CQ_RING_ARM; + } + + ionic_reserve_sync_cq(dev, cq); + + ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, dbell_val); + + /* + * 
IB_CQ_REPORT_MISSED_EVENTS: + * + * The queue index in ring zero guarantees no missed events. + * + * Here, we check if the color bit in the next cqe is flipped. If it + * is flipped, then progress can be made by immediately polling the cq. + * Still, the cq will be armed, and an event will be generated. The cq + * may be empty when polled after the event, because the next poll + * after arming the cq can empty it. + */ + return (flags & IB_CQ_REPORT_MISSED_EVENTS) && + cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q)); +} + +int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device); + struct ionic_vcq *vcq = to_ionic_vcq(ibcq); + int rc = 0, cq_i; + + for (cq_i = 0; cq_i < dev->lif_cfg.udma_count; ++cq_i) { + if (!(vcq->udma_mask & BIT(cq_i))) + continue; + + if (ionic_req_notify_vcq_cq(dev, &vcq->cq[cq_i], flags)) + rc = 1; + } + + return rc; +} + +static s64 ionic_prep_inline(void *data, u32 max_data, + const struct ib_sge *ib_sgl, int num_sge) +{ + static const s64 bit_31 = 1u << 31; + s64 len = 0, sg_len; + int sg_i; + + for (sg_i = 0; sg_i < num_sge; ++sg_i) { + sg_len = ib_sgl[sg_i].length; + + /* sge length zero means 2GB */ + if (unlikely(sg_len == 0)) + sg_len = bit_31; + + /* greater than max inline data is invalid */ + if (unlikely(len + sg_len > max_data)) + return -EINVAL; + + memcpy(data + len, (void *)ib_sgl[sg_i].addr, sg_len); + + len += sg_len; + } + + return len; +} + +static s64 ionic_prep_pld(struct ionic_v1_wqe *wqe, + union ionic_v1_pld *pld, + int spec, u32 max_sge, + const struct ib_sge *ib_sgl, + int num_sge) +{ + static const s64 bit_31 = 1l << 31; + struct ionic_sge *sgl; + __be32 *spec32 = NULL; + __be16 *spec16 = NULL; + s64 len = 0, sg_len; + int sg_i = 0; + + if (unlikely(num_sge < 0 || (u32)num_sge > max_sge)) + return -EINVAL; + + if (spec && num_sge > IONIC_V1_SPEC_FIRST_SGE) { + sg_i = IONIC_V1_SPEC_FIRST_SGE; + + if (num_sge > 8) { + wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC16); + spec16 = pld->spec16; + } else { + wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC32); + spec32 = pld->spec32; + } + } + + sgl = &pld->sgl[sg_i]; + + for (sg_i = 0; sg_i < num_sge; ++sg_i) { + sg_len = ib_sgl[sg_i].length; + + /* sge length zero means 2GB */ + if (unlikely(sg_len == 0)) + sg_len = bit_31; + + /* greater than 2GB data is invalid */ + if (unlikely(len + sg_len > bit_31)) + return -EINVAL; + + sgl[sg_i].va = cpu_to_be64(ib_sgl[sg_i].addr); + sgl[sg_i].len = cpu_to_be32(sg_len); + sgl[sg_i].lkey = cpu_to_be32(ib_sgl[sg_i].lkey); + + if (spec32) { + spec32[sg_i] = sgl[sg_i].len; + } else if (spec16) { + if (unlikely(sg_len > U16_MAX)) + return -EINVAL; + spec16[sg_i] = cpu_to_be16(sg_len); + } + + len += sg_len; + } + + return len; +} + +static void ionic_prep_base(struct ionic_qp *qp, + const struct ib_send_wr *wr, + struct ionic_sq_meta *meta, + struct ionic_v1_wqe *wqe) +{ + meta->wrid = wr->wr_id; + meta->ibsts = IB_WC_SUCCESS; + meta->signal = false; + meta->local_comp = false; + + wqe->base.wqe_id = qp->sq.prod; + + if (wr->send_flags & IB_SEND_FENCE) + wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_FENCE); + + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SOL); + + if (qp->sig_all || wr->send_flags & IB_SEND_SIGNALED) { + wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SIG); + meta->signal = true; + } + + meta->seq = qp->sq_msn_prod; + meta->remote = + qp->ibqp.qp_type != IB_QPT_UD && + qp->ibqp.qp_type != IB_QPT_GSI && + 
!ionic_ibop_is_local(wr->opcode); + + if (meta->remote) { + qp->sq_msn_idx[meta->seq] = qp->sq.prod; + qp->sq_msn_prod = ionic_queue_next(&qp->sq, qp->sq_msn_prod); + } + + ionic_queue_produce(&qp->sq); +} + +static int ionic_prep_common(struct ionic_qp *qp, + const struct ib_send_wr *wr, + struct ionic_sq_meta *meta, + struct ionic_v1_wqe *wqe) +{ + s64 signed_len; + u32 mval; + + if (wr->send_flags & IB_SEND_INLINE) { + wqe->base.num_sge_key = 0; + wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_INL); + mval = ionic_v1_send_wqe_max_data(qp->sq.stride_log2, false); + signed_len = ionic_prep_inline(wqe->common.pld.data, mval, + wr->sg_list, wr->num_sge); + } else { + wqe->base.num_sge_key = wr->num_sge; + mval = ionic_v1_send_wqe_max_sge(qp->sq.stride_log2, + qp->sq_spec, + false); + signed_len = ionic_prep_pld(wqe, &wqe->common.pld, + qp->sq_spec, mval, + wr->sg_list, wr->num_sge); + } + + if (unlikely(signed_len < 0)) + return signed_len; + + meta->len = signed_len; + wqe->common.length = cpu_to_be32(signed_len); + + ionic_prep_base(qp, wr, meta, wqe); + + return 0; +} + +static void ionic_prep_sq_wqe(struct ionic_qp *qp, void *wqe) +{ + memset(wqe, 0, 1u << qp->sq.stride_log2); +} + +static void ionic_prep_rq_wqe(struct ionic_qp *qp, void *wqe) +{ + memset(wqe, 0, 1u << qp->rq.stride_log2); +} + +static int ionic_prep_send(struct ionic_qp *qp, + const struct ib_send_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + struct ionic_sq_meta *meta; + struct ionic_v1_wqe *wqe; + + meta = &qp->sq_meta[qp->sq.prod]; + wqe = ionic_queue_at_prod(&qp->sq); + + ionic_prep_sq_wqe(qp, wqe); + + meta->ibop = IB_WC_SEND; + + switch (wr->opcode) { + case IB_WR_SEND: + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND); + break; + case IB_WR_SEND_WITH_IMM: + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM); + wqe->base.imm_data_key = wr->ex.imm_data; + break; + case IB_WR_SEND_WITH_INV: + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_INV); + wqe->base.imm_data_key = + cpu_to_be32(wr->ex.invalidate_rkey); + break; + default: + return -EINVAL; + } + + return ionic_prep_common(qp, wr, meta, wqe); +} + +static int ionic_prep_send_ud(struct ionic_qp *qp, + const struct ib_ud_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + struct ionic_sq_meta *meta; + struct ionic_v1_wqe *wqe; + struct ionic_ah *ah; + + if (unlikely(!wr->ah)) + return -EINVAL; + + ah = to_ionic_ah(wr->ah); + + meta = &qp->sq_meta[qp->sq.prod]; + wqe = ionic_queue_at_prod(&qp->sq); + + ionic_prep_sq_wqe(qp, wqe); + + wqe->common.send.ah_id = cpu_to_be32(ah->ahid); + wqe->common.send.dest_qpn = cpu_to_be32(wr->remote_qpn); + wqe->common.send.dest_qkey = cpu_to_be32(wr->remote_qkey); + + meta->ibop = IB_WC_SEND; + + switch (wr->wr.opcode) { + case IB_WR_SEND: + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND); + break; + case IB_WR_SEND_WITH_IMM: + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM); + wqe->base.imm_data_key = wr->wr.ex.imm_data; + break; + default: + return -EINVAL; + } + + return ionic_prep_common(qp, &wr->wr, meta, wqe); +} + +static int ionic_prep_rdma(struct ionic_qp *qp, + const struct ib_rdma_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + struct ionic_sq_meta *meta; + struct ionic_v1_wqe *wqe; + + meta = &qp->sq_meta[qp->sq.prod]; + wqe = ionic_queue_at_prod(&qp->sq); + + ionic_prep_sq_wqe(qp, wqe); + + meta->ibop = IB_WC_RDMA_WRITE; + + switch (wr->wr.opcode) { + case IB_WR_RDMA_READ: + if 
(wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE)) + return -EINVAL; + meta->ibop = IB_WC_RDMA_READ; + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_READ); + break; + case IB_WR_RDMA_WRITE: + if (wr->wr.send_flags & IB_SEND_SOLICITED) + return -EINVAL; + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE); + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE_IMM); + wqe->base.imm_data_key = wr->wr.ex.imm_data; + break; + default: + return -EINVAL; + } + + wqe->common.rdma.remote_va_high = cpu_to_be32(wr->remote_addr >> 32); + wqe->common.rdma.remote_va_low = cpu_to_be32(wr->remote_addr); + wqe->common.rdma.remote_rkey = cpu_to_be32(wr->rkey); + + return ionic_prep_common(qp, &wr->wr, meta, wqe); +} + +static int ionic_prep_atomic(struct ionic_qp *qp, + const struct ib_atomic_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + struct ionic_sq_meta *meta; + struct ionic_v1_wqe *wqe; + + if (wr->wr.num_sge != 1 || wr->wr.sg_list[0].length != 8) + return -EINVAL; + + if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE)) + return -EINVAL; + + meta = &qp->sq_meta[qp->sq.prod]; + wqe = ionic_queue_at_prod(&qp->sq); + + ionic_prep_sq_wqe(qp, wqe); + + meta->ibop = IB_WC_RDMA_WRITE; + + switch (wr->wr.opcode) { + case IB_WR_ATOMIC_CMP_AND_SWP: + meta->ibop = IB_WC_COMP_SWAP; + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_CS); + wqe->atomic.swap_add_high = cpu_to_be32(wr->swap >> 32); + wqe->atomic.swap_add_low = cpu_to_be32(wr->swap); + wqe->atomic.compare_high = cpu_to_be32(wr->compare_add >> 32); + wqe->atomic.compare_low = cpu_to_be32(wr->compare_add); + break; + case IB_WR_ATOMIC_FETCH_AND_ADD: + meta->ibop = IB_WC_FETCH_ADD; + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_FA); + wqe->atomic.swap_add_high = cpu_to_be32(wr->compare_add >> 32); + wqe->atomic.swap_add_low = cpu_to_be32(wr->compare_add); + break; + default: + return -EINVAL; + } + + wqe->atomic.remote_va_high = cpu_to_be32(wr->remote_addr >> 32); + wqe->atomic.remote_va_low = cpu_to_be32(wr->remote_addr); + wqe->atomic.remote_rkey = cpu_to_be32(wr->rkey); + + wqe->base.num_sge_key = 1; + wqe->atomic.sge.va = cpu_to_be64(wr->wr.sg_list[0].addr); + wqe->atomic.sge.len = cpu_to_be32(8); + wqe->atomic.sge.lkey = cpu_to_be32(wr->wr.sg_list[0].lkey); + + return ionic_prep_common(qp, &wr->wr, meta, wqe); +} + +static int ionic_prep_inv(struct ionic_qp *qp, + const struct ib_send_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + struct ionic_sq_meta *meta; + struct ionic_v1_wqe *wqe; + + if (wr->send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE)) + return -EINVAL; + + meta = &qp->sq_meta[qp->sq.prod]; + wqe = ionic_queue_at_prod(&qp->sq); + + ionic_prep_sq_wqe(qp, wqe); + + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, LOCAL_INV); + wqe->base.imm_data_key = cpu_to_be32(wr->ex.invalidate_rkey); + + meta->len = 0; + meta->ibop = IB_WC_LOCAL_INV; + + ionic_prep_base(qp, wr, meta, wqe); + + return 0; +} + +static int ionic_prep_reg(struct ionic_qp *qp, + const struct ib_reg_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + struct ionic_mr *mr = to_ionic_mr(wr->mr); + struct ionic_sq_meta *meta; + struct ionic_v1_wqe *wqe; + __le64 dma_addr; + int flags; + + if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE)) + return -EINVAL; + + /* must call ib_map_mr_sg before posting reg wr */ + if (!mr->buf.tbl_pages) + return -EINVAL; + + meta = 
&qp->sq_meta[qp->sq.prod]; + wqe = ionic_queue_at_prod(&qp->sq); + + ionic_prep_sq_wqe(qp, wqe); + + flags = to_ionic_mr_flags(wr->access); + + wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, REG_MR); + wqe->base.num_sge_key = wr->key; + wqe->base.imm_data_key = cpu_to_be32(mr->ibmr.lkey); + wqe->reg_mr.va = cpu_to_be64(mr->ibmr.iova); + wqe->reg_mr.length = cpu_to_be64(mr->ibmr.length); + wqe->reg_mr.offset = ionic_pgtbl_off(&mr->buf, mr->ibmr.iova); + dma_addr = ionic_pgtbl_dma(&mr->buf, mr->ibmr.iova); + wqe->reg_mr.dma_addr = cpu_to_be64(le64_to_cpu(dma_addr)); + + wqe->reg_mr.map_count = cpu_to_be32(mr->buf.tbl_pages); + wqe->reg_mr.flags = cpu_to_be16(flags); + wqe->reg_mr.dir_size_log2 = 0; + wqe->reg_mr.page_size_log2 = order_base_2(mr->ibmr.page_size); + + meta->len = 0; + meta->ibop = IB_WC_REG_MR; + + ionic_prep_base(qp, &wr->wr, meta, wqe); + + return 0; +} + +static int ionic_prep_one_rc(struct ionic_qp *qp, + const struct ib_send_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + int rc = 0; + + switch (wr->opcode) { + case IB_WR_SEND: + case IB_WR_SEND_WITH_IMM: + case IB_WR_SEND_WITH_INV: + rc = ionic_prep_send(qp, wr); + break; + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + rc = ionic_prep_rdma(qp, rdma_wr(wr)); + break; + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + rc = ionic_prep_atomic(qp, atomic_wr(wr)); + break; + case IB_WR_LOCAL_INV: + rc = ionic_prep_inv(qp, wr); + break; + case IB_WR_REG_MR: + rc = ionic_prep_reg(qp, reg_wr(wr)); + break; + default: + ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode); + rc = -EINVAL; + } + + return rc; +} + +static int ionic_prep_one_ud(struct ionic_qp *qp, + const struct ib_send_wr *wr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device); + int rc = 0; + + switch (wr->opcode) { + case IB_WR_SEND: + case IB_WR_SEND_WITH_IMM: + rc = ionic_prep_send_ud(qp, ud_wr(wr)); + break; + default: + ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode); + rc = -EINVAL; + } + + return rc; +} + +static int ionic_prep_recv(struct ionic_qp *qp, + const struct ib_recv_wr *wr) +{ + struct ionic_rq_meta *meta; + struct ionic_v1_wqe *wqe; + s64 signed_len; + u32 mval; + + wqe = ionic_queue_at_prod(&qp->rq); + + /* if wqe is owned by device, caller can try posting again soon */ + if (wqe->base.flags & cpu_to_be16(IONIC_V1_FLAG_FENCE)) + return -EAGAIN; + + meta = qp->rq_meta_head; + if (unlikely(meta == IONIC_META_LAST) || + unlikely(meta == IONIC_META_POSTED)) + return -EIO; + + ionic_prep_rq_wqe(qp, wqe); + + mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec, + false); + signed_len = ionic_prep_pld(wqe, &wqe->recv.pld, + qp->rq_spec, mval, + wr->sg_list, wr->num_sge); + if (signed_len < 0) + return signed_len; + + meta->wrid = wr->wr_id; + + wqe->base.wqe_id = meta - qp->rq_meta; + wqe->base.num_sge_key = wr->num_sge; + + /* total length for recv goes in base imm_data_key */ + wqe->base.imm_data_key = cpu_to_be32(signed_len); + + ionic_queue_produce(&qp->rq); + + qp->rq_meta_head = meta->next; + meta->next = IONIC_META_POSTED; + + return 0; +} + +static int ionic_post_send_common(struct ionic_ibdev *dev, + struct ionic_vcq *vcq, + struct ionic_cq *cq, + struct ionic_qp *qp, + const struct ib_send_wr *wr, + const struct ib_send_wr **bad) +{ + unsigned long irqflags; + bool notify = false; + int spend, rc = 0; + + if (!bad) + return -EINVAL; + + if (!qp->has_sq) { + *bad = wr; + return -EINVAL; + } + + if (qp->state < 
IB_QPS_RTS) { + *bad = wr; + return -EINVAL; + } + + spin_lock_irqsave(&qp->sq_lock, irqflags); + + while (wr) { + if (ionic_queue_full(&qp->sq)) { + ibdev_dbg(&dev->ibdev, "queue full"); + rc = -ENOMEM; + goto out; + } + + if (qp->ibqp.qp_type == IB_QPT_UD || + qp->ibqp.qp_type == IB_QPT_GSI) + rc = ionic_prep_one_ud(qp, wr); + else + rc = ionic_prep_one_rc(qp, wr); + if (rc) + goto out; + + wr = wr->next; + } + +out: + spin_unlock_irqrestore(&qp->sq_lock, irqflags); + + spin_lock_irqsave(&cq->lock, irqflags); + spin_lock(&qp->sq_lock); + + if (likely(qp->sq.prod != qp->sq_old_prod)) { + /* ring cq doorbell just in time */ + spend = (qp->sq.prod - qp->sq_old_prod) & qp->sq.mask; + ionic_reserve_cq(dev, cq, spend); + + qp->sq_old_prod = qp->sq.prod; + + ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.sq_qtype, + ionic_queue_dbell_val(&qp->sq)); + } + + if (qp->sq_flush) { + notify = true; + cq->flush = true; + list_move_tail(&qp->cq_flush_sq, &cq->flush_sq); + } + + spin_unlock(&qp->sq_lock); + spin_unlock_irqrestore(&cq->lock, irqflags); + + if (notify && vcq->ibcq.comp_handler) + vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context); + + *bad = wr; + return rc; +} + +static int ionic_post_recv_common(struct ionic_ibdev *dev, + struct ionic_vcq *vcq, + struct ionic_cq *cq, + struct ionic_qp *qp, + const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad) +{ + unsigned long irqflags; + bool notify = false; + int spend, rc = 0; + + if (!bad) + return -EINVAL; + + if (!qp->has_rq) { + *bad = wr; + return -EINVAL; + } + + if (qp->state < IB_QPS_INIT) { + *bad = wr; + return -EINVAL; + } + + spin_lock_irqsave(&qp->rq_lock, irqflags); + + while (wr) { + if (ionic_queue_full(&qp->rq)) { + ibdev_dbg(&dev->ibdev, "queue full"); + rc = -ENOMEM; + goto out; + } + + rc = ionic_prep_recv(qp, wr); + if (rc) + goto out; + + wr = wr->next; + } + +out: + if (!cq) { + spin_unlock_irqrestore(&qp->rq_lock, irqflags); + goto out_unlocked; + } + spin_unlock_irqrestore(&qp->rq_lock, irqflags); + + spin_lock_irqsave(&cq->lock, irqflags); + spin_lock(&qp->rq_lock); + + if (likely(qp->rq.prod != qp->rq_old_prod)) { + /* ring cq doorbell just in time */ + spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask; + ionic_reserve_cq(dev, cq, spend); + + qp->rq_old_prod = qp->rq.prod; + + ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.rq_qtype, + ionic_queue_dbell_val(&qp->rq)); + } + + if (qp->rq_flush) { + notify = true; + cq->flush = true; + list_move_tail(&qp->cq_flush_rq, &cq->flush_rq); + } + + spin_unlock(&qp->rq_lock); + spin_unlock_irqrestore(&cq->lock, irqflags); + + if (notify && vcq->ibcq.comp_handler) + vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context); + +out_unlocked: + *bad = wr; + return rc; +} + +int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device); + struct ionic_vcq *vcq = to_ionic_vcq(ibqp->send_cq); + struct ionic_qp *qp = to_ionic_qp(ibqp); + struct ionic_cq *cq = + to_ionic_vcq_cq(ibqp->send_cq, qp->udma_idx); + + return ionic_post_send_common(dev, vcq, cq, qp, wr, bad); +} + +int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device); + struct ionic_vcq *vcq = to_ionic_vcq(ibqp->recv_cq); + struct ionic_qp *qp = to_ionic_qp(ibqp); + struct ionic_cq *cq = + to_ionic_vcq_cq(ibqp->recv_cq, qp->udma_idx); + + return ionic_post_recv_common(dev, vcq, cq, qp, wr, bad); +} 
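
For context, the sketch below (not part of the patch) shows how the datapath verbs added above are typically driven from a kernel ULP through the core ib_post_send()/ib_poll_cq()/ib_req_notify_cq() wrappers, including the IB_CQ_REPORT_MISSED_EVENTS re-poll convention documented in ionic_req_notify_vcq_cq(). It is a minimal illustration only, assuming the QP is already in RTS and that payload_dma/payload_lkey come from a memory region registered elsewhere; the function name and parameters are hypothetical, not part of the driver.

#include <linux/sched.h>
#include <rdma/ib_verbs.h>

/* Illustrative caller-side sketch; qp, cq and the registered buffer are
 * assumed to have been set up elsewhere.
 */
static int example_send_and_reap(struct ib_qp *qp, struct ib_cq *cq,
				 u64 payload_dma, u32 payload_lkey, u32 len)
{
	struct ib_sge sge = {
		.addr	= payload_dma,	/* DMA address of a registered buffer */
		.length	= len,
		.lkey	= payload_lkey,
	};
	struct ib_send_wr wr = {
		.wr_id		= 1,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,		/* prepared by ionic_prep_send() */
		.send_flags	= IB_SEND_SIGNALED,	/* ask for a send completion */
	};
	const struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int rc;

	rc = ib_post_send(qp, &wr, &bad_wr);		/* enters ionic_post_send() */
	if (rc)
		return rc;

	for (;;) {
		rc = ib_poll_cq(cq, 1, &wc);		/* enters ionic_poll_cq() */
		if (rc < 0)
			return rc;
		if (rc == 1)
			break;

		/*
		 * Arm the CQ.  A nonzero return with IB_CQ_REPORT_MISSED_EVENTS
		 * means the next CQE is already present (the color-bit check in
		 * ionic_req_notify_vcq_cq()), so poll again immediately.  A real
		 * ULP would otherwise sleep until its comp_handler runs; this
		 * sketch simply yields and retries.
		 */
		if (!ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					   IB_CQ_REPORT_MISSED_EVENTS))
			cond_resched();
	}

	return wc.status == IB_WC_SUCCESS ? 0 : -EIO;
}
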
diff --git a/drivers/infiniband/hw/ionic/ionic_fw.h b/drivers/infiniband/hw/ionic/ionic_fw.h new file mode 100644 index 000000000000..adfbb89d856c --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_fw.h @@ -0,0 +1,1029 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#ifndef _IONIC_FW_H_ +#define _IONIC_FW_H_ + +#include <linux/kernel.h> +#include <rdma/ib_verbs.h> + +/* common for ib spec */ + +#define IONIC_EXP_DBELL_SZ 8 + +enum ionic_mrid_bits { + IONIC_MRID_INDEX_SHIFT = 8, +}; + +static inline u32 ionic_mrid(u32 index, u8 key) +{ + return (index << IONIC_MRID_INDEX_SHIFT) | key; +} + +static inline u32 ionic_mrid_index(u32 lrkey) +{ + return lrkey >> IONIC_MRID_INDEX_SHIFT; +} + +/* common to all versions */ + +/* wqe scatter gather element */ +struct ionic_sge { + __be64 va; + __be32 len; + __be32 lkey; +}; + +/* admin queue mr type */ +enum ionic_mr_flags { + /* bits that determine mr access */ + IONIC_MRF_LOCAL_WRITE = BIT(0), + IONIC_MRF_REMOTE_WRITE = BIT(1), + IONIC_MRF_REMOTE_READ = BIT(2), + IONIC_MRF_REMOTE_ATOMIC = BIT(3), + IONIC_MRF_MW_BIND = BIT(4), + IONIC_MRF_ZERO_BASED = BIT(5), + IONIC_MRF_ON_DEMAND = BIT(6), + IONIC_MRF_PB = BIT(7), + IONIC_MRF_ACCESS_MASK = BIT(12) - 1, + + /* bits that determine mr type */ + IONIC_MRF_UKEY_EN = BIT(13), + IONIC_MRF_IS_MW = BIT(14), + IONIC_MRF_INV_EN = BIT(15), + + /* base flags combinations for mr types */ + IONIC_MRF_USER_MR = 0, + IONIC_MRF_PHYS_MR = (IONIC_MRF_UKEY_EN | + IONIC_MRF_INV_EN), + IONIC_MRF_MW_1 = (IONIC_MRF_UKEY_EN | + IONIC_MRF_IS_MW), + IONIC_MRF_MW_2 = (IONIC_MRF_UKEY_EN | + IONIC_MRF_IS_MW | + IONIC_MRF_INV_EN), +}; + +static inline int to_ionic_mr_flags(int access) +{ + int flags = 0; + + if (access & IB_ACCESS_LOCAL_WRITE) + flags |= IONIC_MRF_LOCAL_WRITE; + + if (access & IB_ACCESS_REMOTE_READ) + flags |= IONIC_MRF_REMOTE_READ; + + if (access & IB_ACCESS_REMOTE_WRITE) + flags |= IONIC_MRF_REMOTE_WRITE; + + if (access & IB_ACCESS_REMOTE_ATOMIC) + flags |= IONIC_MRF_REMOTE_ATOMIC; + + if (access & IB_ACCESS_MW_BIND) + flags |= IONIC_MRF_MW_BIND; + + if (access & IB_ZERO_BASED) + flags |= IONIC_MRF_ZERO_BASED; + + return flags; +} + +enum ionic_qp_flags { + /* bits that determine qp access */ + IONIC_QPF_REMOTE_WRITE = BIT(0), + IONIC_QPF_REMOTE_READ = BIT(1), + IONIC_QPF_REMOTE_ATOMIC = BIT(2), + + /* bits that determine other qp behavior */ + IONIC_QPF_SQ_PB = BIT(6), + IONIC_QPF_RQ_PB = BIT(7), + IONIC_QPF_SQ_SPEC = BIT(8), + IONIC_QPF_RQ_SPEC = BIT(9), + IONIC_QPF_REMOTE_PRIVILEGED = BIT(10), + IONIC_QPF_SQ_DRAINING = BIT(11), + IONIC_QPF_SQD_NOTIFY = BIT(12), + IONIC_QPF_SQ_CMB = BIT(13), + IONIC_QPF_RQ_CMB = BIT(14), + IONIC_QPF_PRIVILEGED = BIT(15), +}; + +static inline int from_ionic_qp_flags(int flags) +{ + int access_flags = 0; + + if (flags & IONIC_QPF_REMOTE_WRITE) + access_flags |= IB_ACCESS_REMOTE_WRITE; + + if (flags & IONIC_QPF_REMOTE_READ) + access_flags |= IB_ACCESS_REMOTE_READ; + + if (flags & IONIC_QPF_REMOTE_ATOMIC) + access_flags |= IB_ACCESS_REMOTE_ATOMIC; + + return access_flags; +} + +static inline int to_ionic_qp_flags(int access, bool sqd_notify, + bool sq_is_cmb, bool rq_is_cmb, + bool sq_spec, bool rq_spec, + bool privileged, bool remote_privileged) +{ + int flags = 0; + + if (access & IB_ACCESS_REMOTE_WRITE) + flags |= IONIC_QPF_REMOTE_WRITE; + + if (access & IB_ACCESS_REMOTE_READ) + flags |= IONIC_QPF_REMOTE_READ; + + if (access & IB_ACCESS_REMOTE_ATOMIC) + flags |= IONIC_QPF_REMOTE_ATOMIC; + + if (sqd_notify) + flags 
|= IONIC_QPF_SQD_NOTIFY; + + if (sq_is_cmb) + flags |= IONIC_QPF_SQ_CMB; + + if (rq_is_cmb) + flags |= IONIC_QPF_RQ_CMB; + + if (sq_spec) + flags |= IONIC_QPF_SQ_SPEC; + + if (rq_spec) + flags |= IONIC_QPF_RQ_SPEC; + + if (privileged) + flags |= IONIC_QPF_PRIVILEGED; + + if (remote_privileged) + flags |= IONIC_QPF_REMOTE_PRIVILEGED; + + return flags; +} + +/* cqe non-admin status indicated in status_length field when err bit is set */ +enum ionic_status { + IONIC_STS_OK, + IONIC_STS_LOCAL_LEN_ERR, + IONIC_STS_LOCAL_QP_OPER_ERR, + IONIC_STS_LOCAL_PROT_ERR, + IONIC_STS_WQE_FLUSHED_ERR, + IONIC_STS_MEM_MGMT_OPER_ERR, + IONIC_STS_BAD_RESP_ERR, + IONIC_STS_LOCAL_ACC_ERR, + IONIC_STS_REMOTE_INV_REQ_ERR, + IONIC_STS_REMOTE_ACC_ERR, + IONIC_STS_REMOTE_OPER_ERR, + IONIC_STS_RETRY_EXCEEDED, + IONIC_STS_RNR_RETRY_EXCEEDED, + IONIC_STS_XRC_VIO_ERR, + IONIC_STS_LOCAL_SGL_INV_ERR, +}; + +static inline int ionic_to_ib_status(int sts) +{ + switch (sts) { + case IONIC_STS_OK: + return IB_WC_SUCCESS; + case IONIC_STS_LOCAL_LEN_ERR: + return IB_WC_LOC_LEN_ERR; + case IONIC_STS_LOCAL_QP_OPER_ERR: + case IONIC_STS_LOCAL_SGL_INV_ERR: + return IB_WC_LOC_QP_OP_ERR; + case IONIC_STS_LOCAL_PROT_ERR: + return IB_WC_LOC_PROT_ERR; + case IONIC_STS_WQE_FLUSHED_ERR: + return IB_WC_WR_FLUSH_ERR; + case IONIC_STS_MEM_MGMT_OPER_ERR: + return IB_WC_MW_BIND_ERR; + case IONIC_STS_BAD_RESP_ERR: + return IB_WC_BAD_RESP_ERR; + case IONIC_STS_LOCAL_ACC_ERR: + return IB_WC_LOC_ACCESS_ERR; + case IONIC_STS_REMOTE_INV_REQ_ERR: + return IB_WC_REM_INV_REQ_ERR; + case IONIC_STS_REMOTE_ACC_ERR: + return IB_WC_REM_ACCESS_ERR; + case IONIC_STS_REMOTE_OPER_ERR: + return IB_WC_REM_OP_ERR; + case IONIC_STS_RETRY_EXCEEDED: + return IB_WC_RETRY_EXC_ERR; + case IONIC_STS_RNR_RETRY_EXCEEDED: + return IB_WC_RNR_RETRY_EXC_ERR; + case IONIC_STS_XRC_VIO_ERR: + default: + return IB_WC_GENERAL_ERR; + } +} + +/* admin queue qp type */ +enum ionic_qp_type { + IONIC_QPT_RC, + IONIC_QPT_UC, + IONIC_QPT_RD, + IONIC_QPT_UD, + IONIC_QPT_SRQ, + IONIC_QPT_XRC_INI, + IONIC_QPT_XRC_TGT, + IONIC_QPT_XRC_SRQ, +}; + +static inline int to_ionic_qp_type(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_GSI: + case IB_QPT_UD: + return IONIC_QPT_UD; + case IB_QPT_RC: + return IONIC_QPT_RC; + case IB_QPT_UC: + return IONIC_QPT_UC; + case IB_QPT_XRC_INI: + return IONIC_QPT_XRC_INI; + case IB_QPT_XRC_TGT: + return IONIC_QPT_XRC_TGT; + default: + return -EINVAL; + } +} + +/* admin queue qp state */ +enum ionic_qp_state { + IONIC_QPS_RESET, + IONIC_QPS_INIT, + IONIC_QPS_RTR, + IONIC_QPS_RTS, + IONIC_QPS_SQD, + IONIC_QPS_SQE, + IONIC_QPS_ERR, +}; + +static inline int from_ionic_qp_state(enum ionic_qp_state state) +{ + switch (state) { + case IONIC_QPS_RESET: + return IB_QPS_RESET; + case IONIC_QPS_INIT: + return IB_QPS_INIT; + case IONIC_QPS_RTR: + return IB_QPS_RTR; + case IONIC_QPS_RTS: + return IB_QPS_RTS; + case IONIC_QPS_SQD: + return IB_QPS_SQD; + case IONIC_QPS_SQE: + return IB_QPS_SQE; + case IONIC_QPS_ERR: + return IB_QPS_ERR; + default: + return -EINVAL; + } +} + +static inline int to_ionic_qp_state(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: + return IONIC_QPS_RESET; + case IB_QPS_INIT: + return IONIC_QPS_INIT; + case IB_QPS_RTR: + return IONIC_QPS_RTR; + case IB_QPS_RTS: + return IONIC_QPS_RTS; + case IB_QPS_SQD: + return IONIC_QPS_SQD; + case IB_QPS_SQE: + return IONIC_QPS_SQE; + case IB_QPS_ERR: + return IONIC_QPS_ERR; + default: + return 0; + } +} + +static inline int to_ionic_qp_modify_state(enum ib_qp_state to_state, + enum 
ib_qp_state from_state) +{ + return to_ionic_qp_state(to_state) | + (to_ionic_qp_state(from_state) << 4); +} + +/* fw abi v1 */ + +/* data payload part of v1 wqe */ +union ionic_v1_pld { + struct ionic_sge sgl[2]; + __be32 spec32[8]; + __be16 spec16[16]; + __u8 data[32]; +}; + +/* completion queue v1 cqe */ +struct ionic_v1_cqe { + union { + struct { + __be16 cmd_idx; + __u8 cmd_op; + __u8 rsvd[17]; + __le16 old_sq_cindex; + __le16 old_rq_cq_cindex; + } admin; + struct { + __u64 wqe_id; + __be32 src_qpn_op; + __u8 src_mac[6]; + __be16 vlan_tag; + __be32 imm_data_rkey; + } recv; + struct { + __u8 rsvd[4]; + __be32 msg_msn; + __u8 rsvd2[8]; + __u64 npg_wqe_id; + } send; + }; + __be32 status_length; + __be32 qid_type_flags; +}; + +/* bits for cqe recv */ +enum ionic_v1_cqe_src_qpn_bits { + IONIC_V1_CQE_RECV_QPN_MASK = 0xffffff, + IONIC_V1_CQE_RECV_OP_SHIFT = 24, + + /* MASK could be 0x3, but need 0x1f for makeshift values: + * OP_TYPE_RDMA_OPER_WITH_IMM, OP_TYPE_SEND_RCVD + */ + IONIC_V1_CQE_RECV_OP_MASK = 0x1f, + IONIC_V1_CQE_RECV_OP_SEND = 0, + IONIC_V1_CQE_RECV_OP_SEND_INV = 1, + IONIC_V1_CQE_RECV_OP_SEND_IMM = 2, + IONIC_V1_CQE_RECV_OP_RDMA_IMM = 3, + + IONIC_V1_CQE_RECV_IS_IPV4 = BIT(7 + IONIC_V1_CQE_RECV_OP_SHIFT), + IONIC_V1_CQE_RECV_IS_VLAN = BIT(6 + IONIC_V1_CQE_RECV_OP_SHIFT), +}; + +/* bits for cqe qid_type_flags */ +enum ionic_v1_cqe_qtf_bits { + IONIC_V1_CQE_COLOR = BIT(0), + IONIC_V1_CQE_ERROR = BIT(1), + IONIC_V1_CQE_TYPE_SHIFT = 5, + IONIC_V1_CQE_TYPE_MASK = 0x7, + IONIC_V1_CQE_QID_SHIFT = 8, + + IONIC_V1_CQE_TYPE_ADMIN = 0, + IONIC_V1_CQE_TYPE_RECV = 1, + IONIC_V1_CQE_TYPE_SEND_MSN = 2, + IONIC_V1_CQE_TYPE_SEND_NPG = 3, +}; + +static inline bool ionic_v1_cqe_color(struct ionic_v1_cqe *cqe) +{ + return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_COLOR); +} + +static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe) +{ + return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR); +} + +static inline bool ionic_v1_cqe_recv_is_ipv4(struct ionic_v1_cqe *cqe) +{ + return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_IPV4); +} + +static inline bool ionic_v1_cqe_recv_is_vlan(struct ionic_v1_cqe *cqe) +{ + return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_VLAN); +} + +static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe) +{ + cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT); +} + +static inline u32 ionic_v1_cqe_qtf(struct ionic_v1_cqe *cqe) +{ + return be32_to_cpu(cqe->qid_type_flags); +} + +static inline u8 ionic_v1_cqe_qtf_type(u32 qtf) +{ + return (qtf >> IONIC_V1_CQE_TYPE_SHIFT) & IONIC_V1_CQE_TYPE_MASK; +} + +static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf) +{ + return qtf >> IONIC_V1_CQE_QID_SHIFT; +} + +/* v1 base wqe header */ +struct ionic_v1_base_hdr { + __u64 wqe_id; + __u8 op; + __u8 num_sge_key; + __be16 flags; + __be32 imm_data_key; +}; + +/* v1 receive wqe body */ +struct ionic_v1_recv_bdy { + __u8 rsvd[16]; + union ionic_v1_pld pld; +}; + +/* v1 send/rdma wqe body (common, has sgl) */ +struct ionic_v1_common_bdy { + union { + struct { + __be32 ah_id; + __be32 dest_qpn; + __be32 dest_qkey; + } send; + struct { + __be32 remote_va_high; + __be32 remote_va_low; + __be32 remote_rkey; + } rdma; + }; + __be32 length; + union ionic_v1_pld pld; +}; + +/* v1 atomic wqe body */ +struct ionic_v1_atomic_bdy { + __be32 remote_va_high; + __be32 remote_va_low; + __be32 remote_rkey; + __be32 swap_add_high; + __be32 swap_add_low; + __be32 compare_high; + __be32 compare_low; + __u8 rsvd[4]; + struct ionic_sge sge; +}; + +/* v1 
reg mr wqe body */ +struct ionic_v1_reg_mr_bdy { + __be64 va; + __be64 length; + __be64 offset; + __be64 dma_addr; + __be32 map_count; + __be16 flags; + __u8 dir_size_log2; + __u8 page_size_log2; + __u8 rsvd[8]; +}; + +/* v1 bind mw wqe body */ +struct ionic_v1_bind_mw_bdy { + __be64 va; + __be64 length; + __be32 lkey; + __be16 flags; + __u8 rsvd[26]; +}; + +/* v1 send/recv wqe */ +struct ionic_v1_wqe { + struct ionic_v1_base_hdr base; + union { + struct ionic_v1_recv_bdy recv; + struct ionic_v1_common_bdy common; + struct ionic_v1_atomic_bdy atomic; + struct ionic_v1_reg_mr_bdy reg_mr; + struct ionic_v1_bind_mw_bdy bind_mw; + }; +}; + +/* queue pair v1 send opcodes */ +enum ionic_v1_op { + IONIC_V1_OP_SEND, + IONIC_V1_OP_SEND_INV, + IONIC_V1_OP_SEND_IMM, + IONIC_V1_OP_RDMA_READ, + IONIC_V1_OP_RDMA_WRITE, + IONIC_V1_OP_RDMA_WRITE_IMM, + IONIC_V1_OP_ATOMIC_CS, + IONIC_V1_OP_ATOMIC_FA, + IONIC_V1_OP_REG_MR, + IONIC_V1_OP_LOCAL_INV, + IONIC_V1_OP_BIND_MW, + + /* flags */ + IONIC_V1_FLAG_FENCE = BIT(0), + IONIC_V1_FLAG_SOL = BIT(1), + IONIC_V1_FLAG_INL = BIT(2), + IONIC_V1_FLAG_SIG = BIT(3), + + /* flags last four bits for sgl spec format */ + IONIC_V1_FLAG_SPEC32 = (1u << 12), + IONIC_V1_FLAG_SPEC16 = (2u << 12), + IONIC_V1_SPEC_FIRST_SGE = 2, +}; + +/* queue pair v2 send opcodes */ +enum ionic_v2_op { + IONIC_V2_OPSL_OUT = 0x20, + IONIC_V2_OPSL_IMM = 0x40, + IONIC_V2_OPSL_INV = 0x80, + + IONIC_V2_OP_SEND = 0x0 | IONIC_V2_OPSL_OUT, + IONIC_V2_OP_SEND_IMM = IONIC_V2_OP_SEND | IONIC_V2_OPSL_IMM, + IONIC_V2_OP_SEND_INV = IONIC_V2_OP_SEND | IONIC_V2_OPSL_INV, + + IONIC_V2_OP_RDMA_WRITE = 0x1 | IONIC_V2_OPSL_OUT, + IONIC_V2_OP_RDMA_WRITE_IMM = IONIC_V2_OP_RDMA_WRITE | IONIC_V2_OPSL_IMM, + + IONIC_V2_OP_RDMA_READ = 0x2, + + IONIC_V2_OP_ATOMIC_CS = 0x4, + IONIC_V2_OP_ATOMIC_FA = 0x5, + IONIC_V2_OP_REG_MR = 0x6, + IONIC_V2_OP_LOCAL_INV = 0x7, + IONIC_V2_OP_BIND_MW = 0x8, +}; + +static inline size_t ionic_v1_send_wqe_min_size(int min_sge, int min_data, + int spec, bool expdb) +{ + size_t sz_wqe, sz_sgl, sz_data; + + if (spec > IONIC_V1_SPEC_FIRST_SGE) + min_sge += IONIC_V1_SPEC_FIRST_SGE; + + if (expdb) { + min_sge += 1; + min_data += IONIC_EXP_DBELL_SZ; + } + + sz_wqe = sizeof(struct ionic_v1_wqe); + sz_sgl = offsetof(struct ionic_v1_wqe, common.pld.sgl[min_sge]); + sz_data = offsetof(struct ionic_v1_wqe, common.pld.data[min_data]); + + if (sz_sgl > sz_wqe) + sz_wqe = sz_sgl; + + if (sz_data > sz_wqe) + sz_wqe = sz_data; + + return sz_wqe; +} + +static inline int ionic_v1_send_wqe_max_sge(u8 stride_log2, int spec, + bool expdb) +{ + struct ionic_sge *sge = (void *)(1ull << stride_log2); + struct ionic_v1_wqe *wqe = (void *)0; + int num_sge = 0; + + if (expdb) + sge -= 1; + + if (spec > IONIC_V1_SPEC_FIRST_SGE) + num_sge = IONIC_V1_SPEC_FIRST_SGE; + + num_sge = sge - &wqe->common.pld.sgl[num_sge]; + + if (spec && num_sge > spec) + num_sge = spec; + + return num_sge; +} + +static inline int ionic_v1_send_wqe_max_data(u8 stride_log2, bool expdb) +{ + struct ionic_v1_wqe *wqe = (void *)0; + __u8 *data = (void *)(1ull << stride_log2); + + if (expdb) + data -= IONIC_EXP_DBELL_SZ; + + return data - wqe->common.pld.data; +} + +static inline size_t ionic_v1_recv_wqe_min_size(int min_sge, int spec, + bool expdb) +{ + size_t sz_wqe, sz_sgl; + + if (spec > IONIC_V1_SPEC_FIRST_SGE) + min_sge += IONIC_V1_SPEC_FIRST_SGE; + + if (expdb) + min_sge += 1; + + sz_wqe = sizeof(struct ionic_v1_wqe); + sz_sgl = offsetof(struct ionic_v1_wqe, recv.pld.sgl[min_sge]); + + if (sz_sgl > sz_wqe) + sz_wqe = sz_sgl; + + return 
sz_wqe; +} + +static inline int ionic_v1_recv_wqe_max_sge(u8 stride_log2, int spec, + bool expdb) +{ + struct ionic_sge *sge = (void *)(1ull << stride_log2); + struct ionic_v1_wqe *wqe = (void *)0; + int num_sge = 0; + + if (expdb) + sge -= 1; + + if (spec > IONIC_V1_SPEC_FIRST_SGE) + num_sge = IONIC_V1_SPEC_FIRST_SGE; + + num_sge = sge - &wqe->recv.pld.sgl[num_sge]; + + if (spec && num_sge > spec) + num_sge = spec; + + return num_sge; +} + +static inline int ionic_v1_use_spec_sge(int min_sge, int spec) +{ + if (!spec || min_sge > spec) + return 0; + + if (min_sge <= IONIC_V1_SPEC_FIRST_SGE) + return IONIC_V1_SPEC_FIRST_SGE; + + return spec; +} + +struct ionic_admin_stats_hdr { + __le64 dma_addr; + __le32 length; + __le32 id_ver; + __u8 type_state; +} __packed; + +#define IONIC_ADMIN_STATS_HDRS_IN_V1_LEN 17 +static_assert(sizeof(struct ionic_admin_stats_hdr) == + IONIC_ADMIN_STATS_HDRS_IN_V1_LEN); + +struct ionic_admin_create_ah { + __le64 dma_addr; + __le32 length; + __le32 pd_id; + __le32 id_ver; + __le16 dbid_flags; + __u8 csum_profile; + __u8 crypto; +} __packed; + +#define IONIC_ADMIN_CREATE_AH_IN_V1_LEN 24 +static_assert(sizeof(struct ionic_admin_create_ah) == + IONIC_ADMIN_CREATE_AH_IN_V1_LEN); + +struct ionic_admin_destroy_ah { + __le32 ah_id; +} __packed; + +#define IONIC_ADMIN_DESTROY_AH_IN_V1_LEN 4 +static_assert(sizeof(struct ionic_admin_destroy_ah) == + IONIC_ADMIN_DESTROY_AH_IN_V1_LEN); + +struct ionic_admin_query_ah { + __le64 dma_addr; +} __packed; + +#define IONIC_ADMIN_QUERY_AH_IN_V1_LEN 8 +static_assert(sizeof(struct ionic_admin_query_ah) == + IONIC_ADMIN_QUERY_AH_IN_V1_LEN); + +struct ionic_admin_create_mr { + __le64 va; + __le64 length; + __le32 pd_id; + __le32 id_ver; + __le32 tbl_index; + __le32 map_count; + __le64 dma_addr; + __le16 dbid_flags; + __u8 pt_type; + __u8 dir_size_log2; + __u8 page_size_log2; +} __packed; + +#define IONIC_ADMIN_CREATE_MR_IN_V1_LEN 45 +static_assert(sizeof(struct ionic_admin_create_mr) == + IONIC_ADMIN_CREATE_MR_IN_V1_LEN); + +struct ionic_admin_destroy_mr { + __le32 mr_id; +} __packed; + +#define IONIC_ADMIN_DESTROY_MR_IN_V1_LEN 4 +static_assert(sizeof(struct ionic_admin_destroy_mr) == + IONIC_ADMIN_DESTROY_MR_IN_V1_LEN); + +struct ionic_admin_create_cq { + __le32 eq_id; + __u8 depth_log2; + __u8 stride_log2; + __u8 dir_size_log2_rsvd; + __u8 page_size_log2; + __le32 cq_flags; + __le32 id_ver; + __le32 tbl_index; + __le32 map_count; + __le64 dma_addr; + __le16 dbid_flags; +} __packed; + +#define IONIC_ADMIN_CREATE_CQ_IN_V1_LEN 34 +static_assert(sizeof(struct ionic_admin_create_cq) == + IONIC_ADMIN_CREATE_CQ_IN_V1_LEN); + +struct ionic_admin_destroy_cq { + __le32 cq_id; +} __packed; + +#define IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN 4 +static_assert(sizeof(struct ionic_admin_destroy_cq) == + IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN); + +struct ionic_admin_create_qp { + __le32 pd_id; + __be32 priv_flags; + __le32 sq_cq_id; + __u8 sq_depth_log2; + __u8 sq_stride_log2; + __u8 sq_dir_size_log2_rsvd; + __u8 sq_page_size_log2; + __le32 sq_tbl_index_xrcd_id; + __le32 sq_map_count; + __le64 sq_dma_addr; + __le32 rq_cq_id; + __u8 rq_depth_log2; + __u8 rq_stride_log2; + __u8 rq_dir_size_log2_rsvd; + __u8 rq_page_size_log2; + __le32 rq_tbl_index_srq_id; + __le32 rq_map_count; + __le64 rq_dma_addr; + __le32 id_ver; + __le16 dbid_flags; + __u8 type_state; + __u8 rsvd; +} __packed; + +#define IONIC_ADMIN_CREATE_QP_IN_V1_LEN 64 +static_assert(sizeof(struct ionic_admin_create_qp) == + IONIC_ADMIN_CREATE_QP_IN_V1_LEN); + +struct ionic_admin_destroy_qp { + __le32 qp_id; 
+} __packed; + +#define IONIC_ADMIN_DESTROY_QP_IN_V1_LEN 4 +static_assert(sizeof(struct ionic_admin_destroy_qp) == + IONIC_ADMIN_DESTROY_QP_IN_V1_LEN); + +struct ionic_admin_mod_qp { + __be32 attr_mask; + __u8 dcqcn_profile; + __u8 tfp_csum_profile; + __be16 access_flags; + __le32 rq_psn; + __le32 sq_psn; + __le32 qkey_dest_qpn; + __le32 rate_limit_kbps; + __u8 pmtu; + __u8 retry; + __u8 rnr_timer; + __u8 retry_timeout; + __u8 rsq_depth; + __u8 rrq_depth; + __le16 pkey_id; + __le32 ah_id_len; + __u8 en_pcp; + __u8 ip_dscp; + __u8 rsvd2; + __u8 type_state; + union { + struct { + __le16 rsvd1; + }; + __le32 rrq_index; + }; + __le32 rsq_index; + __le64 dma_addr; + __le32 id_ver; +} __packed; + +#define IONIC_ADMIN_MODIFY_QP_IN_V1_LEN 60 +static_assert(sizeof(struct ionic_admin_mod_qp) == + IONIC_ADMIN_MODIFY_QP_IN_V1_LEN); + +struct ionic_admin_query_qp { + __le64 hdr_dma_addr; + __le64 sq_dma_addr; + __le64 rq_dma_addr; + __le32 ah_id; + __le32 id_ver; + __le16 dbid_flags; +} __packed; + +#define IONIC_ADMIN_QUERY_QP_IN_V1_LEN 34 +static_assert(sizeof(struct ionic_admin_query_qp) == + IONIC_ADMIN_QUERY_QP_IN_V1_LEN); + +#define ADMIN_WQE_STRIDE 64 +#define ADMIN_WQE_HDR_LEN 4 + +/* admin queue v1 wqe */ +struct ionic_v1_admin_wqe { + __u8 op; + __u8 rsvd; + __le16 len; + + union { + struct ionic_admin_stats_hdr stats; + struct ionic_admin_create_ah create_ah; + struct ionic_admin_destroy_ah destroy_ah; + struct ionic_admin_query_ah query_ah; + struct ionic_admin_create_mr create_mr; + struct ionic_admin_destroy_mr destroy_mr; + struct ionic_admin_create_cq create_cq; + struct ionic_admin_destroy_cq destroy_cq; + struct ionic_admin_create_qp create_qp; + struct ionic_admin_destroy_qp destroy_qp; + struct ionic_admin_mod_qp mod_qp; + struct ionic_admin_query_qp query_qp; + } cmd; +}; + +/* side data for query qp */ +struct ionic_v1_admin_query_qp_sq { + __u8 rnr_timer; + __u8 retry_timeout; + __be16 access_perms_flags; + __be16 rsvd; + __be16 pkey_id; + __be32 qkey_dest_qpn; + __be32 rate_limit_kbps; + __be32 rq_psn; +}; + +struct ionic_v1_admin_query_qp_rq { + __u8 state_pmtu; + __u8 retry_rnrtry; + __u8 rrq_depth; + __u8 rsq_depth; + __be32 sq_psn; + __be16 access_perms_flags; + __be16 rsvd; +}; + +/* admin queue v1 opcodes */ +enum ionic_v1_admin_op { + IONIC_V1_ADMIN_NOOP, + IONIC_V1_ADMIN_CREATE_CQ, + IONIC_V1_ADMIN_CREATE_QP, + IONIC_V1_ADMIN_CREATE_MR, + IONIC_V1_ADMIN_STATS_HDRS, + IONIC_V1_ADMIN_STATS_VALS, + IONIC_V1_ADMIN_DESTROY_MR, + IONIC_V1_ADMIN_RSVD_7, /* RESIZE_CQ */ + IONIC_V1_ADMIN_DESTROY_CQ, + IONIC_V1_ADMIN_MODIFY_QP, + IONIC_V1_ADMIN_QUERY_QP, + IONIC_V1_ADMIN_DESTROY_QP, + IONIC_V1_ADMIN_DEBUG, + IONIC_V1_ADMIN_CREATE_AH, + IONIC_V1_ADMIN_QUERY_AH, + IONIC_V1_ADMIN_MODIFY_DCQCN, + IONIC_V1_ADMIN_DESTROY_AH, + IONIC_V1_ADMIN_QP_STATS_HDRS, + IONIC_V1_ADMIN_QP_STATS_VALS, + IONIC_V1_ADMIN_OPCODES_MAX, +}; + +/* admin queue v1 cqe status */ +enum ionic_v1_admin_status { + IONIC_V1_ASTS_OK, + IONIC_V1_ASTS_BAD_CMD, + IONIC_V1_ASTS_BAD_INDEX, + IONIC_V1_ASTS_BAD_STATE, + IONIC_V1_ASTS_BAD_TYPE, + IONIC_V1_ASTS_BAD_ATTR, + IONIC_V1_ASTS_MSG_TOO_BIG, +}; + +/* event queue v1 eqe */ +struct ionic_v1_eqe { + __be32 evt; +}; + +/* bits for cqe queue_type_flags */ +enum ionic_v1_eqe_evt_bits { + IONIC_V1_EQE_COLOR = BIT(0), + IONIC_V1_EQE_TYPE_SHIFT = 1, + IONIC_V1_EQE_TYPE_MASK = 0x7, + IONIC_V1_EQE_CODE_SHIFT = 4, + IONIC_V1_EQE_CODE_MASK = 0xf, + IONIC_V1_EQE_QID_SHIFT = 8, + + /* cq events */ + IONIC_V1_EQE_TYPE_CQ = 0, + /* cq normal events */ + IONIC_V1_EQE_CQ_NOTIFY = 0, + 
/* cq error events */ + IONIC_V1_EQE_CQ_ERR = 8, + + /* qp and srq events */ + IONIC_V1_EQE_TYPE_QP = 1, + /* qp normal events */ + IONIC_V1_EQE_SRQ_LEVEL = 0, + IONIC_V1_EQE_SQ_DRAIN = 1, + IONIC_V1_EQE_QP_COMM_EST = 2, + IONIC_V1_EQE_QP_LAST_WQE = 3, + /* qp error events */ + IONIC_V1_EQE_QP_ERR = 8, + IONIC_V1_EQE_QP_ERR_REQUEST = 9, + IONIC_V1_EQE_QP_ERR_ACCESS = 10, +}; + +enum ionic_tfp_csum_profiles { + IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP = 0, + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP = 1, + IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP = 2, + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP = 3, + IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 4, + IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 5, + IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 6, + IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 7, + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_IPV4_UDP = 8, + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_ESP_UDP = 9, + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_UDP = 10, + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_ESP_UDP = 11, + IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_CSUM = 12, +}; + +static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe) +{ + return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR); +} + +static inline u32 ionic_v1_eqe_evt(struct ionic_v1_eqe *eqe) +{ + return be32_to_cpu(eqe->evt); +} + +static inline u8 ionic_v1_eqe_evt_type(u32 evt) +{ + return (evt >> IONIC_V1_EQE_TYPE_SHIFT) & IONIC_V1_EQE_TYPE_MASK; +} + +static inline u8 ionic_v1_eqe_evt_code(u32 evt) +{ + return (evt >> IONIC_V1_EQE_CODE_SHIFT) & IONIC_V1_EQE_CODE_MASK; +} + +static inline u32 ionic_v1_eqe_evt_qid(u32 evt) +{ + return evt >> IONIC_V1_EQE_QID_SHIFT; +} + +enum ionic_v1_stat_bits { + IONIC_V1_STAT_TYPE_SHIFT = 28, + IONIC_V1_STAT_TYPE_NONE = 0, + IONIC_V1_STAT_TYPE_8 = 1, + IONIC_V1_STAT_TYPE_LE16 = 2, + IONIC_V1_STAT_TYPE_LE32 = 3, + IONIC_V1_STAT_TYPE_LE64 = 4, + IONIC_V1_STAT_TYPE_BE16 = 5, + IONIC_V1_STAT_TYPE_BE32 = 6, + IONIC_V1_STAT_TYPE_BE64 = 7, + IONIC_V1_STAT_OFF_MASK = BIT(IONIC_V1_STAT_TYPE_SHIFT) - 1, +}; + +struct ionic_v1_stat { + union { + __be32 be_type_off; + u32 type_off; + }; + char name[28]; +}; + +static inline int ionic_v1_stat_type(struct ionic_v1_stat *hdr) +{ + return hdr->type_off >> IONIC_V1_STAT_TYPE_SHIFT; +} + +static inline unsigned int ionic_v1_stat_off(struct ionic_v1_stat *hdr) +{ + return hdr->type_off & IONIC_V1_STAT_OFF_MASK; +} + +#endif /* _IONIC_FW_H_ */ diff --git a/drivers/infiniband/hw/ionic/ionic_hw_stats.c b/drivers/infiniband/hw/ionic/ionic_hw_stats.c new file mode 100644 index 000000000000..244a80dde08f --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_hw_stats.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. 
*/ + +#include <linux/dma-mapping.h> + +#include "ionic_fw.h" +#include "ionic_ibdev.h" + +static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats, + int hw_stats_count) +{ + int hw_stat_i; + + for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) { + struct ionic_v1_stat *stat = &hw_stats[hw_stat_i]; + + stat->type_off = be32_to_cpu(stat->be_type_off); + stat->name[sizeof(stat->name) - 1] = 0; + if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE) + break; + } + + return hw_stat_i; +} + +static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs, + struct ionic_v1_stat *hw_stats, + int hw_stats_count) +{ + int hw_stat_i; + + for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) { + struct ionic_v1_stat *stat = &hw_stats[hw_stat_i]; + + hw_stats_hdrs[hw_stat_i].name = stat->name; + } +} + +static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat, + void *vals_buf, size_t vals_len) +{ + unsigned int off = ionic_v1_stat_off(stat); + int type = ionic_v1_stat_type(stat); + +#define __ionic_v1_stat_validate(__type) \ + ((off + sizeof(__type) <= vals_len) && \ + (IS_ALIGNED(off, sizeof(__type)))) + + switch (type) { + case IONIC_V1_STAT_TYPE_8: + if (__ionic_v1_stat_validate(u8)) + return *(u8 *)(vals_buf + off); + break; + case IONIC_V1_STAT_TYPE_LE16: + if (__ionic_v1_stat_validate(__le16)) + return le16_to_cpu(*(__le16 *)(vals_buf + off)); + break; + case IONIC_V1_STAT_TYPE_LE32: + if (__ionic_v1_stat_validate(__le32)) + return le32_to_cpu(*(__le32 *)(vals_buf + off)); + break; + case IONIC_V1_STAT_TYPE_LE64: + if (__ionic_v1_stat_validate(__le64)) + return le64_to_cpu(*(__le64 *)(vals_buf + off)); + break; + case IONIC_V1_STAT_TYPE_BE16: + if (__ionic_v1_stat_validate(__be16)) + return be16_to_cpu(*(__be16 *)(vals_buf + off)); + break; + case IONIC_V1_STAT_TYPE_BE32: + if (__ionic_v1_stat_validate(__be32)) + return be32_to_cpu(*(__be32 *)(vals_buf + off)); + break; + case IONIC_V1_STAT_TYPE_BE64: + if (__ionic_v1_stat_validate(__be64)) + return be64_to_cpu(*(__be64 *)(vals_buf + off)); + break; + } + + return ~0ull; +#undef __ionic_v1_stat_validate +} + +static int ionic_hw_stats_cmd(struct ionic_ibdev *dev, + dma_addr_t dma, size_t len, int qid, int op) +{ + struct ionic_admin_wr wr = { + .work = COMPLETION_INITIALIZER_ONSTACK(wr.work), + .wqe = { + .op = op, + .len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN), + .cmd.stats = { + .dma_addr = cpu_to_le64(dma), + .length = cpu_to_le32(len), + .id_ver = cpu_to_le32(qid), + }, + } + }; + + if (dev->lif_cfg.admin_opcodes <= op) + return -EBADRQC; + + ionic_admin_post(dev, &wr); + + return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT); +} + +static int ionic_init_hw_stats(struct ionic_ibdev *dev) +{ + dma_addr_t hw_stats_dma; + int rc, hw_stats_count; + + if (dev->hw_stats_hdrs) + return 0; + + dev->hw_stats_count = 0; + + /* buffer for current values from the device */ + dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!dev->hw_stats_buf) { + rc = -ENOMEM; + goto err_buf; + } + + /* buffer for names, sizes, offsets of values */ + dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!dev->hw_stats) { + rc = -ENOMEM; + goto err_hw_stats; + } + + /* request the names, sizes, offsets */ + hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats, + PAGE_SIZE, DMA_FROM_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma); + if (rc) + goto err_dma; + + rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0, + IONIC_V1_ADMIN_STATS_HDRS); + if (rc) + goto err_cmd; + + 
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE); + + /* normalize and count the number of hw_stats */ + hw_stats_count = + ionic_v1_stat_normalize(dev->hw_stats, + PAGE_SIZE / sizeof(*dev->hw_stats)); + if (!hw_stats_count) { + rc = -ENODATA; + goto err_dma; + } + + dev->hw_stats_count = hw_stats_count; + + /* alloc and init array of names, for alloc_hw_stats */ + dev->hw_stats_hdrs = kcalloc(hw_stats_count, + sizeof(*dev->hw_stats_hdrs), + GFP_KERNEL); + if (!dev->hw_stats_hdrs) { + rc = -ENOMEM; + goto err_dma; + } + + ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats, + hw_stats_count); + + return 0; + +err_cmd: + dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE); +err_dma: + kfree(dev->hw_stats); +err_hw_stats: + kfree(dev->hw_stats_buf); +err_buf: + dev->hw_stats_count = 0; + dev->hw_stats = NULL; + dev->hw_stats_buf = NULL; + dev->hw_stats_hdrs = NULL; + return rc; +} + +static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev, + u32 port) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibdev); + + if (port != 1) + return NULL; + + return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs, + dev->hw_stats_count, + RDMA_HW_STATS_DEFAULT_LIFESPAN); +} + +static int ionic_get_hw_stats(struct ib_device *ibdev, + struct rdma_hw_stats *hw_stats, + u32 port, int index) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibdev); + dma_addr_t hw_stats_dma; + int rc, hw_stat_i; + + if (port != 1) + return -EINVAL; + + hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf, + PAGE_SIZE, DMA_FROM_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma); + if (rc) + goto err_dma; + + rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, + 0, IONIC_V1_ADMIN_STATS_VALS); + if (rc) + goto err_cmd; + + dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, + PAGE_SIZE, DMA_FROM_DEVICE); + + for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i) + hw_stats->value[hw_stat_i] = + ionic_v1_stat_val(&dev->hw_stats[hw_stat_i], + dev->hw_stats_buf, PAGE_SIZE); + + return hw_stat_i; + +err_cmd: + dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, + PAGE_SIZE, DMA_FROM_DEVICE); +err_dma: + return rc; +} + +static struct rdma_hw_stats * +ionic_counter_alloc_stats(struct rdma_counter *counter) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(counter->device); + struct ionic_counter *cntr; + int err; + + cntr = kzalloc(sizeof(*cntr), GFP_KERNEL); + if (!cntr) + return NULL; + + /* buffer for current values from the device */ + cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!cntr->vals) + goto err_vals; + + err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id, + cntr, + XA_LIMIT(0, IONIC_MAX_QPID), + GFP_KERNEL); + if (err) + goto err_xa; + + INIT_LIST_HEAD(&cntr->qp_list); + + return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs, + dev->counter_stats->queue_stats_count, + RDMA_HW_STATS_DEFAULT_LIFESPAN); +err_xa: + kfree(cntr->vals); +err_vals: + kfree(cntr); + + return NULL; +} + +static int ionic_counter_dealloc(struct rdma_counter *counter) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(counter->device); + struct ionic_counter *cntr; + + cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id); + if (!cntr) + return -EINVAL; + + kfree(cntr->vals); + kfree(cntr); + + return 0; +} + +static int ionic_counter_bind_qp(struct rdma_counter *counter, + struct ib_qp *ibqp, + u32 port) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(counter->device); + struct ionic_qp *qp = to_ionic_qp(ibqp); + 
struct ionic_counter *cntr; + + cntr = xa_load(&dev->counter_stats->xa_counters, counter->id); + if (!cntr) + return -EINVAL; + + list_add_tail(&qp->qp_list_counter, &cntr->qp_list); + ibqp->counter = counter; + + return 0; +} + +static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port) +{ + struct ionic_qp *qp = to_ionic_qp(ibqp); + + if (ibqp->counter) { + list_del(&qp->qp_list_counter); + ibqp->counter = NULL; + } + + return 0; +} + +static int ionic_get_qp_stats(struct ib_device *ibdev, + struct rdma_hw_stats *hw_stats, + u32 counter_id) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibdev); + struct ionic_counter_stats *cs; + struct ionic_counter *cntr; + dma_addr_t hw_stats_dma; + struct ionic_qp *qp; + int rc, stat_i = 0; + + cs = dev->counter_stats; + cntr = xa_load(&cs->xa_counters, counter_id); + if (!cntr) + return -EINVAL; + + hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals, + PAGE_SIZE, DMA_FROM_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma); + if (rc) + return rc; + + memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters); + + list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) { + rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, + qp->qpid, + IONIC_V1_ADMIN_QP_STATS_VALS); + if (rc) + goto err_cmd; + + for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i) + hw_stats->value[stat_i] += + ionic_v1_stat_val(&cs->hdr[stat_i], + cntr->vals, + PAGE_SIZE); + } + + dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE); + return stat_i; + +err_cmd: + dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE); + + return rc; +} + +static int ionic_counter_update_stats(struct rdma_counter *counter) +{ + return ionic_get_qp_stats(counter->device, counter->stats, counter->id); +} + +static int ionic_alloc_counters(struct ionic_ibdev *dev) +{ + struct ionic_counter_stats *cs = dev->counter_stats; + int rc, hw_stats_count; + dma_addr_t hdr_dma; + + /* buffer for names, sizes, offsets of values */ + cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!cs->hdr) + return -ENOMEM; + + hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr, + PAGE_SIZE, DMA_FROM_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma); + if (rc) + goto err_dma; + + rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0, + IONIC_V1_ADMIN_QP_STATS_HDRS); + if (rc) + goto err_cmd; + + dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE); + + /* normalize and count the number of hw_stats */ + hw_stats_count = ionic_v1_stat_normalize(cs->hdr, + PAGE_SIZE / sizeof(*cs->hdr)); + if (!hw_stats_count) { + rc = -ENODATA; + goto err_dma; + } + + cs->queue_stats_count = hw_stats_count; + + /* alloc and init array of names */ + cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs), + GFP_KERNEL); + if (!cs->stats_hdrs) { + rc = -ENOMEM; + goto err_dma; + } + + ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count); + + return 0; + +err_cmd: + dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE); +err_dma: + kfree(cs->hdr); + + return rc; +} + +static const struct ib_device_ops ionic_hw_stats_ops = { + .driver_id = RDMA_DRIVER_IONIC, + .alloc_hw_port_stats = ionic_alloc_hw_stats, + .get_hw_stats = ionic_get_hw_stats, +}; + +static const struct ib_device_ops ionic_counter_stats_ops = { + .counter_alloc_stats = ionic_counter_alloc_stats, + .counter_dealloc = ionic_counter_dealloc, + .counter_bind_qp = ionic_counter_bind_qp, + .counter_unbind_qp = 
ionic_counter_unbind_qp, + .counter_update_stats = ionic_counter_update_stats, +}; + +void ionic_stats_init(struct ionic_ibdev *dev) +{ + u16 stats_type = dev->lif_cfg.stats_type; + int rc; + + if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) { + rc = ionic_init_hw_stats(dev); + if (rc) + ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n"); + else + ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops); + } + + if (stats_type & IONIC_LIF_RDMA_STAT_QP) { + dev->counter_stats = kzalloc(sizeof(*dev->counter_stats), + GFP_KERNEL); + if (!dev->counter_stats) + return; + + rc = ionic_alloc_counters(dev); + if (rc) { + ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n"); + kfree(dev->counter_stats); + dev->counter_stats = NULL; + return; + } + + xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC); + + ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops); + } +} + +void ionic_stats_cleanup(struct ionic_ibdev *dev) +{ + if (dev->counter_stats) { + xa_destroy(&dev->counter_stats->xa_counters); + kfree(dev->counter_stats->hdr); + kfree(dev->counter_stats->stats_hdrs); + kfree(dev->counter_stats); + dev->counter_stats = NULL; + } + + kfree(dev->hw_stats); + kfree(dev->hw_stats_buf); + kfree(dev->hw_stats_hdrs); +} diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.c b/drivers/infiniband/hw/ionic/ionic_ibdev.c new file mode 100644 index 000000000000..164046d00e5d --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_ibdev.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/pci.h> +#include <linux/irq.h> +#include <net/addrconf.h> +#include <rdma/ib_addr.h> +#include <rdma/ib_mad.h> + +#include "ionic_ibdev.h" + +#define DRIVER_DESCRIPTION "AMD Pensando RoCE HCA driver" +#define DEVICE_DESCRIPTION "AMD Pensando RoCE HCA" + +MODULE_AUTHOR("Allen Hubbe <allen.hubbe@amd.com>"); +MODULE_DESCRIPTION(DRIVER_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("NET_IONIC"); + +static int ionic_query_device(struct ib_device *ibdev, + struct ib_device_attr *attr, + struct ib_udata *udata) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibdev); + struct net_device *ndev; + + ndev = ib_device_get_netdev(ibdev, 1); + addrconf_ifid_eui48((u8 *)&attr->sys_image_guid, ndev); + dev_put(ndev); + attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2; + attr->page_size_cap = dev->lif_cfg.page_size_supported; + + attr->vendor_id = to_pci_dev(dev->lif_cfg.hwdev)->vendor; + attr->vendor_part_id = to_pci_dev(dev->lif_cfg.hwdev)->device; + + attr->hw_ver = ionic_lif_asic_rev(dev->lif_cfg.lif); + attr->fw_ver = 0; + attr->max_qp = dev->lif_cfg.qp_count; + attr->max_qp_wr = IONIC_MAX_DEPTH; + attr->device_cap_flags = + IB_DEVICE_MEM_WINDOW | + IB_DEVICE_MEM_MGT_EXTENSIONS | + IB_DEVICE_MEM_WINDOW_TYPE_2B | + 0; + attr->max_send_sge = + min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, false), + IONIC_SPEC_HIGH); + attr->max_recv_sge = + min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false), + IONIC_SPEC_HIGH); + attr->max_sge_rd = attr->max_send_sge; + attr->max_cq = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count; + attr->max_cqe = IONIC_MAX_CQ_DEPTH - IONIC_CQ_GRACE; + attr->max_mr = dev->lif_cfg.nmrs_per_lif; + attr->max_pd = IONIC_MAX_PD; + attr->max_qp_rd_atom = IONIC_MAX_RD_ATOM; + attr->max_ee_rd_atom = 0; + attr->max_res_rd_atom = IONIC_MAX_RD_ATOM; + attr->max_qp_init_rd_atom = IONIC_MAX_RD_ATOM; + attr->max_ee_init_rd_atom = 0; + 
attr->atomic_cap = IB_ATOMIC_GLOB; + attr->masked_atomic_cap = IB_ATOMIC_GLOB; + attr->max_mw = dev->lif_cfg.nmrs_per_lif; + attr->max_mcast_grp = 0; + attr->max_mcast_qp_attach = 0; + attr->max_ah = dev->lif_cfg.nahs_per_lif; + attr->max_fast_reg_page_list_len = dev->lif_cfg.npts_per_lif / 2; + attr->max_pkeys = IONIC_PKEY_TBL_LEN; + + return 0; +} + +static int ionic_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *attr) +{ + struct net_device *ndev; + + if (port != 1) + return -EINVAL; + + ndev = ib_device_get_netdev(ibdev, port); + + if (netif_running(ndev) && netif_carrier_ok(ndev)) { + attr->state = IB_PORT_ACTIVE; + attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; + } else if (netif_running(ndev)) { + attr->state = IB_PORT_DOWN; + attr->phys_state = IB_PORT_PHYS_STATE_POLLING; + } else { + attr->state = IB_PORT_DOWN; + attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; + } + + attr->max_mtu = iboe_get_mtu(ndev->max_mtu); + attr->active_mtu = min(attr->max_mtu, iboe_get_mtu(ndev->mtu)); + attr->gid_tbl_len = IONIC_GID_TBL_LEN; + attr->ip_gids = true; + attr->port_cap_flags = 0; + attr->max_msg_sz = 0x80000000; + attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN; + attr->max_vl_num = 1; + attr->subnet_prefix = 0xfe80000000000000ull; + + dev_put(ndev); + + return ib_get_eth_speed(ibdev, port, + &attr->active_speed, + &attr->active_width); +} + +static enum rdma_link_layer ionic_get_link_layer(struct ib_device *ibdev, + u32 port) +{ + return IB_LINK_LAYER_ETHERNET; +} + +static int ionic_query_pkey(struct ib_device *ibdev, u32 port, u16 index, + u16 *pkey) +{ + if (port != 1) + return -EINVAL; + + if (index != 0) + return -EINVAL; + + *pkey = IB_DEFAULT_PKEY_FULL; + + return 0; +} + +static int ionic_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *attr) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibdev); + + if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) + return -EOPNOTSUPP; + + if (mask & IB_DEVICE_MODIFY_NODE_DESC) + memcpy(dev->ibdev.node_desc, attr->node_desc, + IB_DEVICE_NODE_DESC_MAX); + + return 0; +} + +static int ionic_get_port_immutable(struct ib_device *ibdev, u32 port, + struct ib_port_immutable *attr) +{ + if (port != 1) + return -EINVAL; + + attr->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + + attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN; + attr->gid_tbl_len = IONIC_GID_TBL_LEN; + attr->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + +static void ionic_get_dev_fw_str(struct ib_device *ibdev, char *str) +{ + struct ionic_ibdev *dev = to_ionic_ibdev(ibdev); + + ionic_lif_fw_version(dev->lif_cfg.lif, str, IB_FW_VERSION_NAME_MAX); +} + +static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct ionic_ibdev *dev = + rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev); + + return sysfs_emit(buf, "0x%x\n", ionic_lif_asic_rev(dev->lif_cfg.lif)); +} +static DEVICE_ATTR_RO(hw_rev); + +static ssize_t hca_type_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ionic_ibdev *dev = + rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev); + + return sysfs_emit(buf, "%s\n", dev->ibdev.node_desc); +} +static DEVICE_ATTR_RO(hca_type); + +static struct attribute *ionic_rdma_attributes[] = { + &dev_attr_hw_rev.attr, + &dev_attr_hca_type.attr, + NULL +}; + +static const struct attribute_group ionic_rdma_attr_group = { + .attrs = ionic_rdma_attributes, +}; + +static void ionic_disassociate_ucontext(struct ib_ucontext *ibcontext) +{ + /* + * Dummy define 
disassociate_ucontext so that it does not + * wait for user context before cleaning up hw resources. + */ +} + +static const struct ib_device_ops ionic_dev_ops = { + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_IONIC, + .uverbs_abi_ver = IONIC_ABI_VERSION, + + .alloc_ucontext = ionic_alloc_ucontext, + .dealloc_ucontext = ionic_dealloc_ucontext, + .mmap = ionic_mmap, + .mmap_free = ionic_mmap_free, + .alloc_pd = ionic_alloc_pd, + .dealloc_pd = ionic_dealloc_pd, + .create_ah = ionic_create_ah, + .query_ah = ionic_query_ah, + .destroy_ah = ionic_destroy_ah, + .create_user_ah = ionic_create_ah, + .get_dma_mr = ionic_get_dma_mr, + .reg_user_mr = ionic_reg_user_mr, + .reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf, + .dereg_mr = ionic_dereg_mr, + .alloc_mr = ionic_alloc_mr, + .map_mr_sg = ionic_map_mr_sg, + .alloc_mw = ionic_alloc_mw, + .dealloc_mw = ionic_dealloc_mw, + .create_cq = ionic_create_cq, + .destroy_cq = ionic_destroy_cq, + .create_qp = ionic_create_qp, + .modify_qp = ionic_modify_qp, + .query_qp = ionic_query_qp, + .destroy_qp = ionic_destroy_qp, + + .post_send = ionic_post_send, + .post_recv = ionic_post_recv, + .poll_cq = ionic_poll_cq, + .req_notify_cq = ionic_req_notify_cq, + + .query_device = ionic_query_device, + .query_port = ionic_query_port, + .get_link_layer = ionic_get_link_layer, + .query_pkey = ionic_query_pkey, + .modify_device = ionic_modify_device, + .get_port_immutable = ionic_get_port_immutable, + .get_dev_fw_str = ionic_get_dev_fw_str, + .device_group = &ionic_rdma_attr_group, + .disassociate_ucontext = ionic_disassociate_ucontext, + + INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx), + INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd), + INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah), + INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq), + INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp), + INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw), +}; + +static void ionic_init_resids(struct ionic_ibdev *dev) +{ + ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count); + dev->half_cqid_udma_shift = + order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count); + ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD); + ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif); + ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif); + /* skip reserved lkey */ + dev->next_mrkey = 1; + ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count); + /* skip reserved SMI and GSI qpids */ + dev->half_qpid_udma_shift = + order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count); + ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count); +} + +static void ionic_destroy_resids(struct ionic_ibdev *dev) +{ + ionic_resid_destroy(&dev->inuse_cqid); + ionic_resid_destroy(&dev->inuse_pdid); + ionic_resid_destroy(&dev->inuse_ahid); + ionic_resid_destroy(&dev->inuse_mrid); + ionic_resid_destroy(&dev->inuse_qpid); + ionic_resid_destroy(&dev->inuse_dbid); +} + +static void ionic_destroy_ibdev(struct ionic_ibdev *dev) +{ + ionic_kill_rdma_admin(dev, false); + ib_unregister_device(&dev->ibdev); + ionic_stats_cleanup(dev); + ionic_destroy_rdma_admin(dev); + ionic_destroy_resids(dev); + WARN_ON(!xa_empty(&dev->qp_tbl)); + xa_destroy(&dev->qp_tbl); + WARN_ON(!xa_empty(&dev->cq_tbl)); + xa_destroy(&dev->cq_tbl); + ib_dealloc_device(&dev->ibdev); +} + +static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev) +{ + struct ib_device *ibdev; + struct ionic_ibdev *dev; + struct net_device *ndev; + int rc; + + dev = ib_alloc_device(ionic_ibdev, ibdev); + if (!dev) + return ERR_PTR(-EINVAL); + + 
ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg); + + xa_init_flags(&dev->qp_tbl, GFP_ATOMIC); + xa_init_flags(&dev->cq_tbl, GFP_ATOMIC); + + ionic_init_resids(dev); + + rc = ionic_rdma_reset_devcmd(dev); + if (rc) + goto err_reset; + + rc = ionic_create_rdma_admin(dev); + if (rc) + goto err_admin; + + ibdev = &dev->ibdev; + ibdev->dev.parent = dev->lif_cfg.hwdev; + + strscpy(ibdev->name, "ionic_%d", IB_DEVICE_NAME_MAX); + strscpy(ibdev->node_desc, DEVICE_DESCRIPTION, IB_DEVICE_NODE_DESC_MAX); + + ibdev->node_type = RDMA_NODE_IB_CA; + ibdev->phys_port_cnt = 1; + + /* the first two eq are reserved for async events */ + ibdev->num_comp_vectors = dev->lif_cfg.eq_count - 2; + + ndev = ionic_lif_netdev(ionic_adev->lif); + addrconf_ifid_eui48((u8 *)&ibdev->node_guid, ndev); + rc = ib_device_set_netdev(ibdev, ndev, 1); + /* ionic_lif_netdev() returns ndev with refcount held */ + dev_put(ndev); + if (rc) + goto err_admin; + + ib_set_device_ops(&dev->ibdev, &ionic_dev_ops); + + ionic_stats_init(dev); + + rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent); + if (rc) + goto err_register; + + return dev; + +err_register: + ionic_stats_cleanup(dev); +err_admin: + ionic_kill_rdma_admin(dev, false); + ionic_destroy_rdma_admin(dev); +err_reset: + ionic_destroy_resids(dev); + xa_destroy(&dev->qp_tbl); + xa_destroy(&dev->cq_tbl); + ib_dealloc_device(&dev->ibdev); + + return ERR_PTR(rc); +} + +static int ionic_aux_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ionic_aux_dev *ionic_adev; + struct ionic_ibdev *dev; + + ionic_adev = container_of(adev, struct ionic_aux_dev, adev); + dev = ionic_create_ibdev(ionic_adev); + if (IS_ERR(dev)) + return dev_err_probe(&adev->dev, PTR_ERR(dev), + "Failed to register ibdev\n"); + + auxiliary_set_drvdata(adev, dev); + ibdev_dbg(&dev->ibdev, "registered\n"); + + return 0; +} + +static void ionic_aux_remove(struct auxiliary_device *adev) +{ + struct ionic_ibdev *dev = auxiliary_get_drvdata(adev); + + dev_dbg(&adev->dev, "unregister ibdev\n"); + ionic_destroy_ibdev(dev); + dev_dbg(&adev->dev, "unregistered\n"); +} + +static const struct auxiliary_device_id ionic_aux_id_table[] = { + { .name = "ionic.rdma", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, ionic_aux_id_table); + +static struct auxiliary_driver ionic_aux_r_driver = { + .name = "rdma", + .probe = ionic_aux_probe, + .remove = ionic_aux_remove, + .id_table = ionic_aux_id_table, +}; + +static int __init ionic_mod_init(void) +{ + int rc; + + ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt"); + if (!ionic_evt_workq) + return -ENOMEM; + + rc = auxiliary_driver_register(&ionic_aux_r_driver); + if (rc) + goto err_aux; + + return 0; + +err_aux: + destroy_workqueue(ionic_evt_workq); + + return rc; +} + +static void __exit ionic_mod_exit(void) +{ + auxiliary_driver_unregister(&ionic_aux_r_driver); + destroy_workqueue(ionic_evt_workq); +} + +module_init(ionic_mod_init); +module_exit(ionic_mod_exit); diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.h b/drivers/infiniband/hw/ionic/ionic_ibdev.h new file mode 100644 index 000000000000..82fda1e3cdb6 --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_ibdev.h @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. 
*/ + +#ifndef _IONIC_IBDEV_H_ +#define _IONIC_IBDEV_H_ + +#include <rdma/ib_umem.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_pack.h> +#include <rdma/uverbs_ioctl.h> + +#include <rdma/ionic-abi.h> +#include <ionic_api.h> +#include <ionic_regs.h> + +#include "ionic_fw.h" +#include "ionic_queue.h" +#include "ionic_res.h" + +#include "ionic_lif_cfg.h" + +/* Config knobs */ +#define IONIC_EQ_DEPTH 511 +#define IONIC_EQ_COUNT 32 +#define IONIC_AQ_DEPTH 63 +#define IONIC_AQ_COUNT 4 +#define IONIC_EQ_ISR_BUDGET 10 +#define IONIC_EQ_WORK_BUDGET 1000 +#define IONIC_MAX_RD_ATOM 16 +#define IONIC_PKEY_TBL_LEN 1 +#define IONIC_GID_TBL_LEN 256 + +#define IONIC_MAX_QPID 0xffffff +#define IONIC_SPEC_HIGH 8 +#define IONIC_MAX_PD 1024 +#define IONIC_SPEC_HIGH 8 +#define IONIC_SQCMB_ORDER 5 +#define IONIC_RQCMB_ORDER 0 + +#define IONIC_META_LAST ((void *)1ul) +#define IONIC_META_POSTED ((void *)2ul) + +#define IONIC_CQ_GRACE 100 + +#define IONIC_ROCE_UDP_SPORT 28272 +#define IONIC_DMA_LKEY 0 +#define IONIC_DMA_RKEY IONIC_DMA_LKEY + +#define IONIC_CMB_SUPPORTED \ + (IONIC_CMB_ENABLE | IONIC_CMB_REQUIRE | IONIC_CMB_EXPDB | \ + IONIC_CMB_WC | IONIC_CMB_UC) + +/* resource is not reserved on the device, indicated in tbl_order */ +#define IONIC_RES_INVALID -1 + +struct ionic_aq; +struct ionic_cq; +struct ionic_eq; +struct ionic_vcq; + +enum ionic_admin_state { + IONIC_ADMIN_ACTIVE, /* submitting admin commands to queue */ + IONIC_ADMIN_PAUSED, /* not submitting, but may complete normally */ + IONIC_ADMIN_KILLED, /* not submitting, locally completed */ +}; + +enum ionic_admin_flags { + IONIC_ADMIN_F_BUSYWAIT = BIT(0), /* Don't sleep */ + IONIC_ADMIN_F_TEARDOWN = BIT(1), /* In destroy path */ + IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */ +}; + +enum ionic_mmap_flag { + IONIC_MMAP_WC = BIT(0), +}; + +struct ionic_mmap_entry { + struct rdma_user_mmap_entry rdma_entry; + unsigned long size; + unsigned long pfn; + u8 mmap_flags; +}; + +struct ionic_ibdev { + struct ib_device ibdev; + + struct ionic_lif_cfg lif_cfg; + + struct xarray qp_tbl; + struct xarray cq_tbl; + + struct ionic_resid_bits inuse_dbid; + struct ionic_resid_bits inuse_pdid; + struct ionic_resid_bits inuse_ahid; + struct ionic_resid_bits inuse_mrid; + struct ionic_resid_bits inuse_qpid; + struct ionic_resid_bits inuse_cqid; + + u8 half_cqid_udma_shift; + u8 half_qpid_udma_shift; + u8 next_qpid_udma_idx; + u8 next_mrkey; + + struct work_struct reset_work; + bool reset_posted; + u32 reset_cnt; + + struct delayed_work admin_dwork; + struct ionic_aq **aq_vec; + atomic_t admin_state; + + struct ionic_eq **eq_vec; + + struct ionic_v1_stat *hw_stats; + void *hw_stats_buf; + struct rdma_stat_desc *hw_stats_hdrs; + struct ionic_counter_stats *counter_stats; + int hw_stats_count; +}; + +struct ionic_eq { + struct ionic_ibdev *dev; + + u32 eqid; + u32 intr; + + struct ionic_queue q; + + int armed; + bool enable; + + struct work_struct work; + + int irq; + char name[32]; +}; + +struct ionic_admin_wr { + struct completion work; + struct list_head aq_ent; + struct ionic_v1_admin_wqe wqe; + struct ionic_v1_cqe cqe; + struct ionic_aq *aq; + int status; +}; + +struct ionic_admin_wr_q { + struct ionic_admin_wr *wr; + int wqe_strides; +}; + +struct ionic_aq { + struct ionic_ibdev *dev; + struct ionic_vcq *vcq; + + struct work_struct work; + + atomic_t admin_state; + unsigned long stamp; + bool armed; + + u32 aqid; + u32 cqid; + + spinlock_t lock; /* for posting */ + struct ionic_queue q; + struct ionic_admin_wr_q *q_wr; + struct list_head wr_prod; 
+ struct list_head wr_post; +}; + +struct ionic_ctx { + struct ib_ucontext ibctx; + u32 dbid; + struct rdma_user_mmap_entry *mmap_dbell; +}; + +struct ionic_tbl_buf { + u32 tbl_limit; + u32 tbl_pages; + size_t tbl_size; + __le64 *tbl_buf; + dma_addr_t tbl_dma; + u8 page_size_log2; +}; + +struct ionic_pd { + struct ib_pd ibpd; + + u32 pdid; + u32 flags; +}; + +struct ionic_cq { + struct ionic_vcq *vcq; + + u32 cqid; + u32 eqid; + + spinlock_t lock; /* for polling */ + struct list_head poll_sq; + bool flush; + struct list_head flush_sq; + struct list_head flush_rq; + struct list_head ibkill_flush_ent; + + struct ionic_queue q; + bool color; + int credit; + u16 arm_any_prod; + u16 arm_sol_prod; + + struct kref cq_kref; + struct completion cq_rel_comp; + + /* infrequently accessed, keep at end */ + struct ib_umem *umem; +}; + +struct ionic_vcq { + struct ib_cq ibcq; + struct ionic_cq cq[2]; + u8 udma_mask; + u8 poll_idx; +}; + +struct ionic_sq_meta { + u64 wrid; + u32 len; + u16 seq; + u8 ibop; + u8 ibsts; + u8 remote:1; + u8 signal:1; + u8 local_comp:1; +}; + +struct ionic_rq_meta { + struct ionic_rq_meta *next; + u64 wrid; +}; + +struct ionic_qp { + struct ib_qp ibqp; + enum ib_qp_state state; + + u32 qpid; + u32 ahid; + u32 sq_cqid; + u32 rq_cqid; + u8 udma_idx; + u8 has_ah:1; + u8 has_sq:1; + u8 has_rq:1; + u8 sig_all:1; + + struct list_head qp_list_counter; + + struct list_head cq_poll_sq; + struct list_head cq_flush_sq; + struct list_head cq_flush_rq; + struct list_head ibkill_flush_ent; + + spinlock_t sq_lock; /* for posting and polling */ + struct ionic_queue sq; + struct ionic_sq_meta *sq_meta; + u16 *sq_msn_idx; + int sq_spec; + u16 sq_old_prod; + u16 sq_msn_prod; + u16 sq_msn_cons; + u8 sq_cmb; + bool sq_flush; + bool sq_flush_rcvd; + + spinlock_t rq_lock; /* for posting and polling */ + struct ionic_queue rq; + struct ionic_rq_meta *rq_meta; + struct ionic_rq_meta *rq_meta_head; + int rq_spec; + u16 rq_old_prod; + u8 rq_cmb; + bool rq_flush; + + struct kref qp_kref; + struct completion qp_rel_comp; + + /* infrequently accessed, keep at end */ + int sgid_index; + int sq_cmb_order; + u32 sq_cmb_pgid; + phys_addr_t sq_cmb_addr; + struct rdma_user_mmap_entry *mmap_sq_cmb; + + struct ib_umem *sq_umem; + + int rq_cmb_order; + u32 rq_cmb_pgid; + phys_addr_t rq_cmb_addr; + struct rdma_user_mmap_entry *mmap_rq_cmb; + + struct ib_umem *rq_umem; + + int dcqcn_profile; + + struct ib_ud_header *hdr; +}; + +struct ionic_ah { + struct ib_ah ibah; + u32 ahid; + int sgid_index; + struct ib_ud_header hdr; +}; + +struct ionic_mr { + union { + struct ib_mr ibmr; + struct ib_mw ibmw; + }; + + u32 mrid; + int flags; + + struct ib_umem *umem; + struct ionic_tbl_buf buf; + bool created; +}; + +struct ionic_counter_stats { + int queue_stats_count; + struct ionic_v1_stat *hdr; + struct rdma_stat_desc *stats_hdrs; + struct xarray xa_counters; +}; + +struct ionic_counter { + void *vals; + struct list_head qp_list; +}; + +static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct ionic_ibdev, ibdev); +} + +static inline struct ionic_ctx *to_ionic_ctx(struct ib_ucontext *ibctx) +{ + return container_of(ibctx, struct ionic_ctx, ibctx); +} + +static inline struct ionic_ctx *to_ionic_ctx_uobj(struct ib_uobject *uobj) +{ + if (!uobj) + return NULL; + + if (!uobj->context) + return NULL; + + return to_ionic_ctx(uobj->context); +} + +static inline struct ionic_pd *to_ionic_pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct ionic_pd, ibpd); +} + +static 
inline struct ionic_mr *to_ionic_mr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct ionic_mr, ibmr); +} + +static inline struct ionic_mr *to_ionic_mw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct ionic_mr, ibmw); +} + +static inline struct ionic_vcq *to_ionic_vcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct ionic_vcq, ibcq); +} + +static inline struct ionic_cq *to_ionic_vcq_cq(struct ib_cq *ibcq, + uint8_t udma_idx) +{ + return &to_ionic_vcq(ibcq)->cq[udma_idx]; +} + +static inline struct ionic_qp *to_ionic_qp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct ionic_qp, ibqp); +} + +static inline struct ionic_ah *to_ionic_ah(struct ib_ah *ibah) +{ + return container_of(ibah, struct ionic_ah, ibah); +} + +static inline u32 ionic_ctx_dbid(struct ionic_ibdev *dev, + struct ionic_ctx *ctx) +{ + if (!ctx) + return dev->lif_cfg.dbid; + + return ctx->dbid; +} + +static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev, + struct ib_uobject *uobj) +{ + return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj)); +} + +static inline bool ionic_ibop_is_local(enum ib_wr_opcode op) +{ + return op == IB_WR_LOCAL_INV || op == IB_WR_REG_MR; +} + +static inline void ionic_qp_complete(struct kref *kref) +{ + struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref); + + complete(&qp->qp_rel_comp); +} + +static inline void ionic_cq_complete(struct kref *kref) +{ + struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref); + + complete(&cq->cq_rel_comp); +} + +/* ionic_admin.c */ +extern struct workqueue_struct *ionic_evt_workq; +void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr); +int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr, + enum ionic_admin_flags); + +int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev); + +int ionic_create_rdma_admin(struct ionic_ibdev *dev); +void ionic_destroy_rdma_admin(struct ionic_ibdev *dev); +void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path); + +/* ionic_controlpath.c */ +int ionic_create_cq_common(struct ionic_vcq *vcq, + struct ionic_tbl_buf *buf, + const struct ib_cq_init_attr *attr, + struct ionic_ctx *ctx, + struct ib_udata *udata, + struct ionic_qdesc *req_cq, + __u32 *resp_cqid, + int udma_idx); +void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq); +void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp); +void ionic_notify_flush_cq(struct ionic_cq *cq); + +int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata); +void ionic_dealloc_ucontext(struct ib_ucontext *ibctx); +int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma); +void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry); +int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, + struct ib_udata *udata); +int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); +int ionic_destroy_ah(struct ib_ah *ibah, u32 flags); +struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access); +struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, + u64 addr, int access, struct ib_dmah *dmah, + struct ib_udata *udata); +struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset, + u64 length, u64 addr, int fd, int access, + struct ib_dmah *dmah, + struct uverbs_attr_bundle *attrs); +int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata 
*udata); +struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type, + u32 max_sg); +int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset); +int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); +int ionic_dealloc_mw(struct ib_mw *ibmw); +int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct uverbs_attr_bundle *attrs); +int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); +int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, + struct ib_udata *udata); +int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, + struct ib_udata *udata); +int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask, + struct ib_qp_init_attr *init_attr); +int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); + +/* ionic_datapath.c */ +int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad); +int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad); +int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc); +int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); + +/* ionic_hw_stats.c */ +void ionic_stats_init(struct ionic_ibdev *dev); +void ionic_stats_cleanup(struct ionic_ibdev *dev); + +/* ionic_pgtbl.c */ +__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va); +__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va); +int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma); +int ionic_pgtbl_init(struct ionic_ibdev *dev, + struct ionic_tbl_buf *buf, + struct ib_umem *umem, + dma_addr_t dma, + int limit, + u64 page_size); +void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf); +#endif /* _IONIC_IBDEV_H_ */ diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.c b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c new file mode 100644 index 000000000000..f3cd281c3a2f --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. 
*/ + +#include <linux/kernel.h> + +#include <ionic.h> +#include <ionic_lif.h> + +#include "ionic_lif_cfg.h" + +#define IONIC_MIN_RDMA_VERSION 0 +#define IONIC_MAX_RDMA_VERSION 2 + +static u8 ionic_get_expdb(struct ionic_lif *lif) +{ + u8 expdb_support = 0; + + if (lif->ionic->idev.phy_cmb_expdb64_pages) + expdb_support |= IONIC_EXPDB_64B_WQE; + if (lif->ionic->idev.phy_cmb_expdb128_pages) + expdb_support |= IONIC_EXPDB_128B_WQE; + if (lif->ionic->idev.phy_cmb_expdb256_pages) + expdb_support |= IONIC_EXPDB_256B_WQE; + if (lif->ionic->idev.phy_cmb_expdb512_pages) + expdb_support |= IONIC_EXPDB_512B_WQE; + + return expdb_support; +} + +void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg) +{ + union ionic_lif_identity *ident = &lif->ionic->ident.lif; + + cfg->lif = lif; + cfg->hwdev = &lif->ionic->pdev->dev; + cfg->lif_index = lif->index; + cfg->lif_hw_index = lif->hw_index; + + cfg->dbid = lif->kern_pid; + cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); + cfg->dbpage = lif->kern_dbpage; + cfg->intr_ctrl = lif->ionic->idev.intr_ctrl; + + cfg->db_phys = lif->ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr; + + if (IONIC_VERSION(ident->rdma.version, ident->rdma.minor_version) >= + IONIC_VERSION(2, 1)) + cfg->page_size_supported = + le64_to_cpu(ident->rdma.page_size_cap); + else + cfg->page_size_supported = IONIC_PAGE_SIZE_SUPPORTED; + + cfg->rdma_version = ident->rdma.version; + cfg->qp_opcodes = ident->rdma.qp_opcodes; + cfg->admin_opcodes = ident->rdma.admin_opcodes; + + cfg->stats_type = le16_to_cpu(ident->rdma.stats_type); + cfg->npts_per_lif = le32_to_cpu(ident->rdma.npts_per_lif); + cfg->nmrs_per_lif = le32_to_cpu(ident->rdma.nmrs_per_lif); + cfg->nahs_per_lif = le32_to_cpu(ident->rdma.nahs_per_lif); + + cfg->aq_base = le32_to_cpu(ident->rdma.aq_qtype.qid_base); + cfg->cq_base = le32_to_cpu(ident->rdma.cq_qtype.qid_base); + cfg->eq_base = le32_to_cpu(ident->rdma.eq_qtype.qid_base); + + /* + * ionic_create_rdma_admin() may reduce aq_count or eq_count if + * it is unable to allocate all that were requested. 
+	 * aq_count is tunable; see ionic_aq_count
+	 * eq_count is tunable; see ionic_eq_count
+	 */
+	cfg->aq_count = le32_to_cpu(ident->rdma.aq_qtype.qid_count);
+	cfg->eq_count = le32_to_cpu(ident->rdma.eq_qtype.qid_count);
+	cfg->cq_count = le32_to_cpu(ident->rdma.cq_qtype.qid_count);
+	cfg->qp_count = le32_to_cpu(ident->rdma.sq_qtype.qid_count);
+	cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+
+	cfg->aq_qtype = ident->rdma.aq_qtype.qtype;
+	cfg->sq_qtype = ident->rdma.sq_qtype.qtype;
+	cfg->rq_qtype = ident->rdma.rq_qtype.qtype;
+	cfg->cq_qtype = ident->rdma.cq_qtype.qtype;
+	cfg->eq_qtype = ident->rdma.eq_qtype.qtype;
+	cfg->udma_qgrp_shift = ident->rdma.udma_shift;
+	cfg->udma_count = 2;
+
+	cfg->max_stride = ident->rdma.max_stride;
+	cfg->expdb_mask = ionic_get_expdb(lif);
+
+	cfg->sq_expdb =
+		!!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EXPDB);
+	cfg->rq_expdb =
+		!!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EXPDB);
+}
+
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif)
+{
+	struct net_device *netdev = lif->netdev;
+
+	dev_hold(netdev);
+	return netdev;
+}
+
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len)
+{
+	strscpy(str, lif->ionic->idev.dev_info.fw_version, len);
+}
+
+u8 ionic_lif_asic_rev(struct ionic_lif *lif)
+{
+	return lif->ionic->idev.dev_info.asic_rev;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.h b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
new file mode 100644
index 000000000000..20853429f623
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_LIF_CFG_H_
+#define _IONIC_LIF_CFG_H_
+#define IONIC_VERSION(a, b) (((a) << 16) + ((b) << 8))
+#define IONIC_PAGE_SIZE_SUPPORTED 0x40201000 /* 4KB, 2MB, 1GB */
+
+#define IONIC_EXPDB_64B_WQE BIT(0)
+#define IONIC_EXPDB_128B_WQE BIT(1)
+#define IONIC_EXPDB_256B_WQE BIT(2)
+#define IONIC_EXPDB_512B_WQE BIT(3)
+
+struct ionic_lif_cfg {
+	struct device *hwdev;
+	struct ionic_lif *lif;
+
+	int lif_index;
+	int lif_hw_index;
+
+	u32 dbid;
+	int dbid_count;
+	u64 __iomem *dbpage;
+	struct ionic_intr __iomem *intr_ctrl;
+	phys_addr_t db_phys;
+
+	u64 page_size_supported;
+	u32 npts_per_lif;
+	u32 nmrs_per_lif;
+	u32 nahs_per_lif;
+
+	u32 aq_base;
+	u32 cq_base;
+	u32 eq_base;
+
+	int aq_count;
+	int eq_count;
+	int cq_count;
+	int qp_count;
+
+	u16 stats_type;
+	u8 aq_qtype;
+	u8 sq_qtype;
+	u8 rq_qtype;
+	u8 cq_qtype;
+	u8 eq_qtype;
+
+	u8 udma_count;
+	u8 udma_qgrp_shift;
+
+	u8 rdma_version;
+	u8 qp_opcodes;
+	u8 admin_opcodes;
+
+	u8 max_stride;
+	bool sq_expdb;
+	bool rq_expdb;
+	u8 expdb_mask;
+};
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg);
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif);
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len);
+u8 ionic_lif_asic_rev(struct ionic_lif *lif);
+
+#endif /* _IONIC_LIF_CFG_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_pgtbl.c b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
new file mode 100644
index 000000000000..e74db73c9246
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. 
*/ + +#include <linux/mman.h> +#include <linux/dma-mapping.h> + +#include "ionic_fw.h" +#include "ionic_ibdev.h" + +__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va) +{ + u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1; + u64 dma; + + if (!buf->tbl_pages) + return cpu_to_le64(0); + + if (buf->tbl_pages > 1) + return cpu_to_le64(buf->tbl_dma); + + if (buf->tbl_buf) + dma = le64_to_cpu(buf->tbl_buf[0]); + else + dma = buf->tbl_dma; + + return cpu_to_le64(dma + (va & pg_mask)); +} + +__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va) +{ + if (buf->tbl_pages > 1) { + u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1; + + return cpu_to_be64(va & pg_mask); + } + + return 0; +} + +int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma) +{ + if (unlikely(buf->tbl_pages == buf->tbl_limit)) + return -ENOMEM; + + if (buf->tbl_buf) + buf->tbl_buf[buf->tbl_pages] = cpu_to_le64(dma); + else + buf->tbl_dma = dma; + + ++buf->tbl_pages; + + return 0; +} + +static int ionic_tbl_buf_alloc(struct ionic_ibdev *dev, + struct ionic_tbl_buf *buf) +{ + int rc; + + buf->tbl_size = buf->tbl_limit * sizeof(*buf->tbl_buf); + buf->tbl_buf = kmalloc(buf->tbl_size, GFP_KERNEL); + if (!buf->tbl_buf) + return -ENOMEM; + + buf->tbl_dma = dma_map_single(dev->lif_cfg.hwdev, buf->tbl_buf, + buf->tbl_size, DMA_TO_DEVICE); + rc = dma_mapping_error(dev->lif_cfg.hwdev, buf->tbl_dma); + if (rc) { + kfree(buf->tbl_buf); + return rc; + } + + return 0; +} + +static int ionic_pgtbl_umem(struct ionic_tbl_buf *buf, struct ib_umem *umem) +{ + struct ib_block_iter biter; + u64 page_dma; + int rc; + + rdma_umem_for_each_dma_block(umem, &biter, BIT_ULL(buf->page_size_log2)) { + page_dma = rdma_block_iter_dma_address(&biter); + rc = ionic_pgtbl_page(buf, page_dma); + if (rc) + return rc; + } + + return 0; +} + +void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf) +{ + if (buf->tbl_buf) + dma_unmap_single(dev->lif_cfg.hwdev, buf->tbl_dma, + buf->tbl_size, DMA_TO_DEVICE); + + kfree(buf->tbl_buf); + memset(buf, 0, sizeof(*buf)); +} + +int ionic_pgtbl_init(struct ionic_ibdev *dev, + struct ionic_tbl_buf *buf, + struct ib_umem *umem, + dma_addr_t dma, + int limit, + u64 page_size) +{ + int rc; + + memset(buf, 0, sizeof(*buf)); + + if (umem) { + limit = ib_umem_num_dma_blocks(umem, page_size); + buf->page_size_log2 = order_base_2(page_size); + } + + if (limit < 1) + return -EINVAL; + + buf->tbl_limit = limit; + + /* skip pgtbl if contiguous / direct translation */ + if (limit > 1) { + rc = ionic_tbl_buf_alloc(dev, buf); + if (rc) + return rc; + } + + if (umem) + rc = ionic_pgtbl_umem(buf, umem); + else + rc = ionic_pgtbl_page(buf, dma); + + if (rc) + goto err_unbuf; + + return 0; + +err_unbuf: + ionic_pgtbl_unbuf(dev, buf); + return rc; +} diff --git a/drivers/infiniband/hw/ionic/ionic_queue.c b/drivers/infiniband/hw/ionic/ionic_queue.c new file mode 100644 index 000000000000..aa897ed2a412 --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_queue.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. 
*/ + +#include <linux/dma-mapping.h> + +#include "ionic_queue.h" + +int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev, + int depth, size_t stride) +{ + if (depth < 0 || depth > 0xffff) + return -EINVAL; + + if (stride == 0 || stride > 0x10000) + return -EINVAL; + + if (depth == 0) + depth = 1; + + q->depth_log2 = order_base_2(depth + 1); + q->stride_log2 = order_base_2(stride); + + if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT) + q->depth_log2 = PAGE_SHIFT - q->stride_log2; + + if (q->depth_log2 > 16 || q->stride_log2 > 16) + return -EINVAL; + + q->size = BIT_ULL(q->depth_log2 + q->stride_log2); + q->mask = BIT(q->depth_log2) - 1; + + q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL); + if (!q->ptr) + return -ENOMEM; + + /* it will always be page aligned, but just to be sure... */ + if (!PAGE_ALIGNED(q->ptr)) { + dma_free_coherent(dma_dev, q->size, q->ptr, q->dma); + return -ENOMEM; + } + + q->prod = 0; + q->cons = 0; + q->dbell = 0; + + return 0; +} + +void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev) +{ + dma_free_coherent(dma_dev, q->size, q->ptr, q->dma); +} diff --git a/drivers/infiniband/hw/ionic/ionic_queue.h b/drivers/infiniband/hw/ionic/ionic_queue.h new file mode 100644 index 000000000000..d18020d4cad5 --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_queue.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#ifndef _IONIC_QUEUE_H_ +#define _IONIC_QUEUE_H_ + +#include <linux/io.h> +#include <ionic_regs.h> + +#define IONIC_MAX_DEPTH 0xffff +#define IONIC_MAX_CQ_DEPTH 0xffff +#define IONIC_CQ_RING_ARM IONIC_DBELL_RING_1 +#define IONIC_CQ_RING_SOL IONIC_DBELL_RING_2 + +/** + * struct ionic_queue - Ring buffer used between device and driver + * @size: Size of the buffer, in bytes + * @dma: Dma address of the buffer + * @ptr: Buffer virtual address + * @prod: Driver position in the queue + * @cons: Device position in the queue + * @mask: Capacity of the queue, subtracting the hole + * This value is equal to ((1 << depth_log2) - 1) + * @depth_log2: Log base two size depth of the queue + * @stride_log2: Log base two size of an element in the queue + * @dbell: Doorbell identifying bits + */ +struct ionic_queue { + size_t size; + dma_addr_t dma; + void *ptr; + u16 prod; + u16 cons; + u16 mask; + u8 depth_log2; + u8 stride_log2; + u64 dbell; +}; + +/** + * ionic_queue_init() - Initialize user space queue + * @q: Uninitialized queue structure + * @dma_dev: DMA device for mapping + * @depth: Depth of the queue + * @stride: Size of each element of the queue + * + * Return: status code + */ +int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev, + int depth, size_t stride); + +/** + * ionic_queue_destroy() - Destroy user space queue + * @q: Queue structure + * @dma_dev: DMA device for mapping + * + * Return: status code + */ +void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev); + +/** + * ionic_queue_empty() - Test if queue is empty + * @q: Queue structure + * + * This is only valid for to-device queues. + * + * Return: is empty + */ +static inline bool ionic_queue_empty(struct ionic_queue *q) +{ + return q->prod == q->cons; +} + +/** + * ionic_queue_length() - Get the current length of the queue + * @q: Queue structure + * + * This is only valid for to-device queues. 
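+ * (to-device: the driver produces at @prod; @cons tracks the device's progress)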
+ * + * Return: length + */ +static inline u16 ionic_queue_length(struct ionic_queue *q) +{ + return (q->prod - q->cons) & q->mask; +} + +/** + * ionic_queue_length_remaining() - Get the remaining length of the queue + * @q: Queue structure + * + * This is only valid for to-device queues. + * + * Return: length remaining + */ +static inline u16 ionic_queue_length_remaining(struct ionic_queue *q) +{ + return q->mask - ionic_queue_length(q); +} + +/** + * ionic_queue_full() - Test if queue is full + * @q: Queue structure + * + * This is only valid for to-device queues. + * + * Return: is full + */ +static inline bool ionic_queue_full(struct ionic_queue *q) +{ + return q->mask == ionic_queue_length(q); +} + +/** + * ionic_color_wrap() - Flip the color if prod is wrapped + * @prod: Queue index just after advancing + * @color: Queue color just prior to advancing the index + * + * Return: color after advancing the index + */ +static inline bool ionic_color_wrap(u16 prod, bool color) +{ + /* logical xor color with (prod == 0) */ + return color != (prod == 0); +} + +/** + * ionic_queue_at() - Get the element at the given index + * @q: Queue structure + * @idx: Index in the queue + * + * The index must be within the bounds of the queue. It is not checked here. + * + * Return: pointer to element at index + */ +static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx) +{ + return q->ptr + ((unsigned long)idx << q->stride_log2); +} + +/** + * ionic_queue_at_prod() - Get the element at the producer index + * @q: Queue structure + * + * Return: pointer to element at producer index + */ +static inline void *ionic_queue_at_prod(struct ionic_queue *q) +{ + return ionic_queue_at(q, q->prod); +} + +/** + * ionic_queue_at_cons() - Get the element at the consumer index + * @q: Queue structure + * + * Return: pointer to element at consumer index + */ +static inline void *ionic_queue_at_cons(struct ionic_queue *q) +{ + return ionic_queue_at(q, q->cons); +} + +/** + * ionic_queue_next() - Compute the next index + * @q: Queue structure + * @idx: Index + * + * Return: next index after idx + */ +static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx) +{ + return (idx + 1) & q->mask; +} + +/** + * ionic_queue_produce() - Increase the producer index + * @q: Queue structure + * + * Caller must ensure that the queue is not full. It is not checked here. + */ +static inline void ionic_queue_produce(struct ionic_queue *q) +{ + q->prod = ionic_queue_next(q, q->prod); +} + +/** + * ionic_queue_consume() - Increase the consumer index + * @q: Queue structure + * + * Caller must ensure that the queue is not empty. It is not checked here. + * + * This is only valid for to-device queues. + */ +static inline void ionic_queue_consume(struct ionic_queue *q) +{ + q->cons = ionic_queue_next(q, q->cons); +} + +/** + * ionic_queue_consume_entries() - Increase the consumer index by entries + * @q: Queue structure + * @entries: Number of entries to increment + * + * Caller must ensure that the queue is not empty. It is not checked here. + * + * This is only valid for to-device queues. 
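+ * The caller must also ensure @entries does not exceed ionic_queue_length().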
+ */ +static inline void ionic_queue_consume_entries(struct ionic_queue *q, + u16 entries) +{ + q->cons = (q->cons + entries) & q->mask; +} + +/** + * ionic_queue_dbell_init() - Initialize doorbell bits for queue id + * @q: Queue structure + * @qid: Queue identifying number + */ +static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid) +{ + q->dbell = IONIC_DBELL_QID(qid); +} + +/** + * ionic_queue_dbell_val() - Get current doorbell update value + * @q: Queue structure + * + * Return: current doorbell update value + */ +static inline u64 ionic_queue_dbell_val(struct ionic_queue *q) +{ + return q->dbell | q->prod; +} + +#endif /* _IONIC_QUEUE_H_ */ diff --git a/drivers/infiniband/hw/ionic/ionic_res.h b/drivers/infiniband/hw/ionic/ionic_res.h new file mode 100644 index 000000000000..46c8c584bd9a --- /dev/null +++ b/drivers/infiniband/hw/ionic/ionic_res.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */ + +#ifndef _IONIC_RES_H_ +#define _IONIC_RES_H_ + +#include <linux/kernel.h> +#include <linux/idr.h> + +/** + * struct ionic_resid_bits - Number allocator based on IDA + * + * @inuse: IDA handle + * @inuse_size: Highest ID limit for IDA + */ +struct ionic_resid_bits { + struct ida inuse; + unsigned int inuse_size; +}; + +/** + * ionic_resid_init() - Initialize a resid allocator + * @resid: Uninitialized resid allocator + * @size: Capacity of the allocator + * + * Return: Zero on success, or negative error number + */ +static inline void ionic_resid_init(struct ionic_resid_bits *resid, + unsigned int size) +{ + resid->inuse_size = size; + ida_init(&resid->inuse); +} + +/** + * ionic_resid_destroy() - Destroy a resid allocator + * @resid: Resid allocator + */ +static inline void ionic_resid_destroy(struct ionic_resid_bits *resid) +{ + ida_destroy(&resid->inuse); +} + +/** + * ionic_resid_get_shared() - Allocate an available shared resource id + * @resid: Resid allocator + * @min: Smallest valid resource id + * @size: One after largest valid resource id + * + * Return: Resource id, or negative error number + */ +static inline int ionic_resid_get_shared(struct ionic_resid_bits *resid, + unsigned int min, + unsigned int size) +{ + return ida_alloc_range(&resid->inuse, min, size - 1, GFP_KERNEL); +} + +/** + * ionic_resid_get() - Allocate an available resource id + * @resid: Resid allocator + * + * Return: Resource id, or negative error number + */ +static inline int ionic_resid_get(struct ionic_resid_bits *resid) +{ + return ionic_resid_get_shared(resid, 0, resid->inuse_size); +} + +/** + * ionic_resid_put() - Free a resource id + * @resid: Resid allocator + * @id: Resource id + */ +static inline void ionic_resid_put(struct ionic_resid_bits *resid, int id) +{ + ida_free(&resid->inuse, id); +} + +/** + * ionic_bitid_to_qid() - Transform a resource bit index into a queue id + * @bitid: Bit index + * @qgrp_shift: Log2 number of queues per queue group + * @half_qid_shift: Log2 of half the total number of queues + * + * Return: Queue id + * + * Udma-constrained queues (QPs and CQs) are associated with their udma by + * queue group. Even queue groups are associated with udma0, and odd queue + * groups with udma1. + * + * For allocating queue ids, we want to arrange the bits into two halves, + * with the even queue groups of udma0 in the lower half of the bitset, + * and the odd queue groups of udma1 in the upper half of the bitset. 
+ * Then, one or two calls of find_next_zero_bit can examine all the bits + * for queues of an entire udma. + * + * For example, assuming eight queue groups with qgrp qids per group: + * + * bitid 0*qgrp..1*qgrp-1 : qid 0*qgrp..1*qgrp-1 + * bitid 1*qgrp..2*qgrp-1 : qid 2*qgrp..3*qgrp-1 + * bitid 2*qgrp..3*qgrp-1 : qid 4*qgrp..5*qgrp-1 + * bitid 3*qgrp..4*qgrp-1 : qid 6*qgrp..7*qgrp-1 + * bitid 4*qgrp..5*qgrp-1 : qid 1*qgrp..2*qgrp-1 + * bitid 5*qgrp..6*qgrp-1 : qid 3*qgrp..4*qgrp-1 + * bitid 6*qgrp..7*qgrp-1 : qid 5*qgrp..6*qgrp-1 + * bitid 7*qgrp..8*qgrp-1 : qid 7*qgrp..8*qgrp-1 + * + * There are three important ranges of bits in the qid. There is the udma + * bit "U" at qgrp_shift, which is the least significant bit of the group + * index, and determines which udma a queue is associated with. + * The bits of lesser significance we can call the idx bits "I", which are + * the index of the queue within the group. The bits of greater significance + * we can call the grp bits "G", which are other bits of the group index that + * do not determine the udma. Those bits are just rearranged in the bit index + * in the bitset. A bitid has the udma bit in the most significant place, + * then the grp bits, then the idx bits. + * + * bitid: 00000000000000 U GGG IIIIII + * qid: 00000000000000 GGG U IIIIII + * + * Transforming from bit index to qid, or from qid to bit index, can be + * accomplished by rearranging the bits by masking and shifting. + */ +static inline u32 ionic_bitid_to_qid(u32 bitid, u8 qgrp_shift, + u8 half_qid_shift) +{ + u32 udma_bit = + (bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift); + u32 grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1; + u32 idx_bits = bitid & (BIT(qgrp_shift) - 1); + + return grp_bits | udma_bit | idx_bits; +} + +/** + * ionic_qid_to_bitid() - Transform a queue id into a resource bit index + * @qid: queue index + * @qgrp_shift: Log2 number of queues per queue group + * @half_qid_shift: Log2 of half the total number of queues + * + * Return: Resource bit index + * + * This is the inverse of ionic_bitid_to_qid(). + */ +static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift) +{ + u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift); + u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1; + u32 idx_bits = qid & (BIT(qgrp_shift) - 1); + + return udma_bit | grp_bits | idx_bits; +} +#endif /* _IONIC_RES_H_ */ |
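As a quick illustration of the bitid/qid mapping documented in ionic_res.h above, the following is a minimal standalone C sketch, not part of the driver, that replays both transforms for a hypothetical layout of 8 queue groups of 64 qids each (qgrp_shift = 6, half_qid_shift = 8). The local bitid_to_qid()/qid_to_bitid() helpers mirror ionic_bitid_to_qid()/ionic_qid_to_bitid(); BIT() and GENMASK() are redefined here only so the sketch builds outside the kernel, and the example values (qid 197) are chosen purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* same transform as ionic_bitid_to_qid() in ionic_res.h */
static uint32_t bitid_to_qid(uint32_t bitid, uint8_t qgrp_shift, uint8_t half_qid_shift)
{
	uint32_t udma_bit = (bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift);
	uint32_t grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1;
	uint32_t idx_bits = bitid & (BIT(qgrp_shift) - 1);

	return grp_bits | udma_bit | idx_bits;
}

/* same transform as ionic_qid_to_bitid() in ionic_res.h */
static uint32_t qid_to_bitid(uint32_t qid, uint8_t qgrp_shift, uint8_t half_qid_shift)
{
	uint32_t udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
	uint32_t grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
	uint32_t idx_bits = qid & (BIT(qgrp_shift) - 1);

	return udma_bit | grp_bits | idx_bits;
}

int main(void)
{
	/* qid 197 = group 3 (odd, so udma1), index 5 within the group */
	uint32_t qid = 197, bitid;

	bitid = qid_to_bitid(qid, 6, 8);
	printf("qid %u -> bitid %u -> qid %u\n", qid, bitid, bitid_to_qid(bitid, 6, 8));
	/* prints: qid 197 -> bitid 325 -> qid 197; group 3 lands in the udma1 (upper) half */
	return 0;
}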