Diffstat (limited to 'drivers/infiniband/hw/efa/efa_verbs.c')
| -rw-r--r-- | drivers/infiniband/hw/efa/efa_verbs.c | 609 |
1 file changed, 480 insertions, 129 deletions
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index be6d3ff0f1be..22d3e25c3b9d 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 
@@ -11,8 +13,12 @@
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/uverbs_ioctl.h>
+#define UVERBS_MODULE_NAME efa_ib
+#include <rdma/uverbs_named_ioctl.h>
+#include <rdma/ib_user_ioctl_cmds.h>
 
 #include "efa.h"
+#include "efa_io_defs.h"
 
 enum {
 	EFA_MMAP_DMA_PAGE = 0,
@@ -20,17 +26,27 @@ enum {
 	EFA_MMAP_IO_NC,
 };
 
-#define EFA_AENQ_ENABLED_GROUPS \
-	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
-	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-
 struct efa_user_mmap_entry {
 	struct rdma_user_mmap_entry rdma_entry;
 	u64 address;
 	u8 mmap_flag;
 };
 
-#define EFA_DEFINE_STATS(op) \
+#define EFA_DEFINE_DEVICE_STATS(op) \
+	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
+	op(EFA_COMPLETED_CMDS, "completed_cmds") \
+	op(EFA_CMDS_ERR, "cmds_err") \
+	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
+	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
+	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
+	op(EFA_CREATE_QP_ERR, "create_qp_err") \
+	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
+	op(EFA_REG_MR_ERR, "reg_mr_err") \
+	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
+	op(EFA_CREATE_AH_ERR, "create_ah_err") \
+	op(EFA_MMAP_ERR, "mmap_err")
+
+#define EFA_DEFINE_PORT_STATS(op) \
 	op(EFA_TX_BYTES, "tx_bytes") \
 	op(EFA_TX_PKTS, "tx_pkts") \
 	op(EFA_RX_BYTES, "rx_bytes") \
@@ -44,30 +60,38 @@ struct efa_user_mmap_entry {
 	op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
 	op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
 	op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
-	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
-	op(EFA_COMPLETED_CMDS, "completed_cmds") \
-	op(EFA_CMDS_ERR, "cmds_err") \
-	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
-	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
-	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
-	op(EFA_CREATE_QP_ERR, "create_qp_err") \
-	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
-	op(EFA_REG_MR_ERR, "reg_mr_err") \
-	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
-	op(EFA_CREATE_AH_ERR, "create_ah_err") \
-	op(EFA_MMAP_ERR, "mmap_err")
+	op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
+	op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
+	op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
+	op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
+	op(EFA_RETRANS_BYTES, "retrans_bytes") \
+	op(EFA_RETRANS_PKTS, "retrans_pkts") \
+	op(EFA_RETRANS_TIMEOUT_EVENS, "retrans_timeout_events") \
+	op(EFA_UNRESPONSIVE_REMOTE_EVENTS, "unresponsive_remote_events") \
+	op(EFA_IMPAIRED_REMOTE_CONN_EVENTS, "impaired_remote_conn_events") \
 
 #define EFA_STATS_ENUM(ename, name) ename,
-#define EFA_STATS_STR(ename, name) [ename] = name,
+#define EFA_STATS_STR(ename, nam) \
+	[ename].name = nam,
+
+enum efa_hw_device_stats {
+	EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
+};
 
-enum efa_hw_stats {
-	EFA_DEFINE_STATS(EFA_STATS_ENUM)
+static const struct rdma_stat_desc efa_device_stats_descs[] = {
+	EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
 };
 
-static const char *const efa_stats_names[] = {
-	EFA_DEFINE_STATS(EFA_STATS_STR)
+enum efa_hw_port_stats {
+	EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
 };
 
+static const struct rdma_stat_desc efa_port_stats_descs[] = {
+	EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
+};
+
+#define EFA_DEFAULT_LINK_SPEED_GBPS 100
+
 #define EFA_CHUNK_PAYLOAD_SHIFT       12
 #define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
 #define EFA_CHUNK_PAYLOAD_PTR_SIZE    8
@@ -229,12 +253,26 @@ int efa_query_device(struct ib_device *ibdev,
 		resp.max_rq_wr = dev_attr->max_rq_depth;
 		resp.max_rdma_size = dev_attr->max_rdma_size;
 
+		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
+		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
 		if (EFA_DEV_CAP(dev, RDMA_READ))
 			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
 
 		if (EFA_DEV_CAP(dev, RNR_RETRY))
 			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
 
+		if (EFA_DEV_CAP(dev, DATA_POLLING_128))
+			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;
+
+		if (EFA_DEV_CAP(dev, RDMA_WRITE))
+			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
+
+		if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
+			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;
+
+		if (dev->neqs)
+			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
+
 		err = ib_copy_to_udata(udata, &resp,
 				       min(sizeof(resp), udata->outlen));
 		if (err) {
@@ -247,10 +285,47 @@
 	return 0;
 }
 
+static void efa_link_gbps_to_speed_and_width(u16 gbps,
+					     enum ib_port_speed *speed,
+					     enum ib_port_width *width)
+{
+	if (gbps >= 400) {
+		*width = IB_WIDTH_8X;
+		*speed = IB_SPEED_HDR;
+	} else if (gbps >= 200) {
+		*width = IB_WIDTH_4X;
+		*speed = IB_SPEED_HDR;
+	} else if (gbps >= 120) {
+		*width = IB_WIDTH_12X;
+		*speed = IB_SPEED_FDR10;
+	} else if (gbps >= 100) {
+		*width = IB_WIDTH_4X;
+		*speed = IB_SPEED_EDR;
+	} else if (gbps >= 60) {
+		*width = IB_WIDTH_12X;
+		*speed = IB_SPEED_DDR;
+	} else if (gbps >= 50) {
+		*width = IB_WIDTH_1X;
+		*speed = IB_SPEED_HDR;
+	} else if (gbps >= 40) {
+		*width = IB_WIDTH_4X;
+		*speed = IB_SPEED_FDR10;
+	} else if (gbps >= 30) {
+		*width = IB_WIDTH_12X;
+		*speed = IB_SPEED_SDR;
+	} else {
+		*width = IB_WIDTH_1X;
+		*speed = IB_SPEED_EDR;
+	}
+}
+
 int efa_query_port(struct ib_device *ibdev, u32 port,
 		   struct ib_port_attr *props)
 {
 	struct efa_dev *dev = to_edev(ibdev);
+	enum ib_port_speed link_speed;
+	enum ib_port_width link_width;
+	u16 link_gbps;
 
 	props->lmc = 1;
 
@@ -258,8 +333,10 @@ int efa_query_port(struct ib_device *ibdev, u32 port,
 	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
-	props->active_speed = IB_SPEED_EDR;
-	props->active_width = IB_WIDTH_4X;
+	link_gbps = dev->dev_attr.max_link_speed_gbps ?: EFA_DEFAULT_LINK_SPEED_GBPS;
+	efa_link_gbps_to_speed_and_width(link_gbps, &link_speed, &link_width);
+	props->active_speed = link_speed;
+	props->active_width = link_width;
 	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
 	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
 	props->max_msg_sz = dev->dev_attr.mtu;
@@ -425,12 +502,12 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 
 	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
 
-	efa_qp_user_mmap_entries_remove(qp);
-
 	err = efa_destroy_qp_handle(dev, qp->qp_handle);
 	if (err)
 		return err;
 
+	efa_qp_user_mmap_entries_remove(qp);
+
 	if (qp->rq_cpu_addr) {
 		ibdev_dbg(&dev->ibdev,
 			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
@@ -440,7 +517,6 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 			       qp->rq_size, DMA_TO_DEVICE);
 	}
 
-	kfree(qp);
 	return 0;
 }
 
@@ -491,7 +567,7 @@ static int qp_mmap_entries_setup(struct efa_qp *qp,
 	address = dev->mem_bar_addr + resp->llq_desc_offset;
 	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
-			    (resp->llq_desc_offset & ~PAGE_MASK));
+			    offset_in_page(resp->llq_desc_offset));
 
 	qp->llq_desc_mmap_entry =
 		efa_user_mmap_entry_insert(&ucontext->ibucontext,
@@ -599,17 +675,17 @@ static int efa_qp_validate_attr(struct efa_dev *dev,
 	return 0;
 }
 
-struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
-			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata)
+int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
+		  struct ib_udata *udata)
 {
 	struct efa_com_create_qp_params create_qp_params = {};
 	struct efa_com_create_qp_result create_qp_resp;
-	struct efa_dev *dev = to_edev(ibpd->device);
+	struct efa_dev *dev = to_edev(ibqp->device);
 	struct efa_ibv_create_qp_resp resp = {};
 	struct efa_ibv_create_qp cmd = {};
+	struct efa_qp *qp = to_eqp(ibqp);
 	struct efa_ucontext *ucontext;
-	struct efa_qp *qp;
+	u16 supported_efa_flags = 0;
 	int err;
 
 	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
@@ -647,21 +723,25 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 		goto err_out;
 	}
 
-	if (cmd.comp_mask) {
+	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_98)) {
 		ibdev_dbg(&dev->ibdev,
 			  "Incompatible ABI params, unknown fields in udata\n");
 		err = -EINVAL;
 		goto err_out;
 	}
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp) {
-		err = -ENOMEM;
+	if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
+		supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
+
+	if (cmd.flags & ~supported_efa_flags) {
+		ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
+			  cmd.flags, supported_efa_flags);
+		err = -EOPNOTSUPP;
 		goto err_out;
 	}
 
 	create_qp_params.uarn = ucontext->uarn;
-	create_qp_params.pd = to_epd(ibpd)->pdn;
+	create_qp_params.pd = to_epd(ibqp->pd)->pdn;
 
 	if (init_attr->qp_type == IB_QPT_UD) {
 		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
@@ -672,7 +752,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 			  "Unsupported qp type %d driver qp type %d\n",
 			  init_attr->qp_type, cmd.driver_qp_type);
 		err = -EOPNOTSUPP;
-		goto err_free_qp;
+		goto err_out;
 	}
 
 	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
@@ -690,7 +770,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 						    qp->rq_size, DMA_TO_DEVICE);
 		if (!qp->rq_cpu_addr) {
 			err = -ENOMEM;
-			goto err_free_qp;
+			goto err_out;
 		}
 
 		ibdev_dbg(&dev->ibdev,
@@ -699,6 +779,11 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 		create_qp_params.rq_base_addr = qp->rq_dma_addr;
 	}
 
+	create_qp_params.sl = cmd.sl;
+
+	if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
+		create_qp_params.unsolicited_write_recv = true;
+
 	err = efa_com_create_qp(&dev->edev, &create_qp_params,
 				&create_qp_resp);
 	if (err)
@@ -717,7 +802,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 	qp->qp_handle = create_qp_resp.qp_handle;
 	qp->ibqp.qp_num = create_qp_resp.qp_num;
-	qp->ibqp.qp_type = init_attr->qp_type;
 	qp->max_send_wr = init_attr->cap.max_send_wr;
 	qp->max_recv_wr = init_attr->cap.max_recv_wr;
 	qp->max_send_sge = init_attr->cap.max_send_sge;
@@ -737,7 +821,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
 
-	return &qp->ibqp;
+	return 0;
 
 err_remove_mmap_entries:
 	efa_qp_user_mmap_entries_remove(qp);
@@ -747,11 +831,9 @@ err_free_mapped:
 	if (qp->rq_size)
 		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
 				qp->rq_size, DMA_TO_DEVICE);
-err_free_qp:
-	kfree(qp);
 err_out:
 	atomic64_inc(&dev->stats.create_qp_err);
-	return ERR_PTR(err);
+	return err;
 }
 
 static const struct {
@@ -985,6 +1067,12 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
 	return efa_com_destroy_cq(&dev->edev, &params);
 }
 
+static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
+{
+	rdma_user_mmap_entry_remove(cq->db_mmap_entry);
+	rdma_user_mmap_entry_remove(cq->mmap_entry);
+}
+
 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct efa_dev *dev = to_edev(ibcq->device);
@@ -994,15 +1082,28 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
 		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
 
-	rdma_user_mmap_entry_remove(cq->mmap_entry);
 	efa_destroy_cq_idx(dev, cq->cq_idx);
-	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
-			DMA_FROM_DEVICE);
+	efa_cq_user_mmap_entries_remove(cq);
+	if (cq->eq) {
+		xa_erase(&dev->cqs_xa, cq->cq_idx);
+		synchronize_irq(cq->eq->irq.irqn);
+	}
+
+	if (cq->umem)
+		ib_umem_release(cq->umem);
+	else
+		efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
 	return 0;
 }
 
+static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
+{
+	return &dev->eqs[vec];
+}
+
 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
-				 struct efa_ibv_create_cq_resp *resp)
+				 struct efa_ibv_create_cq_resp *resp,
+				 bool db_valid)
 {
 	resp->q_mmap_size = cq->size;
 	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
@@ -1012,22 +1113,39 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
 	if (!cq->mmap_entry)
 		return -ENOMEM;
 
+	if (db_valid) {
+		cq->db_mmap_entry =
+			efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+						   dev->db_bar_addr + resp->db_off,
+						   PAGE_SIZE, EFA_MMAP_IO_NC,
+						   &resp->db_mmap_key);
+		if (!cq->db_mmap_entry) {
+			rdma_user_mmap_entry_remove(cq->mmap_entry);
+			return -ENOMEM;
+		}
+
+		resp->db_off &= ~PAGE_MASK;
+		resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
+	}
+
 	return 0;
 }
 
-int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
-		  struct ib_udata *udata)
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		       struct ib_umem *umem, struct uverbs_attr_bundle *attrs)
 {
+	struct ib_udata *udata = &attrs->driver_udata;
 	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
 		udata, struct efa_ucontext, ibucontext);
+	struct efa_com_create_cq_params params = {};
 	struct efa_ibv_create_cq_resp resp = {};
-	struct efa_com_create_cq_params params;
 	struct efa_com_create_cq_result result;
 	struct ib_device *ibdev = ibcq->device;
 	struct efa_dev *dev = to_edev(ibdev);
 	struct efa_ibv_create_cq cmd = {};
 	struct efa_cq *cq = to_ecq(ibcq);
 	int entries = attr->cqe;
+	bool set_src_addr;
 	int err;
 
 	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
@@ -1066,14 +1184,17 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		goto err_out;
 	}
 
-	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
+	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
 		ibdev_dbg(ibdev,
 			  "Incompatible ABI params, unknown fields in udata\n");
 		err = -EINVAL;
 		goto err_out;
 	}
 
-	if (!cmd.cq_entry_size) {
+	set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
+	if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
+	    (set_src_addr ||
+	     cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
 		ibdev_dbg(ibdev,
 			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
 		err = -EINVAL;
 		goto err_out;
@@ -1090,41 +1211,79 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	cq->ucontext = ucontext;
 	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
-	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
-					 DMA_FROM_DEVICE);
-	if (!cq->cpu_addr) {
-		err = -ENOMEM;
-		goto err_out;
+
+	if (umem) {
+		if (umem->length < cq->size) {
+			ibdev_dbg(&dev->ibdev, "External memory too small\n");
+			err = -EINVAL;
+			goto err_out;
+		}
+
+		if (!ib_umem_is_contiguous(umem)) {
+			ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
+			err = -EINVAL;
+			goto err_out;
+		}
+
+		cq->cpu_addr = NULL;
+		cq->dma_addr = ib_umem_start_dma_addr(umem);
+		cq->umem = umem;
+	} else {
+		cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
+						 DMA_FROM_DEVICE);
+		if (!cq->cpu_addr) {
+			err = -ENOMEM;
+			goto err_out;
+		}
 	}
 
 	params.uarn = cq->ucontext->uarn;
-	params.cq_depth = entries;
+	params.sub_cq_depth = entries;
 	params.dma_addr = cq->dma_addr;
 	params.entry_size_in_bytes = cmd.cq_entry_size;
 	params.num_sub_cqs = cmd.num_sub_cqs;
+	params.set_src_addr = set_src_addr;
+	if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
+		cq->eq = efa_vec2eq(dev, attr->comp_vector);
+		params.eqn = cq->eq->eeq.eqn;
+		params.interrupt_mode_enabled = true;
+	}
+
 	err = efa_com_create_cq(&dev->edev, &params, &result);
 	if (err)
 		goto err_free_mapped;
 
+	resp.db_off = result.db_off;
 	resp.cq_idx = result.cq_idx;
 	cq->cq_idx = result.cq_idx;
 	cq->ibcq.cqe = result.actual_depth;
 	WARN_ON_ONCE(entries != result.actual_depth);
 
-	err = cq_mmap_entries_setup(dev, cq, &resp);
+	if (!umem)
+		err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+
 	if (err) {
 		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
 			  cq->cq_idx);
 		goto err_destroy_cq;
 	}
 
+	if (cq->eq) {
+		err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
+		if (err) {
+			ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
+				  cq->cq_idx);
+			goto err_remove_mmap;
+		}
+	}
+
 	if (udata->outlen) {
 		err = ib_copy_to_udata(udata, &resp,
 				       min(sizeof(resp), udata->outlen));
 		if (err) {
 			ibdev_dbg(ibdev,
 				  "Failed to copy udata for create_cq\n");
-			goto err_remove_mmap;
+			goto err_xa_erase;
 		}
 	}
 
@@ -1133,19 +1292,28 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
 	return 0;
 
+err_xa_erase:
+	if (cq->eq)
+		xa_erase(&dev->cqs_xa, cq->cq_idx);
 err_remove_mmap:
-	rdma_user_mmap_entry_remove(cq->mmap_entry);
+	efa_cq_user_mmap_entries_remove(cq);
 err_destroy_cq:
 	efa_destroy_cq_idx(dev, cq->cq_idx);
 err_free_mapped:
-	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
-			DMA_FROM_DEVICE);
-
+	if (!umem)
+		efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+				DMA_FROM_DEVICE);
 err_out:
 	atomic64_inc(&dev->stats.create_cq_err);
 	return err;
 }
 
+int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		  struct uverbs_attr_bundle *attrs)
+{
+	return efa_create_cq_umem(ibcq, attr, NULL, attrs);
+}
+
 static int umem_to_page_list(struct efa_dev *dev,
 			     struct ib_umem *umem,
 			     u64 *page_list,
@@ -1335,7 +1503,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev,
  */
 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
 {
-	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
+	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
 	struct scatterlist *sgl;
 	int sg_dma_cnt, err;
 
@@ -1491,54 +1659,50 @@ static int efa_create_pbl(struct efa_dev *dev,
 	return 0;
 }
 
-struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
-			 u64 virt_addr, int access_flags,
-			 struct ib_udata *udata)
+static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
+				   struct ib_udata *udata)
 {
 	struct efa_dev *dev = to_edev(ibpd->device);
-	struct efa_com_reg_mr_params params = {};
-	struct efa_com_reg_mr_result result = {};
-	struct pbl_context pbl;
 	int supp_access_flags;
-	unsigned int pg_sz;
 	struct efa_mr *mr;
-	int inline_size;
-	int err;
 
 	if (udata && udata->inlen &&
 	    !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
 		ibdev_dbg(&dev->ibdev,
 			  "Incompatible ABI params, udata not cleared\n");
-		err = -EINVAL;
-		goto err_out;
+		return ERR_PTR(-EINVAL);
 	}
 
 	supp_access_flags =
 		IB_ACCESS_LOCAL_WRITE |
-		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);
+		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
+		(EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);
 
 	access_flags &= ~IB_ACCESS_OPTIONAL;
 	if (access_flags & ~supp_access_flags) {
 		ibdev_dbg(&dev->ibdev,
 			  "Unsupported access flags[%#x], supported[%#x]\n",
 			  access_flags, supp_access_flags);
-		err = -EOPNOTSUPP;
-		goto err_out;
+		return ERR_PTR(-EOPNOTSUPP);
 	}
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err_out;
-	}
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
-	if (IS_ERR(mr->umem)) {
-		err = PTR_ERR(mr->umem);
-		ibdev_dbg(&dev->ibdev,
-			  "Failed to pin and map user space memory[%d]\n", err);
-		goto err_free;
-	}
+	return mr;
+}
+
+static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
+			   u64 length, u64 virt_addr, int access_flags)
+{
+	struct efa_dev *dev = to_edev(ibpd->device);
+	struct efa_com_reg_mr_params params = {};
+	struct efa_com_reg_mr_result result = {};
+	struct pbl_context pbl;
+	unsigned int pg_sz;
+	int inline_size;
+	int err;
 
 	params.pd = to_epd(ibpd)->pdn;
 	params.iova = virt_addr;
@@ -1549,10 +1713,9 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 				       dev->dev_attr.page_size_cap,
 				       virt_addr);
 	if (!pg_sz) {
-		err = -EOPNOTSUPP;
 		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
 			  dev->dev_attr.page_size_cap);
-		goto err_unmap;
+		return -EOPNOTSUPP;
 	}
 
 	params.page_shift = order_base_2(pg_sz);
@@ -1566,31 +1729,120 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	if (params.page_num <= inline_size) {
 		err = efa_create_inline_pbl(dev, mr, &params);
 		if (err)
-			goto err_unmap;
+			return err;
 
 		err = efa_com_register_mr(&dev->edev, &params, &result);
 		if (err)
-			goto err_unmap;
+			return err;
 	} else {
 		err = efa_create_pbl(dev, &pbl, mr, &params);
 		if (err)
-			goto err_unmap;
+			return err;
 
 		err = efa_com_register_mr(&dev->edev, &params, &result);
 		pbl_destroy(dev, &pbl);
 
 		if (err)
-			goto err_unmap;
+			return err;
 	}
 
 	mr->ibmr.lkey = result.l_key;
 	mr->ibmr.rkey = result.r_key;
 	mr->ibmr.length = length;
+	mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
+	mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
+	mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
+	mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
+	mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
+	mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
 	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
 
+	return 0;
+}
+
+struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+				     u64 length, u64 virt_addr,
+				     int fd, int access_flags,
+				     struct ib_dmah *dmah,
+				     struct uverbs_attr_bundle *attrs)
+{
+	struct efa_dev *dev = to_edev(ibpd->device);
+	struct ib_umem_dmabuf *umem_dmabuf;
+	struct efa_mr *mr;
+	int err;
+
+	if (dmah) {
+		err = -EOPNOTSUPP;
+		goto err_out;
+	}
+
+	mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
+	if (IS_ERR(mr)) {
+		err = PTR_ERR(mr);
+		goto err_out;
+	}
+
+	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
+						access_flags);
+	if (IS_ERR(umem_dmabuf)) {
+		err = PTR_ERR(umem_dmabuf);
+		ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+			  umem_dmabuf);
+		goto err_free;
+	}
+
+	mr->umem = &umem_dmabuf->umem;
+	err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+	if (err)
+		goto err_release;
+
+	return &mr->ibmr;
+
+err_release:
+	ib_umem_release(mr->umem);
+err_free:
+	kfree(mr);
+err_out:
+	atomic64_inc(&dev->stats.reg_mr_err);
+	return ERR_PTR(err);
+}
+
+struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+			 u64 virt_addr, int access_flags,
+			 struct ib_dmah *dmah,
+			 struct ib_udata *udata)
+{
+	struct efa_dev *dev = to_edev(ibpd->device);
+	struct efa_mr *mr;
+	int err;
+
+	if (dmah) {
+		err = -EOPNOTSUPP;
+		goto err_out;
+	}
+
+	mr = efa_alloc_mr(ibpd, access_flags, udata);
+	if (IS_ERR(mr)) {
+		err = PTR_ERR(mr);
+		goto err_out;
+	}
+
+	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
+	if (IS_ERR(mr->umem)) {
+		err = PTR_ERR(mr->umem);
+		ibdev_dbg(&dev->ibdev,
+			  "Failed to pin and map user space memory[%pe]\n",
+			  mr->umem);
+		goto err_free;
+	}
+
+	err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+	if (err)
+		goto err_release;
+
 	return &mr->ibmr;
 
-err_unmap:
+err_release:
 	ib_umem_release(mr->umem);
 err_free:
 	kfree(mr);
@@ -1599,6 +1851,39 @@ err_out:
 	return ERR_PTR(err);
 }
 
+static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
+{
+	struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
+	struct efa_mr *mr = to_emr(ibmr);
+	u16 ic_id_validity = 0;
+	int ret;
+
+	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+			     &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
+	if (ret)
+		return ret;
+
+	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+			     &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
+	if (ret)
+		return ret;
+
+	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+			     &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
+	if (ret)
+		return ret;
+
+	if (mr->ic_info.recv_ic_id_valid)
+		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
+	if (mr->ic_info.rdma_read_ic_id_valid)
+		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
+	if (mr->ic_info.rdma_recv_ic_id_valid)
+		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;
+
+	return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+			      &ic_id_validity, sizeof(ic_id_validity));
+}
+
 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct efa_dev *dev = to_edev(ibmr->device);
@@ -1904,33 +2189,55 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
 	return 0;
 }
 
-struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
+					      u32 port_num)
 {
-	return rdma_alloc_hw_stats_struct(efa_stats_names,
-					  ARRAY_SIZE(efa_stats_names),
+	return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
+					  ARRAY_SIZE(efa_port_stats_descs),
 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
 struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
 {
-	/*
-	 * It is probably a bug that efa reports its port stats as device
-	 * stats
-	 */
-	return efa_alloc_hw_port_stats(ibdev, 0);
+	return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
+					  ARRAY_SIZE(efa_device_stats_descs),
+					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
-int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
-		     u32 port_num, int index)
+static int efa_fill_device_stats(struct efa_dev *dev,
+				 struct rdma_hw_stats *stats)
+{
+	struct efa_com_stats_admin *as = &dev->edev.aq.stats;
+	struct efa_stats *s = &dev->stats;
+
+	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
+	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
+	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
+
+	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
+	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
+	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
+	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
+	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
+	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
+		atomic64_read(&s->alloc_ucontext_err);
+	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
+	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
+
+	return ARRAY_SIZE(efa_device_stats_descs);
+}
+
+static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
+			       u32 port_num)
 {
 	struct efa_com_get_stats_params params = {};
 	union efa_com_get_stats_result result;
-	struct efa_dev *dev = to_edev(ibdev);
+	struct efa_com_rdma_write_stats *rws;
 	struct efa_com_rdma_read_stats *rrs;
 	struct efa_com_messages_stats *ms;
+	struct efa_com_network_stats *ns;
 	struct efa_com_basic_stats *bs;
-	struct efa_com_stats_admin *as;
-	struct efa_stats *s;
 	int err;
 
 	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
@@ -1969,24 +2276,41 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
 	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
 	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
 
-	as = &dev->edev.aq.stats;
-	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
-	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
-	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
-	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
+	if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
+		params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
+		err = efa_com_get_stats(&dev->edev, &params, &result);
+		if (err)
+			return err;
 
-	s = &dev->stats;
-	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
-	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
-	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
-	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
-	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
-	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
-		atomic64_read(&s->alloc_ucontext_err);
-	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
-	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
+		rws = &result.rdma_write_stats;
+		stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
+		stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
+		stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
+		stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
+	}
+
+	params.type = EFA_ADMIN_GET_STATS_TYPE_NETWORK;
+	err = efa_com_get_stats(&dev->edev, &params, &result);
+	if (err)
+		return err;
+
+	ns = &result.network_stats;
+	stats->value[EFA_RETRANS_BYTES] = ns->retrans_bytes;
+	stats->value[EFA_RETRANS_PKTS] = ns->retrans_pkts;
+	stats->value[EFA_RETRANS_TIMEOUT_EVENS] = ns->retrans_timeout_events;
+	stats->value[EFA_UNRESPONSIVE_REMOTE_EVENTS] = ns->unresponsive_remote_events;
+	stats->value[EFA_IMPAIRED_REMOTE_CONN_EVENTS] = ns->impaired_remote_conn_events;
 
-	return ARRAY_SIZE(efa_stats_names);
+	return ARRAY_SIZE(efa_port_stats_descs);
+}
+
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+		     u32 port_num, int index)
+{
+	if (port_num)
+		return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
+	else
+		return efa_fill_device_stats(to_edev(ibdev), stats);
 }
 
 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
@@ -1995,3 +2319,30 @@ enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
 	return IB_LINK_LAYER_UNSPECIFIED;
 }
 
+DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
+			    UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
+					    UVERBS_OBJECT_MR,
+					    UVERBS_ACCESS_READ,
+					    UA_MANDATORY),
+			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+						UVERBS_ATTR_TYPE(u16),
+						UA_MANDATORY),
+			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+						UVERBS_ATTR_TYPE(u16),
+						UA_MANDATORY),
+			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+						UVERBS_ATTR_TYPE(u16),
+						UA_MANDATORY),
+			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+						UVERBS_ATTR_TYPE(u16),
+						UA_MANDATORY));
+
+ADD_UVERBS_METHODS(efa_mr,
+		   UVERBS_OBJECT_MR,
+		   &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));
+
+const struct uapi_definition efa_uapi_defs[] = {
+	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
+				&efa_mr),
+	{},
+};
