Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_verbs.c	1812
1 file changed, 1002 insertions(+), 810 deletions(-)
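The rewritten registration path near the bottom of this diff derives the node GUID from the netdev MAC address via addrconf_addr_eui48(). For reference, the EUI-48 to EUI-64 expansion that helper performs looks like the following standalone sketch (illustrative only; mac_to_eui64 is a made-up name here, not a kernel symbol):

#include <stdint.h>
#include <stdio.h>

/* Expand a 6-byte MAC into an 8-byte EUI-64 the way
 * addrconf_addr_eui48() does: flip the universal/local
 * bit and insert the fixed ff:fe filler in the middle.
 */
static void mac_to_eui64(const uint8_t mac[6], uint8_t eui[8])
{
	eui[0] = mac[0] ^ 0x02;	/* toggle the u/l bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;		/* fixed filler bytes */
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x0a, 0x0b, 0x0c, 0x0d };
	uint8_t eui[8];
	int i;

	mac_to_eui64(mac, eui);
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
	return 0;
}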
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index af90a7d42b96..38d8c408320f 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1,201 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- *	- Redistributions of source code must retain the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer.
- *
- *	- Redistributions in binary form must reproduce the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer in the documentation and/or other materials
- *	  provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
  */

 #include <linux/dma-mapping.h>
 #include <net/addrconf.h>
+#include <rdma/uverbs_ioctl.h>
+
 #include "rxe.h"
-#include "rxe_loc.h"
 #include "rxe_queue.h"
 #include "rxe_hw_counters.h"

-static int rxe_query_device(struct ib_device *dev,
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
+
+/* dev */
+static int rxe_query_device(struct ib_device *ibdev,
 			    struct ib_device_attr *attr,
-			    struct ib_udata *uhw)
+			    struct ib_udata *udata)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err;

-	if (uhw->inlen || uhw->outlen)
-		return -EINVAL;
+	if (udata->inlen || udata->outlen) {
+		rxe_dbg_dev(rxe, "malformed udata\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	memcpy(attr, &rxe->attr, sizeof(*attr));

-	*attr = rxe->attr;
 	return 0;
-}

-static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
-				      u8 *active_width)
-{
-	if (speed <= 1000) {
-		*active_width = IB_WIDTH_1X;
-		*active_speed = IB_SPEED_SDR;
-	} else if (speed <= 10000) {
-		*active_width = IB_WIDTH_1X;
-		*active_speed = IB_SPEED_FDR10;
-	} else if (speed <= 20000) {
-		*active_width = IB_WIDTH_4X;
-		*active_speed = IB_SPEED_DDR;
-	} else if (speed <= 30000) {
-		*active_width = IB_WIDTH_4X;
-		*active_speed = IB_SPEED_QDR;
-	} else if (speed <= 40000) {
-		*active_width = IB_WIDTH_4X;
-		*active_speed = IB_SPEED_FDR10;
-	} else {
-		*active_width = IB_WIDTH_4X;
-		*active_speed = IB_SPEED_EDR;
-	}
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

-static int rxe_query_port(struct ib_device *dev,
-			  u8 port_num, struct ib_port_attr *attr)
+static int rxe_query_port(struct ib_device *ibdev,
+			  u32 port_num, struct ib_port_attr *attr)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
-	struct rxe_port *port;
-	u32 speed;
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	struct net_device *ndev;
+	int err, ret;

-	if (unlikely(port_num != 1)) {
-		pr_warn("invalid port_number %d\n", port_num);
-		goto err1;
+	if (port_num != 1) {
+		err = -EINVAL;
+		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+		goto err_out;
 	}

-	port = &rxe->port;
+	ndev = rxe_ib_device_get_netdev(ibdev);
+	if (!ndev) {
+		err = -ENODEV;
+		goto err_out;
+	}

-	/* *attr being zeroed by the caller, avoid zeroing it here */
-	*attr = port->attr;
+	memcpy(attr, &rxe->port.attr, sizeof(*attr));

 	mutex_lock(&rxe->usdev_lock);
-	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
-		struct ethtool_link_ksettings ks;
+	ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
+			       &attr->active_width);

-		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
-		speed = ks.base.speed;
-	} else if (rxe->ndev->ethtool_ops->get_settings) {
-		struct ethtool_cmd cmd;
+	attr->state = ib_get_curr_port_state(ndev);
+	if (attr->state == IB_PORT_ACTIVE)
+		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+	else if (netif_get_flags(ndev) & IFF_UP)
+		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+	else
+		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

-		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
-		speed = cmd.speed;
-	} else {
-		pr_warn("%s speed is unknown, defaulting to 1000\n",
-			rxe->ndev->name);
-		speed = 1000;
-	}
-	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
-				  &attr->active_width);
 	mutex_unlock(&rxe->usdev_lock);

-	return 0;
-
-err1:
-	return -EINVAL;
-}
-
-static int rxe_query_gid(struct ib_device *device,
-			 u8 port_num, int index, union ib_gid *gid)
-{
-	int ret;
-
-	if (index > RXE_PORT_GID_TBL_LEN)
-		return -EINVAL;
-
-	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
-	if (ret == -EAGAIN) {
-		memcpy(gid, &zgid, sizeof(*gid));
-		return 0;
-	}
-
+	dev_put(ndev);
 	return ret;
-}

-static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
-		       index, const union ib_gid *gid,
-		       const struct ib_gid_attr *attr, void **context)
-{
-	if (index >= RXE_PORT_GID_TBL_LEN)
-		return -EINVAL;
-	return 0;
-}
-
-static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
-		       index, void **context)
-{
-	if (index >= RXE_PORT_GID_TBL_LEN)
-		return -EINVAL;
-	return 0;
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

-static struct net_device *rxe_get_netdev(struct ib_device *device,
-					 u8 port_num)
+static int rxe_query_gid(struct ib_device *ibdev, u32 port, int idx,
+			 union ib_gid *gid)
 {
-	struct rxe_dev *rxe = to_rdev(device);
+	struct rxe_dev *rxe = to_rdev(ibdev);

-	if (rxe->ndev) {
-		dev_hold(rxe->ndev);
-		return rxe->ndev;
-	}
+	/* subnet_prefix == interface_id == 0; */
+	memset(gid, 0, sizeof(*gid));
+	memcpy(gid->raw, rxe->raw_gid, ETH_ALEN);

-	return NULL;
+	return 0;
 }

-static int rxe_query_pkey(struct ib_device *device,
-			  u8 port_num, u16 index, u16 *pkey)
+static int rxe_query_pkey(struct ib_device *ibdev,
+			  u32 port_num, u16 index, u16 *pkey)
 {
-	struct rxe_dev *rxe = to_rdev(device);
-	struct rxe_port *port;
-
-	if (unlikely(port_num != 1)) {
-		dev_warn(device->dev.parent, "invalid port_num = %d\n",
-			 port_num);
-		goto err1;
-	}
-
-	port = &rxe->port;
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err;

-	if (unlikely(index >= port->attr.pkey_tbl_len)) {
-		dev_warn(device->dev.parent, "invalid index = %d\n",
-			 index);
-		goto err1;
+	if (index != 0) {
+		err = -EINVAL;
+		rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
+		goto err_out;
 	}

-	*pkey = port->pkey_tbl[index];
+	*pkey = IB_DEFAULT_PKEY_FULL;
 	return 0;

-err1:
-	return -EINVAL;
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

-static int rxe_modify_device(struct ib_device *dev,
+static int rxe_modify_device(struct ib_device *ibdev,
 			     int mask, struct ib_device_modify *attr)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err;
+
+	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+		     IB_DEVICE_MODIFY_NODE_DESC)) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
+		goto err_out;
+	}

 	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
 		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
@@ -206,21 +134,33 @@ static int rxe_modify_device(struct ib_device *dev,
 	}

 	return 0;
+
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

-static int rxe_modify_port(struct ib_device *dev,
-			   u8 port_num, int mask, struct ib_port_modify *attr)
+static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
+			   int mask, struct ib_port_modify *attr)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
+	struct rxe_dev *rxe = to_rdev(ibdev);
 	struct rxe_port *port;
+	int err;

-	if (unlikely(port_num != 1)) {
-		pr_warn("invalid port_num = %d\n", port_num);
-		goto err1;
+	if (port_num != 1) {
+		err = -EINVAL;
+		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+		goto err_out;
 	}

-	port = &rxe->port;
+	/* TODO: is shutdown useful? */
+	if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
+		goto err_out;
+	}

+	port = &rxe->port;
 	port->attr.port_cap_flags |= attr->set_port_cap_mask;
 	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

@@ -229,311 +169,349 @@ static int rxe_modify_port(struct ib_device *dev,

 	return 0;

-err1:
-	return -EINVAL;
-}
-
-static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
-					       u8 port_num)
-{
-	struct rxe_dev *rxe = to_rdev(dev);
-
-	return rxe_link_layer(rxe, port_num);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

-static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
-					      struct ib_udata *udata)
+static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
+					       u32 port_num)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
-	struct rxe_ucontext *uc;
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	int err;

-	uc = rxe_alloc(&rxe->uc_pool);
-	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
-}
+	if (port_num != 1) {
+		err = -EINVAL;
+		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+		goto err_out;
+	}

-static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
-{
-	struct rxe_ucontext *uc = to_ruc(ibuc);
+	return IB_LINK_LAYER_ETHERNET;

-	rxe_drop_ref(uc);
-	return 0;
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

-static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
+static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
 			      struct ib_port_immutable *immutable)
 {
+	struct rxe_dev *rxe = to_rdev(ibdev);
+	struct ib_port_attr attr = {};
 	int err;
-	struct ib_port_attr attr;

-	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+	if (port_num != 1) {
+		err = -EINVAL;
+		rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
+		goto err_out;
+	}

-	err = ib_query_port(dev, port_num, &attr);
+	err = ib_query_port(ibdev, port_num, &attr);
 	if (err)
-		return err;
+		goto err_out;

+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
 	immutable->gid_tbl_len = attr.gid_tbl_len;
 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

 	return 0;
+
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

-static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
-				  struct ib_ucontext *context,
-				  struct ib_udata *udata)
+/* uc */
+static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
-	struct rxe_pd *pd;
+	struct rxe_dev *rxe = to_rdev(ibuc->device);
+	struct rxe_ucontext *uc = to_ruc(ibuc);
+	int err;

-	pd = rxe_alloc(&rxe->pd_pool);
-	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
+	err = rxe_add_to_pool(&rxe->uc_pool, uc);
+	if (err)
+		rxe_err_dev(rxe, "unable to create uc\n");
+
+	return err;
 }

-static int rxe_dealloc_pd(struct ib_pd *ibpd)
+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
 {
-	struct rxe_pd *pd = to_rpd(ibpd);
+	struct rxe_ucontext *uc = to_ruc(ibuc);
+	int err;

-	rxe_drop_ref(pd);
-	return 0;
+	err = rxe_cleanup(uc);
+	if (err)
+		rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
 }

-static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
-		       struct rxe_av *av)
+/* pd */
+static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
+	struct rxe_dev *rxe = to_rdev(ibpd->device);
+	struct rxe_pd *pd = to_rpd(ibpd);
 	int err;
-	union ib_gid sgid;
-	struct ib_gid_attr sgid_attr;

-	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
-				rdma_ah_read_grh(attr)->sgid_index, &sgid,
-				&sgid_attr);
+	err = rxe_add_to_pool(&rxe->pd_pool, pd);
 	if (err) {
-		pr_err("Failed to query sgid. err = %d\n", err);
-		return err;
+		rxe_dbg_dev(rxe, "unable to alloc pd\n");
+		goto err_out;
 	}

-	err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr);
-	if (!err)
-		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);
+	return 0;

-	if (sgid_attr.ndev)
-		dev_put(sgid_attr.ndev);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
 	return err;
 }

-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
-				   struct rdma_ah_attr *attr,
-				   struct ib_udata *udata)
-
+static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
-	int err;
-	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_ah *ah;
+	int err;

-	err = rxe_av_chk_attr(rxe, attr);
+	err = rxe_cleanup(pd);
 	if (err)
-		goto err1;
+		rxe_err_pd(pd, "cleanup failed, err = %d\n", err);

-	ah = rxe_alloc(&rxe->ah_pool);
-	if (!ah) {
-		err = -ENOMEM;
-		goto err1;
+	return 0;
+}
+
+/* ah */
+static int rxe_create_ah(struct ib_ah *ibah,
+			 struct rdma_ah_init_attr *init_attr,
+			 struct ib_udata *udata)
+{
+	struct rxe_dev *rxe = to_rdev(ibah->device);
+	struct rxe_ah *ah = to_rah(ibah);
+	struct rxe_create_ah_resp __user *uresp = NULL;
+	int err, cleanup_err;
+
+	if (udata) {
+		/* test if new user provider */
+		if (udata->outlen >= sizeof(*uresp))
+			uresp = udata->outbuf;
+		ah->is_user = true;
+	} else {
+		ah->is_user = false;
+	}
+
+	err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
+				 init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create ah\n");
+		goto err_out;
 	}

-	rxe_add_ref(pd);
-	ah->pd = pd;
+	/* create index > 0 */
+	ah->ah_num = ah->elem.index;

-	err = rxe_init_av(rxe, attr, &ah->av);
-	if (err)
-		goto err2;
+	err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
+	if (err) {
+		rxe_dbg_ah(ah, "bad attr\n");
+		goto err_cleanup;
+	}

-	return &ah->ibah;
+	if (uresp) {
+		/* only if new user provider */
+		err = copy_to_user(&uresp->ah_num, &ah->ah_num,
+				   sizeof(uresp->ah_num));
+		if (err) {
+			err = -EFAULT;
+			rxe_dbg_ah(ah, "unable to copy to user\n");
+			goto err_cleanup;
+		}
+	} else if (ah->is_user) {
+		/* only if old user provider */
+		ah->ah_num = 0;
+	}

-err2:
-	rxe_drop_ref(pd);
-	rxe_drop_ref(ah);
-err1:
-	return ERR_PTR(err);
+	rxe_init_av(init_attr->ah_attr, &ah->av);
+	rxe_finalize(ah);
+
+	return 0;
+
+err_cleanup:
+	cleanup_err = rxe_cleanup(ah);
+	if (cleanup_err)
+		rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+	rxe_err_ah(ah, "returned err = %d\n", err);
+	return err;
 }

 static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
 {
-	int err;
-	struct rxe_dev *rxe = to_rdev(ibah->device);
 	struct rxe_ah *ah = to_rah(ibah);
+	int err;

-	err = rxe_av_chk_attr(rxe, attr);
-	if (err)
-		return err;
+	err = rxe_ah_chk_attr(ah, attr);
+	if (err) {
+		rxe_dbg_ah(ah, "bad attr\n");
+		goto err_out;
+	}

-	err = rxe_init_av(rxe, attr, &ah->av);
-	if (err)
-		return err;
+	rxe_init_av(attr, &ah->av);

 	return 0;
+
+err_out:
+	rxe_err_ah(ah, "returned err = %d\n", err);
+	return err;
 }

 static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
 {
-	struct rxe_dev *rxe = to_rdev(ibah->device);
 	struct rxe_ah *ah = to_rah(ibah);

 	memset(attr, 0, sizeof(*attr));
 	attr->type = ibah->type;
-	rxe_av_to_attr(rxe, &ah->av, attr);
+	rxe_av_to_attr(&ah->av, attr);
+
 	return 0;
 }

-static int rxe_destroy_ah(struct ib_ah *ibah)
+static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 {
 	struct rxe_ah *ah = to_rah(ibah);
+	int err;
+
+	err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
+	if (err)
+		rxe_err_ah(ah, "cleanup failed, err = %d\n", err);

-	rxe_drop_ref(ah->pd);
-	rxe_drop_ref(ah);
 	return 0;
 }

-static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
+/* srq */
+static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
+			  struct ib_udata *udata)
 {
-	int err;
-	int i;
-	u32 length;
-	struct rxe_recv_wqe *recv_wqe;
-	int num_sge = ibwr->num_sge;
+	struct rxe_dev *rxe = to_rdev(ibsrq->device);
+	struct rxe_pd *pd = to_rpd(ibsrq->pd);
+	struct rxe_srq *srq = to_rsrq(ibsrq);
+	struct rxe_create_srq_resp __user *uresp = NULL;
+	int err, cleanup_err;

-	if (unlikely(queue_full(rq->queue))) {
-		err = -ENOMEM;
-		goto err1;
+	if (udata) {
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_err_dev(rxe, "malformed udata\n");
+			goto err_out;
+		}
+		uresp = udata->outbuf;
 	}

-	if (unlikely(num_sge > rq->max_sge)) {
-		err = -EINVAL;
-		goto err1;
+	if (init->srq_type != IB_SRQT_BASIC) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
+			    init->srq_type);
+		goto err_out;
 	}

-	length = 0;
-	for (i = 0; i < num_sge; i++)
-		length += ibwr->sg_list[i].length;
-
-	recv_wqe = producer_addr(rq->queue);
-	recv_wqe->wr_id = ibwr->wr_id;
-	recv_wqe->num_sge = num_sge;
-
-	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
-	       num_sge * sizeof(struct ib_sge));
-
-	recv_wqe->dma.length = length;
-	recv_wqe->dma.resid = length;
-	recv_wqe->dma.num_sge = num_sge;
-	recv_wqe->dma.cur_sge = 0;
-	recv_wqe->dma.sge_offset = 0;
-
-	/* make sure all changes to the work queue are written before we
-	 * update the producer pointer
-	 */
-	smp_wmb();
-
-	advance_producer(rq->queue);
-	return 0;
-
-err1:
-	return err;
-}
-
-static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
-				     struct ib_srq_init_attr *init,
-				     struct ib_udata *udata)
-{
-	int err;
-	struct rxe_dev *rxe = to_rdev(ibpd->device);
-	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_srq *srq;
-	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
-
-	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
-	if (err)
-		goto err1;
+	err = rxe_srq_chk_init(rxe, init);
+	if (err) {
+		rxe_dbg_dev(rxe, "invalid init attributes\n");
+		goto err_out;
+	}

-	srq = rxe_alloc(&rxe->srq_pool);
-	if (!srq) {
-		err = -ENOMEM;
-		goto err1;
+	err = rxe_add_to_pool(&rxe->srq_pool, srq);
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
+		goto err_out;
 	}

-	rxe_add_index(srq);
-	rxe_add_ref(pd);
+	rxe_get(pd);
 	srq->pd = pd;

-	err = rxe_srq_from_init(rxe, srq, init, context, udata);
-	if (err)
-		goto err2;
+	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
+	if (err) {
+		rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
+		goto err_cleanup;
+	}

-	return &srq->ibsrq;
+	return 0;

-err2:
-	rxe_drop_ref(pd);
-	rxe_drop_index(srq);
-	rxe_drop_ref(srq);
-err1:
-	return ERR_PTR(err);
+err_cleanup:
+	cleanup_err = rxe_cleanup(srq);
+	if (cleanup_err)
+		rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

 static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			  enum ib_srq_attr_mask mask,
 			  struct ib_udata *udata)
 {
-	int err;
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct rxe_dev *rxe = to_rdev(ibsrq->device);
+	struct rxe_modify_srq_cmd cmd = {};
+	int err;
+
+	if (udata) {
+		if (udata->inlen < sizeof(cmd)) {
+			err = -EINVAL;
+			rxe_dbg_srq(srq, "malformed udata\n");
+			goto err_out;
+		}
+
+		err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
+		if (err) {
+			err = -EFAULT;
+			rxe_dbg_srq(srq, "unable to read udata\n");
+			goto err_out;
+		}
+	}

 	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
-	if (err)
-		goto err1;
+	if (err) {
+		rxe_dbg_srq(srq, "bad init attributes\n");
+		goto err_out;
+	}

-	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
-	if (err)
-		goto err1;
+	err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
+	if (err) {
+		rxe_dbg_srq(srq, "bad attr\n");
+		goto err_out;
+	}

 	return 0;

-err1:
+err_out:
+	rxe_err_srq(srq, "returned err = %d\n", err);
 	return err;
 }

 static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 {
 	struct rxe_srq *srq = to_rsrq(ibsrq);
+	int err;

-	if (srq->error)
-		return -EINVAL;
+	if (srq->error) {
+		err = -EINVAL;
+		rxe_dbg_srq(srq, "srq in error state\n");
+		goto err_out;
+	}

 	attr->max_wr = srq->rq.queue->buf->index_mask;
 	attr->max_sge = srq->rq.max_sge;
 	attr->srq_limit = srq->limit;
 	return 0;
-}
-
-static int rxe_destroy_srq(struct ib_srq *ibsrq)
-{
-	struct rxe_srq *srq = to_rsrq(ibsrq);
-
-	if (srq->rq.queue)
-		rxe_queue_cleanup(srq->rq.queue);
-
-	rxe_drop_ref(srq->pd);
-	rxe_drop_index(srq);
-	rxe_drop_ref(srq);
-	return 0;
+
+err_out:
+	rxe_err_srq(srq, "returned err = %d\n", err);
+	return err;
 }

-static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-			     struct ib_recv_wr **bad_wr)
+static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+			     const struct ib_recv_wr **bad_wr)
 {
 	int err = 0;
-	unsigned long flags;
 	struct rxe_srq *srq = to_rsrq(ibsrq);
+	unsigned long flags;

 	spin_lock_irqsave(&srq->rq.producer_lock, flags);

@@ -546,73 +524,126 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,

 	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

-	if (err)
+	if (err) {
 		*bad_wr = wr;
+		rxe_err_srq(srq, "returned err = %d\n", err);
+	}

 	return err;
 }

-static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
-				   struct ib_qp_init_attr *init,
-				   struct ib_udata *udata)
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
+	struct rxe_srq *srq = to_rsrq(ibsrq);
 	int err;
-	struct rxe_dev *rxe = to_rdev(ibpd->device);
-	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_qp *qp;

-	err = rxe_qp_chk_init(rxe, init);
+	err = rxe_cleanup(srq);
 	if (err)
-		goto err1;
+		rxe_err_srq(srq, "cleanup failed, err = %d\n", err);

-	qp = rxe_alloc(&rxe->qp_pool);
-	if (!qp) {
-		err = -ENOMEM;
-		goto err1;
-	}
+	return 0;
+}
+
+/* qp */
+static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
+			 struct ib_udata *udata)
+{
+	struct rxe_dev *rxe = to_rdev(ibqp->device);
+	struct rxe_pd *pd = to_rpd(ibqp->pd);
+	struct rxe_qp *qp = to_rqp(ibqp);
+	struct rxe_create_qp_resp __user *uresp = NULL;
+	int err, cleanup_err;

 	if (udata) {
 		if (udata->inlen) {
 			err = -EINVAL;
-			goto err2;
+			rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
+			goto err_out;
 		}
-		qp->is_user = 1;
+
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
+			goto err_out;
+		}
+
+		qp->is_user = true;
+		uresp = udata->outbuf;
+	} else {
+		qp->is_user = false;
 	}

-	rxe_add_index(qp);
+	if (init->create_flags) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
+		goto err_out;
+	}

-	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
-	if (err)
-		goto err3;
+	err = rxe_qp_chk_init(rxe, init);
+	if (err) {
+		rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
+		goto err_out;
+	}

-	return &qp->ibqp;
+	err = rxe_add_to_pool(&rxe->qp_pool, qp);
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
+		goto err_out;
+	}

-err3:
-	rxe_drop_index(qp);
-err2:
-	rxe_drop_ref(qp);
-err1:
-	return ERR_PTR(err);
+	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
+	if (err) {
+		rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
+		goto err_cleanup;
+	}
+
+	rxe_finalize(qp);
+	return 0;
+
+err_cleanup:
+	cleanup_err = rxe_cleanup(qp);
+	if (cleanup_err)
+		rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

 static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 int mask, struct ib_udata *udata)
 {
-	int err;
 	struct rxe_dev *rxe = to_rdev(ibqp->device);
 	struct rxe_qp *qp = to_rqp(ibqp);
+	int err;
+
+	if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
+			   mask, err);
+		goto err_out;
+	}

 	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
-	if (err)
-		goto err1;
+	if (err) {
+		rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
+		goto err_out;
+	}

 	err = rxe_qp_from_attr(qp, attr, mask, udata);
-	if (err)
-		goto err1;
+	if (err) {
+		rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
+		goto err_out;
+	}
+
+	if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+		qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+						  qp->ibqp.qp_num,
+						  qp->attr.dest_qp_num);

 	return 0;

-err1:
+err_out:
+	rxe_err_qp(qp, "returned err = %d\n", err);
 	return err;
 }

@@ -627,64 +658,125 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	return 0;
 }

-static int rxe_destroy_qp(struct ib_qp *ibqp)
+static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct rxe_qp *qp = to_rqp(ibqp);
+	int err;
+
+	err = rxe_qp_chk_destroy(qp);
+	if (err) {
+		rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
+		goto err_out;
+	}
+
+	err = rxe_cleanup(qp);
+	if (err)
+		rxe_err_qp(qp, "cleanup failed, err = %d\n", err);

-	rxe_qp_destroy(qp);
-	rxe_drop_index(qp);
-	rxe_drop_ref(qp);
 	return 0;
+
+err_out:
+	rxe_err_qp(qp, "returned err = %d\n", err);
+	return err;
 }

-static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
-			    unsigned int mask, unsigned int length)
+/* send wr */
+
+/* sanity check incoming send work request */
+static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+			    unsigned int *maskp, unsigned int *lengthp)
 {
 	int num_sge = ibwr->num_sge;
 	struct rxe_sq *sq = &qp->sq;
+	unsigned int mask = 0;
+	unsigned long length = 0;
+	int err = -EINVAL;
+	int i;

-	if (unlikely(num_sge > sq->max_sge))
-		goto err1;
+	do {
+		mask = wr_opcode_mask(ibwr->opcode, qp);
+		if (!mask) {
+			rxe_err_qp(qp, "bad wr opcode for qp type\n");
+			break;
+		}

-	if (unlikely(mask & WR_ATOMIC_MASK)) {
-		if (length < 8)
-			goto err1;
+		if (num_sge > sq->max_sge) {
+			rxe_err_qp(qp, "num_sge > max_sge\n");
+			break;
+		}

-		if (atomic_wr(ibwr)->remote_addr & 0x7)
-			goto err1;
-	}
+		length = 0;
+		for (i = 0; i < ibwr->num_sge; i++)
+			length += ibwr->sg_list[i].length;
+
+		if (length > RXE_PORT_MAX_MSG_SZ) {
+			rxe_err_qp(qp, "message length too long\n");
+			break;
+		}

-	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
-		     (length > sq->max_inline)))
-		goto err1;
+		if (mask & WR_ATOMIC_MASK) {
+			if (length != 8) {
+				rxe_err_qp(qp, "atomic length != 8\n");
+				break;
+			}
+			if (atomic_wr(ibwr)->remote_addr & 0x7) {
+				rxe_err_qp(qp, "misaligned atomic address\n");
+				break;
+			}
+		}
+		if (ibwr->send_flags & IB_SEND_INLINE) {
+			if (!(mask & WR_INLINE_MASK)) {
+				rxe_err_qp(qp, "opcode doesn't support inline data\n");
+				break;
+			}
+			if (length > sq->max_inline) {
+				rxe_err_qp(qp, "inline length too big\n");
+				break;
+			}
+		}

-	return 0;
+		err = 0;
+	} while (0);

-err1:
-	return -EINVAL;
+	*maskp = mask;
+	*lengthp = (int)length;
+
+	return err;
 }

-static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
-			 struct ib_send_wr *ibwr)
+static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+			const struct ib_send_wr *ibwr)
 {
 	wr->wr_id = ibwr->wr_id;
-	wr->num_sge = ibwr->num_sge;
 	wr->opcode = ibwr->opcode;
 	wr->send_flags = ibwr->send_flags;

 	if (qp_type(qp) == IB_QPT_UD ||
-	    qp_type(qp) == IB_QPT_SMI ||
 	    qp_type(qp) == IB_QPT_GSI) {
+		struct ib_ah *ibah = ud_wr(ibwr)->ah;
+
 		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
 		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
+		wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
+
 		if (qp_type(qp) == IB_QPT_GSI)
 			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
-		if (wr->opcode == IB_WR_SEND_WITH_IMM)
+
+		switch (wr->opcode) {
+		case IB_WR_SEND_WITH_IMM:
 			wr->ex.imm_data = ibwr->ex.imm_data;
+			break;
+		case IB_WR_SEND:
+			break;
+		default:
+			rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
+				   wr->opcode);
+			return -EINVAL;
+		}
 	} else {
 		switch (wr->opcode) {
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 			wr->ex.imm_data = ibwr->ex.imm_data;
+			fallthrough;
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_WRITE:
 			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
@@ -696,6 +788,11 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 		case IB_WR_SEND_WITH_INV:
 			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
 			break;
+		case IB_WR_RDMA_READ_WITH_INV:
+			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
+			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
+			break;
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
 			wr->wr.atomic.remote_addr =
@@ -707,55 +804,66 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 			break;
 		case IB_WR_LOCAL_INV:
 			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
-			break;
+			break;
 		case IB_WR_REG_MR:
 			wr->wr.reg.mr = reg_wr(ibwr)->mr;
 			wr->wr.reg.key = reg_wr(ibwr)->key;
 			wr->wr.reg.access = reg_wr(ibwr)->access;
-			break;
-		default:
 			break;
+		case IB_WR_SEND:
+		case IB_WR_BIND_MW:
+		case IB_WR_FLUSH:
+		case IB_WR_ATOMIC_WRITE:
+			break;
+		default:
+			rxe_err_qp(qp, "unsupported wr opcode %d\n",
+				   wr->opcode);
+			return -EINVAL;
 		}
 	}
+
+	return 0;
 }

-static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
-			 unsigned int mask, unsigned int length,
-			 struct rxe_send_wqe *wqe)
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+				    const struct ib_send_wr *ibwr)
 {
-	int num_sge = ibwr->num_sge;
-	struct ib_sge *sge;
+	struct ib_sge *sge = ibwr->sg_list;
+	u8 *p = wqe->dma.inline_data;
 	int i;
-	u8 *p;
-
-	init_send_wr(qp, &wqe->wr, ibwr);

-	if (qp_type(qp) == IB_QPT_UD ||
-	    qp_type(qp) == IB_QPT_SMI ||
-	    qp_type(qp) == IB_QPT_GSI)
-		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
+	for (i = 0; i < ibwr->num_sge; i++, sge++) {
+		memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
+		p += sge->length;
+	}
+}

-	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
-		p = wqe->dma.inline_data;
+static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+			 unsigned int mask, unsigned int length,
+			 struct rxe_send_wqe *wqe)
+{
+	int num_sge = ibwr->num_sge;
+	int err;

-		sge = ibwr->sg_list;
-		for (i = 0; i < num_sge; i++, sge++) {
-			memcpy(p, (void *)(uintptr_t)sge->addr,
-			       sge->length);
+	err = init_send_wr(qp, &wqe->wr, ibwr);
+	if (err)
+		return err;

-			p += sge->length;
-		}
-	} else if (mask & WR_REG_MASK) {
+	/* local operation */
+	if (unlikely(mask & WR_LOCAL_OP_MASK)) {
 		wqe->mask = mask;
 		wqe->state = wqe_state_posted;
 		return 0;
-	} else
+	}
+
+	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+		copy_inline_data_to_wqe(wqe, ibwr);
+	else
 		memcpy(wqe->dma.sge, ibwr->sg_list,
 		       num_sge * sizeof(struct ib_sge));

-	wqe->iova = (mask & WR_ATOMIC_MASK) ?
-		    atomic_wr(ibwr)->remote_addr :
-		    rdma_wr(ibwr)->remote_addr;
+	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
+		    mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
 	wqe->mask = mask;
 	wqe->dma.length = length;
 	wqe->dma.resid = length;
@@ -768,137 +876,178 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 	return 0;
 }

-static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
-			 unsigned int mask, u32 length)
+static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
 {
 	int err;
 	struct rxe_sq *sq = &qp->sq;
 	struct rxe_send_wqe *send_wqe;
-	unsigned long flags;
+	unsigned int mask;
+	unsigned int length;
+	int full;

-	err = validate_send_wr(qp, ibwr, mask, length);
+	err = validate_send_wr(qp, ibwr, &mask, &length);
 	if (err)
 		return err;

-	spin_lock_irqsave(&qp->sq.sq_lock, flags);
-
-	if (unlikely(queue_full(sq->queue))) {
-		err = -ENOMEM;
-		goto err1;
+	full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
+	if (unlikely(full)) {
+		rxe_err_qp(qp, "send queue full\n");
+		return -ENOMEM;
 	}

-	send_wqe = producer_addr(sq->queue);
-
+	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
 	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
-	if (unlikely(err))
-		goto err1;
-
-	/*
-	 * make sure all changes to the work queue are
-	 * written before we update the producer pointer
-	 */
-	smp_wmb();
-
-	advance_producer(sq->queue);
-	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
-
-	return 0;
+	if (!err)
+		queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);

-err1:
-	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
 	return err;
 }

-static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
-				struct ib_send_wr **bad_wr)
+static int rxe_post_send_kernel(struct rxe_qp *qp,
+				const struct ib_send_wr *ibwr,
+				const struct ib_send_wr **bad_wr)
 {
 	int err = 0;
-	unsigned int mask;
-	unsigned int length = 0;
-	int i;
-	int must_sched;
-
-	while (wr) {
-		mask = wr_opcode_mask(wr->opcode, qp);
-		if (unlikely(!mask)) {
-			err = -EINVAL;
-			*bad_wr = wr;
-			break;
-		}
-
-		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
-			     !(mask & WR_INLINE_MASK))) {
-			err = -EINVAL;
-			*bad_wr = wr;
-			break;
-		}
-
-		length = 0;
-		for (i = 0; i < wr->num_sge; i++)
-			length += wr->sg_list[i].length;
-
-		err = post_one_send(qp, wr, mask, length);
+	unsigned long flags;
+	int good = 0;

+	spin_lock_irqsave(&qp->sq.sq_lock, flags);
+	while (ibwr) {
+		err = post_one_send(qp, ibwr);
 		if (err) {
-			*bad_wr = wr;
+			*bad_wr = ibwr;
 			break;
+		} else {
+			good++;
 		}
-		wr = wr->next;
+		ibwr = ibwr->next;
 	}
+	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

-	/*
-	 * Must sched in case of GSI QP because ib_send_mad() hold irq lock,
-	 * and the requester call ip_local_out_sk() that takes spin_lock_bh.
-	 */
-	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
-			(queue_count(qp->sq.queue) > 1);
-
-	rxe_run_task(&qp->req.task, must_sched);
+	/* kickoff processing of any posted wqes */
+	if (good)
+		rxe_sched_task(&qp->send_task);

 	return err;
 }

-static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			 struct ib_send_wr **bad_wr)
+static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+			 const struct ib_send_wr **bad_wr)
 {
 	struct rxe_qp *qp = to_rqp(ibqp);
+	int err;
+	unsigned long flags;

-	if (unlikely(!qp->valid)) {
-		*bad_wr = wr;
+	spin_lock_irqsave(&qp->state_lock, flags);
+	/* caller has already called destroy_qp */
+	if (WARN_ON_ONCE(!qp->valid)) {
+		spin_unlock_irqrestore(&qp->state_lock, flags);
+		rxe_err_qp(qp, "qp has been destroyed\n");
 		return -EINVAL;
 	}

-	if (unlikely(qp->req.state < QP_STATE_READY)) {
+	if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		*bad_wr = wr;
+		rxe_err_qp(qp, "qp not ready to send\n");
 		return -EINVAL;
 	}
+	spin_unlock_irqrestore(&qp->state_lock, flags);

 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
-		rxe_run_task(&qp->req.task, 0);
-		return 0;
-	} else
-		return rxe_post_send_kernel(qp, wr, bad_wr);
+		rxe_sched_task(&qp->send_task);
+	} else {
+		err = rxe_post_send_kernel(qp, wr, bad_wr);
+		if (err)
+			return err;
+	}
+
+	return 0;
 }

-static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			 struct ib_recv_wr **bad_wr)
+/* recv wr */
+static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+{
+	int i;
+	unsigned long length;
+	struct rxe_recv_wqe *recv_wqe;
+	int num_sge = ibwr->num_sge;
+	int full;
+	int err;
+
+	full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
+	if (unlikely(full)) {
+		err = -ENOMEM;
+		rxe_dbg("queue full\n");
+		goto err_out;
+	}
+
+	if (unlikely(num_sge > rq->max_sge)) {
+		err = -EINVAL;
+		rxe_dbg("bad num_sge > max_sge\n");
+		goto err_out;
+	}
+
+	length = 0;
+	for (i = 0; i < num_sge; i++)
+		length += ibwr->sg_list[i].length;
+
+	if (length > RXE_PORT_MAX_MSG_SZ) {
+		err = -EINVAL;
+		rxe_dbg("message length too long\n");
+		goto err_out;
+	}
+
+	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
+
+	recv_wqe->wr_id = ibwr->wr_id;
+	recv_wqe->dma.length = length;
+	recv_wqe->dma.resid = length;
+	recv_wqe->dma.num_sge = num_sge;
+	recv_wqe->dma.cur_sge = 0;
+	recv_wqe->dma.sge_offset = 0;
+	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
+	       num_sge * sizeof(struct ib_sge));
+
+	queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
+
+	return 0;
+
+err_out:
+	rxe_dbg("returned err = %d\n", err);
+	return err;
+}
+
+static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+			 const struct ib_recv_wr **bad_wr)
 {
 	int err = 0;
 	struct rxe_qp *qp = to_rqp(ibqp);
 	struct rxe_rq *rq = &qp->rq;
 	unsigned long flags;

-	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
+	spin_lock_irqsave(&qp->state_lock, flags);
+	/* caller has already called destroy_qp */
+	if (WARN_ON_ONCE(!qp->valid)) {
+		spin_unlock_irqrestore(&qp->state_lock, flags);
+		rxe_err_qp(qp, "qp has been destroyed\n");
+		return -EINVAL;
+	}
+
+	/* see C10-97.2.1 */
+	if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		*bad_wr = wr;
-		err = -EINVAL;
-		goto err1;
+		rxe_dbg_qp(qp, "qp not ready to post recv\n");
+		return -EINVAL;
 	}
+	spin_unlock_irqrestore(&qp->state_lock, flags);

 	if (unlikely(qp->srq)) {
 		*bad_wr = wr;
-		err = -EINVAL;
-		goto err1;
+		rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
+		return -EINVAL;
 	}

 	spin_lock_irqsave(&rq->producer_lock, flags);

@@ -914,73 +1063,102 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,

 	spin_unlock_irqrestore(&rq->producer_lock, flags);

-	if (qp->resp.state == QP_STATE_ERROR)
-		rxe_run_task(&qp->resp.task, 1);
+	spin_lock_irqsave(&qp->state_lock, flags);
+	if (qp_state(qp) == IB_QPS_ERR)
+		rxe_sched_task(&qp->recv_task);
+	spin_unlock_irqrestore(&qp->state_lock, flags);

-err1:
 	return err;
 }

-static struct ib_cq *rxe_create_cq(struct ib_device *dev,
-				   const struct ib_cq_init_attr *attr,
-				   struct ib_ucontext *context,
-				   struct ib_udata *udata)
+/* cq */
+static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+			 struct uverbs_attr_bundle *attrs)
 {
-	int err;
+	struct ib_udata *udata = &attrs->driver_udata;
+	struct ib_device *dev = ibcq->device;
 	struct rxe_dev *rxe = to_rdev(dev);
-	struct rxe_cq *cq;
-
-	if (attr->flags)
-		return ERR_PTR(-EINVAL);
-
-	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
-	if (err)
-		goto err1;
+	struct rxe_cq *cq = to_rcq(ibcq);
+	struct rxe_create_cq_resp __user *uresp = NULL;
+	int err, cleanup_err;

-	cq = rxe_alloc(&rxe->cq_pool);
-	if (!cq) {
-		err = -ENOMEM;
-		goto err1;
+	if (udata) {
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
+			goto err_out;
+		}
+		uresp = udata->outbuf;
 	}

-	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
-			       context, udata);
-	if (err)
-		goto err2;
+	if (attr->flags) {
+		err = -EOPNOTSUPP;
+		rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
+		goto err_out;
+	}

-	return &cq->ibcq;
+	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
+	if (err) {
+		rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
+		goto err_out;
+	}

-err2:
-	rxe_drop_ref(cq);
-err1:
-	return ERR_PTR(err);
-}
+	err = rxe_add_to_pool(&rxe->cq_pool, cq);
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
+		goto err_out;
+	}

-static int rxe_destroy_cq(struct ib_cq *ibcq)
-{
-	struct rxe_cq *cq = to_rcq(ibcq);
+	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
+			       uresp);
+	if (err) {
+		rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
+		goto err_cleanup;
+	}

-	rxe_drop_ref(cq);
 	return 0;
+
+err_cleanup:
+	cleanup_err = rxe_cleanup(cq);
+	if (cleanup_err)
+		rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
+err_out:
+	rxe_err_dev(rxe, "returned err = %d\n", err);
+	return err;
 }

 static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 {
-	int err;
 	struct rxe_cq *cq = to_rcq(ibcq);
 	struct rxe_dev *rxe = to_rdev(ibcq->device);
+	struct rxe_resize_cq_resp __user *uresp = NULL;
+	int err;

-	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
-	if (err)
-		goto err1;
+	if (udata) {
+		if (udata->outlen < sizeof(*uresp)) {
+			err = -EINVAL;
+			rxe_dbg_cq(cq, "malformed udata\n");
+			goto err_out;
+		}
+		uresp = udata->outbuf;
+	}

-	err = rxe_cq_resize_queue(cq, cqe, udata);
-	if (err)
-		goto err1;
+	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
+	if (err) {
+		rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
+		goto err_out;
+	}
+
+	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
+	if (err) {
+		rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
+		goto err_out;
+	}

 	return 0;

-err1:
+err_out:
+	rxe_err_cq(cq, "returned err = %d\n", err);
 	return err;
 }

@@ -993,12 +1171,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	spin_lock_irqsave(&cq->cq_lock, flags);

 	for (i = 0; i < num_entries; i++) {
-		cqe = queue_head(cq->queue);
+		cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
 		if (!cqe)
-			break;
+			break;	/* queue empty */

 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
-		advance_consumer(cq->queue);
+		queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
 	}

 	spin_unlock_irqrestore(&cq->cq_lock, flags);
@@ -1008,7 +1186,9 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
-	int count = queue_count(cq->queue);
+	int count;
+
+	count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);

 	return (count > wc_cnt) ? wc_cnt : count;
 }
@@ -1016,14 +1196,15 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
-	unsigned long irq_flags;
 	int ret = 0;
+	int empty;
+	unsigned long irq_flags;

 	spin_lock_irqsave(&cq->cq_lock, irq_flags);
-	if (cq->notify != IB_CQ_NEXT_COMP)
-		cq->notify = flags & IB_CQ_SOLICITED_MASK;
+	cq->notify |= flags & IB_CQ_SOLICITED_MASK;
+	empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);

-	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
+	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
 		ret = 1;

 	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
@@ -1031,343 +1212,354 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	return ret;
 }

+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+	struct rxe_cq *cq = to_rcq(ibcq);
+	int err;
+
+	/* See IBA C11-17: The CI shall return an error if this Verb is
+	 * invoked while a Work Queue is still associated with the CQ.
+	 */
+	if (atomic_read(&cq->num_wq)) {
+		err = -EINVAL;
+		rxe_dbg_cq(cq, "still in use\n");
+		goto err_out;
+	}
+
+	err = rxe_cleanup(cq);
+	if (err)
+		rxe_err_cq(cq, "cleanup failed, err = %d\n", err);
+
+	return 0;
+
+err_out:
+	rxe_err_cq(cq, "returned err = %d\n", err);
+	return err;
+}
+
+/* mr */
 static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_mem *mr;
+	struct rxe_mr *mr;
 	int err;

-	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err1;
-	}
-
-	rxe_add_index(mr);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);

-	rxe_add_ref(pd);
+	err = rxe_add_to_pool(&rxe->mr_pool, mr);
+	if (err) {
+		rxe_dbg_dev(rxe, "unable to create mr\n");
+		goto err_free;
+	}

-	err = rxe_mem_init_dma(rxe, pd, access, mr);
-	if (err)
-		goto err2;
+	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
+	mr->ibmr.device = ibpd->device;
+	rxe_mr_init_dma(access, mr);
+	rxe_finalize(mr);

 	return &mr->ibmr;

-err2:
-	rxe_drop_ref(pd);
-	rxe_drop_index(mr);
-	rxe_drop_ref(mr);
-err1:
+err_free:
+	kfree(mr);
+	rxe_err_pd(pd, "returned err = %d\n", err);
 	return ERR_PTR(err);
 }

-static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
-				     u64 start,
-				     u64 length,
-				     u64 iova,
-				     int access, struct ib_udata *udata)
+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
+				     u64 length, u64 iova, int access,
+				     struct ib_dmah *dmah,
+				     struct ib_udata *udata)
 {
-	int err;
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_mem *mr;
+	struct rxe_mr *mr;
+	int err, cleanup_err;

-	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err2;
+	if (dmah)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (access & ~RXE_ACCESS_SUPPORTED_MR) {
+		rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
+			   RXE_ACCESS_SUPPORTED_MR);
+		return ERR_PTR(-EOPNOTSUPP);
 	}

-	rxe_add_index(mr);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);

-	rxe_add_ref(pd);
+	err = rxe_add_to_pool(&rxe->mr_pool, mr);
+	if (err) {
+		rxe_dbg_pd(pd, "unable to create mr\n");
+		goto err_free;
+	}

-	err = rxe_mem_init_user(rxe, pd, start, length, iova,
-				access, udata, mr);
-	if (err)
-		goto err3;
+	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
+	mr->ibmr.device = ibpd->device;

+	if (access & IB_ACCESS_ON_DEMAND)
+		err = rxe_odp_mr_init_user(rxe, start, length, iova, access, mr);
+	else
+		err = rxe_mr_init_user(rxe, start, length, access, mr);
+	if (err) {
+		rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
+		goto err_cleanup;
+	}
+
+	rxe_finalize(mr);

 	return &mr->ibmr;

-err3:
-	rxe_drop_ref(pd);
-	rxe_drop_index(mr);
-	rxe_drop_ref(mr);
-err2:
+err_cleanup:
+	cleanup_err = rxe_cleanup(mr);
+	if (cleanup_err)
+		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
+err_free:
+	kfree(mr);
+	rxe_err_pd(pd, "returned err = %d\n", err);
 	return ERR_PTR(err);
 }

-static int rxe_dereg_mr(struct ib_mr *ibmr)
+static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
+				       u64 start, u64 length, u64 iova,
+				       int access, struct ib_pd *ibpd,
+				       struct ib_udata *udata)
 {
-	struct rxe_mem *mr = to_rmr(ibmr);
+	struct rxe_mr *mr = to_rmr(ibmr);
+	struct rxe_pd *old_pd = to_rpd(ibmr->pd);
+	struct rxe_pd *pd = to_rpd(ibpd);

-	mr->state = RXE_MEM_STATE_ZOMBIE;
-	rxe_drop_ref(mr->pd);
-	rxe_drop_index(mr);
-	rxe_drop_ref(mr);
-	return 0;
+	/* for now only support the two easy cases:
+	 * rereg_pd and rereg_access
+	 */
+	if (flags & ~RXE_MR_REREG_SUPPORTED) {
+		rxe_err_mr(mr, "flags = %#x not supported\n", flags);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	if (flags & IB_MR_REREG_PD) {
+		rxe_put(old_pd);
+		rxe_get(pd);
+		mr->ibmr.pd = ibpd;
+	}
+
+	if (flags & IB_MR_REREG_ACCESS) {
+		if (access & ~RXE_ACCESS_SUPPORTED_MR) {
+			rxe_err_mr(mr, "access = %#x not supported\n", access);
+			return ERR_PTR(-EOPNOTSUPP);
+		}
+		mr->access = access;
+	}
+
+	return NULL;
 }

-static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
-				  enum ib_mr_type mr_type,
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 				  u32 max_num_sg)
 {
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
-	struct rxe_mem *mr;
-	int err;
+	struct rxe_mr *mr;
+	int err, cleanup_err;

-	if (mr_type != IB_MR_TYPE_MEM_REG)
-		return ERR_PTR(-EINVAL);
-
-	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err1;
+	if (mr_type != IB_MR_TYPE_MEM_REG) {
+		err = -EINVAL;
+		rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
+			   mr_type, err);
+		goto err_out;
 	}

-	rxe_add_index(mr);
-
-	rxe_add_ref(pd);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);

-	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+	err = rxe_add_to_pool(&rxe->mr_pool, mr);
 	if (err)
-		goto err2;
+		goto err_free;

+	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
+	mr->ibmr.device = ibpd->device;
+
+	err = rxe_mr_init_fast(max_num_sg, mr);
+	if (err) {
+		rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
+		goto err_cleanup;
+	}
+
+	rxe_finalize(mr);

 	return &mr->ibmr;

-err2:
-	rxe_drop_ref(pd);
-	rxe_drop_index(mr);
-	rxe_drop_ref(mr);
-err1:
+err_cleanup:
+	cleanup_err = rxe_cleanup(mr);
+	if (cleanup_err)
+		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
+err_free:
+	kfree(mr);
+err_out:
+	rxe_err_pd(pd, "returned err = %d\n", err);
 	return ERR_PTR(err);
 }

-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
-	struct rxe_mem *mr = to_rmr(ibmr);
-	struct rxe_map *map;
-	struct rxe_phys_buf *buf;
+	struct rxe_mr *mr = to_rmr(ibmr);
+	int err, cleanup_err;

-	if (unlikely(mr->nbuf == mr->num_buf))
-		return -ENOMEM;
-
-	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
-	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+	/* See IBA 10.6.7.2.6 */
+	if (atomic_read(&mr->num_mw) > 0) {
+		err = -EINVAL;
+		rxe_dbg_mr(mr, "mr has mw's bound\n");
+		goto err_out;
+	}

-	buf->addr = addr;
-	buf->size = ibmr->page_size;
-	mr->nbuf++;
+	cleanup_err = rxe_cleanup(mr);
+	if (cleanup_err)
+		rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);

+	kfree_rcu_mightsleep(mr);
 	return 0;
+
+err_out:
+	rxe_err_mr(mr, "returned err = %d\n", err);
+	return err;
 }

-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
-			 int sg_nents, unsigned int *sg_offset)
+static ssize_t parent_show(struct device *device,
+			   struct device_attribute *attr, char *buf)
 {
-	struct rxe_mem *mr = to_rmr(ibmr);
-	int n;
-
-	mr->nbuf = 0;
-
-	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
-
-	mr->va = ibmr->iova;
-	mr->iova = ibmr->iova;
-	mr->length = ibmr->length;
-	mr->page_shift = ilog2(ibmr->page_size);
-	mr->page_mask = ibmr->page_size - 1;
-	mr->offset = mr->iova & mr->page_mask;
+	struct rxe_dev *rxe =
+		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

-	return n;
+	return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
 }

-static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
-{
-	int err;
-	struct rxe_dev *rxe = to_rdev(ibqp->device);
-	struct rxe_qp *qp = to_rqp(ibqp);
-	struct rxe_mc_grp *grp;
-
-	/* takes a ref on grp if successful */
-	err = rxe_mcast_get_grp(rxe, mgid, &grp);
-	if (err)
-		return err;
+static DEVICE_ATTR_RO(parent);

-	err = rxe_mcast_add_grp_elem(rxe, qp, grp);
+static struct attribute *rxe_dev_attributes[] = {
+	&dev_attr_parent.attr,
+	NULL
+};

-	rxe_drop_ref(grp);
-	return err;
-}
+static const struct attribute_group rxe_attr_group = {
+	.attrs = rxe_dev_attributes,
+};

-static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+static int rxe_enable_driver(struct ib_device *ib_dev)
 {
-	struct rxe_dev *rxe = to_rdev(ibqp->device);
-	struct rxe_qp *qp = to_rqp(ibqp);
+	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+	struct net_device *ndev;

-	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
-}
+	ndev = rxe_ib_device_get_netdev(ib_dev);
+	if (!ndev)
+		return -ENODEV;

-static ssize_t rxe_show_parent(struct device *device,
-			       struct device_attribute *attr, char *buf)
-{
-	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
-					   ib_dev.dev);
+	rxe_set_port_state(rxe);
+	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));

-	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
+	dev_put(ndev);
+	return 0;
 }

-static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);
-
-static struct device_attribute *rxe_dev_attributes[] = {
-	&dev_attr_parent,
+static const struct ib_device_ops rxe_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_RXE,
+	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,
+
+	.alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
+	.alloc_mr = rxe_alloc_mr,
+	.alloc_mw = rxe_alloc_mw,
+	.alloc_pd = rxe_alloc_pd,
+	.alloc_ucontext = rxe_alloc_ucontext,
+	.attach_mcast = rxe_attach_mcast,
+	.create_ah = rxe_create_ah,
+	.create_cq = rxe_create_cq,
+	.create_qp = rxe_create_qp,
+	.create_srq = rxe_create_srq,
+	.create_user_ah = rxe_create_ah,
+	.dealloc_driver = rxe_dealloc,
+	.dealloc_mw = rxe_dealloc_mw,
+	.dealloc_pd = rxe_dealloc_pd,
+	.dealloc_ucontext = rxe_dealloc_ucontext,
+	.dereg_mr = rxe_dereg_mr,
+	.destroy_ah = rxe_destroy_ah,
+	.destroy_cq = rxe_destroy_cq,
+	.destroy_qp = rxe_destroy_qp,
+	.destroy_srq = rxe_destroy_srq,
+	.detach_mcast = rxe_detach_mcast,
+	.device_group = &rxe_attr_group,
+	.enable_driver = rxe_enable_driver,
+	.get_dma_mr = rxe_get_dma_mr,
+	.get_hw_stats = rxe_ib_get_hw_stats,
+	.get_link_layer = rxe_get_link_layer,
+	.get_port_immutable = rxe_port_immutable,
+	.map_mr_sg = rxe_map_mr_sg,
+	.mmap = rxe_mmap,
+	.modify_ah = rxe_modify_ah,
+	.modify_device = rxe_modify_device,
+	.modify_port = rxe_modify_port,
+	.modify_qp = rxe_modify_qp,
+	.modify_srq = rxe_modify_srq,
+	.peek_cq = rxe_peek_cq,
+	.poll_cq = rxe_poll_cq,
+	.post_recv = rxe_post_recv,
+	.post_send = rxe_post_send,
+	.post_srq_recv = rxe_post_srq_recv,
+	.query_ah = rxe_query_ah,
+	.query_device = rxe_query_device,
+	.query_pkey = rxe_query_pkey,
+	.query_gid = rxe_query_gid,
+	.query_port = rxe_query_port,
+	.query_qp = rxe_query_qp,
+	.query_srq = rxe_query_srq,
+	.reg_user_mr = rxe_reg_user_mr,
+	.req_notify_cq = rxe_req_notify_cq,
+	.rereg_user_mr = rxe_rereg_user_mr,
+	.resize_cq = rxe_resize_cq,
+
+	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
+	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
+	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
+	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
+	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
 };

-int rxe_register_device(struct rxe_dev *rxe)
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+			struct net_device *ndev)
 {
 	int err;
-	int i;
 	struct ib_device *dev = &rxe->ib_dev;

-	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
-	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+	strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

-	dev->owner = THIS_MODULE;
 	dev->node_type = RDMA_NODE_IB_CA;
 	dev->phys_port_cnt = 1;
 	dev->num_comp_vectors = num_possible_cpus();
-	dev->dev.parent = rxe_dma_device(rxe);
 	dev->local_dma_lkey = 0;
 	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
-			    rxe->ndev->dev_addr);
-	dev->dev.dma_ops = &dma_virt_ops;
-	dma_coerce_mask_and_coherent(&dev->dev,
-				     dma_get_required_mask(dev->dev.parent));
-
-	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
-	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
-	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
-	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
-	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
-	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
-	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
-	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
-	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
-	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
-	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
-	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
-	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
-	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
-	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
-	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
-	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
-	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
-	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
-	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
-	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
-	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
-	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
-	    ;
-
-	dev->query_device = rxe_query_device;
-	dev->modify_device = rxe_modify_device;
-	dev->query_port = rxe_query_port;
-	dev->modify_port = rxe_modify_port;
-	dev->get_link_layer = rxe_get_link_layer;
-	dev->query_gid = rxe_query_gid;
-	dev->get_netdev = rxe_get_netdev;
-	dev->add_gid = rxe_add_gid;
-	dev->del_gid = rxe_del_gid;
-	dev->query_pkey = rxe_query_pkey;
-	dev->alloc_ucontext = rxe_alloc_ucontext;
-	dev->dealloc_ucontext = rxe_dealloc_ucontext;
-	dev->mmap = rxe_mmap;
-	dev->get_port_immutable = rxe_port_immutable;
-	dev->alloc_pd = rxe_alloc_pd;
-	dev->dealloc_pd = rxe_dealloc_pd;
-	dev->create_ah = rxe_create_ah;
-	dev->modify_ah = rxe_modify_ah;
-	dev->query_ah = rxe_query_ah;
-	dev->destroy_ah = rxe_destroy_ah;
-	dev->create_srq = rxe_create_srq;
-	dev->modify_srq = rxe_modify_srq;
-	dev->query_srq = rxe_query_srq;
-	dev->destroy_srq = rxe_destroy_srq;
-	dev->post_srq_recv = rxe_post_srq_recv;
-	dev->create_qp = rxe_create_qp;
-	dev->modify_qp = rxe_modify_qp;
-	dev->query_qp = rxe_query_qp;
-	dev->destroy_qp = rxe_destroy_qp;
-	dev->post_send = rxe_post_send;
-	dev->post_recv = rxe_post_recv;
-	dev->create_cq = rxe_create_cq;
-	dev->destroy_cq = rxe_destroy_cq;
-	dev->resize_cq = rxe_resize_cq;
-	dev->poll_cq = rxe_poll_cq;
-	dev->peek_cq = rxe_peek_cq;
-	dev->req_notify_cq = rxe_req_notify_cq;
-	dev->get_dma_mr = rxe_get_dma_mr;
-	dev->reg_user_mr = rxe_reg_user_mr;
-	dev->dereg_mr = rxe_dereg_mr;
-	dev->alloc_mr = rxe_alloc_mr;
-	dev->map_mr_sg = rxe_map_mr_sg;
-	dev->attach_mcast = rxe_attach_mcast;
-	dev->detach_mcast = rxe_detach_mcast;
-	dev->get_hw_stats = rxe_ib_get_hw_stats;
-	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;
-
-	rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
-	if (IS_ERR(rxe->tfm)) {
-		pr_err("failed to allocate crc algorithm err:%ld\n",
-		       PTR_ERR(rxe->tfm));
-		return PTR_ERR(rxe->tfm);
-	}
-
-	err = ib_register_device(dev, NULL);
-	if (err) {
-		pr_warn("rxe_register_device failed, err = %d\n", err);
-		goto err1;
-	}
+			    rxe->raw_gid);

-	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
-		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
-		if (err) {
-			pr_warn("device_create_file failed, i = %d, err = %d\n",
-				i, err);
-			goto err2;
-		}
-	}
+	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
+				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

-	return 0;
+	ib_set_device_ops(dev, &rxe_dev_ops);
+	err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
+	if (err)
+		return err;

-err2:
-	ib_unregister_device(dev);
-err1:
-	crypto_free_shash(rxe->tfm);
+	err = ib_register_device(dev, ibdev_name, NULL);
+	if (err)
+		rxe_dbg_dev(rxe, "failed with error %d\n", err);

+	/*
+	 * Note that rxe may be invalid at this point if another thread
+	 * unregistered it.
+	 */
 	return err;
 }
-
-int rxe_unregister_device(struct rxe_dev *rxe)
-{
-	int i;
-	struct ib_device *dev = &rxe->ib_dev;
-
-	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
-		device_remove_file(&dev->dev, rxe_dev_attributes[i]);
-
-	ib_unregister_device(dev);
-
-	return 0;
-}
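The verbs implemented above are reached from user space through rdma-core. A minimal sketch of a consumer exercising the query/alloc/create paths follows (assumes libibverbs and an rxe device already bound to a netdev, e.g. with "rdma link add rxe0 type rxe netdev eth0"; error handling abbreviated, illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **list;
	struct ibv_context *ctx;
	struct ibv_device_attr dev_attr;
	struct ibv_port_attr port_attr;
	struct ibv_pd *pd;
	struct ibv_cq *cq;
	int num;

	list = ibv_get_device_list(&num);
	if (!list || !num)
		exit(1);

	ctx = ibv_open_device(list[0]);		/* rxe_alloc_ucontext() */
	if (!ctx)
		exit(1);

	if (ibv_query_device(ctx, &dev_attr))	/* rxe_query_device() */
		exit(1);
	if (ibv_query_port(ctx, 1, &port_attr))	/* rxe_query_port() */
		exit(1);

	printf("max_qp=%d port_state=%d\n", dev_attr.max_qp, port_attr.state);

	pd = ibv_alloc_pd(ctx);			/* rxe_alloc_pd() */
	cq = ibv_create_cq(ctx, 16, NULL, NULL, 0); /* rxe_create_cq() */
	if (!pd || !cq)
		exit(1);

	ibv_destroy_cq(cq);			/* rxe_destroy_cq() */
	ibv_dealloc_pd(pd);			/* rxe_dealloc_pd() */
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}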

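Both posting paths above follow the same producer-side queue discipline: check queue_full(), fill the slot returned by queue_producer_addr(), then publish it with queue_advance_producer() (all with QUEUE_TYPE_FROM_ULP). A simplified single-producer/single-consumer ring in C11 captures the idea; names and layout here are stand-ins, not the rxe_queue implementation:

#include <stdatomic.h>
#include <stdbool.h>

struct ring {
	atomic_uint producer;		/* written by the posting side only */
	atomic_uint consumer;		/* written by the draining side only */
	unsigned int index_mask;	/* size - 1, size a power of two */
	void *slots[];
};

static bool ring_full(struct ring *q)
{
	unsigned int prod = atomic_load_explicit(&q->producer,
						 memory_order_relaxed);
	unsigned int cons = atomic_load_explicit(&q->consumer,
						 memory_order_acquire);

	return ((prod + 1) & q->index_mask) == (cons & q->index_mask);
}

static void **ring_producer_slot(struct ring *q)
{
	unsigned int prod = atomic_load_explicit(&q->producer,
						 memory_order_relaxed);

	return &q->slots[prod & q->index_mask];
}

static void ring_advance_producer(struct ring *q)
{
	unsigned int prod = atomic_load_explicit(&q->producer,
						 memory_order_relaxed);

	/* release ordering: the slot contents written by the producer
	 * must be visible before the new index is observed
	 */
	atomic_store_explicit(&q->producer, prod + 1, memory_order_release);
}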