Diffstat (limited to 'drivers/infiniband')
58 files changed, 2396 insertions(+), 1169 deletions(-)
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 6ebd9ad95010..e3cdafff8ece 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -10,7 +10,8 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ - multicast.o mad.o smi.o agent.o mad_rmpp.o + multicast.o mad.o smi.o agent.o mad_rmpp.o \ + security.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 02971e239a18..a6cb379a4ebc 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -179,8 +179,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr, } /* Construct the family header first */ - header = (struct rdma_ls_ip_resolve_header *) - skb_put(skb, NLMSG_ALIGN(sizeof(*header))); + header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); header->ifindex = dev_addr->bound_dev_if; nla_put(skb, attrtype, size, daddr); @@ -449,12 +448,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, return ret; rt = (struct rt6_info *)dst; - if (ipv6_addr_any(&fl6.saddr)) { - ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, - &fl6.daddr, 0, &fl6.saddr); - if (ret) - goto put; - + if (ipv6_addr_any(&src_in->sin6_addr)) { src_in->sin6_family = AF_INET6; src_in->sin6_addr = fl6.saddr; } @@ -471,9 +465,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, *pdst = dst; return 0; -put: - dst_release(dst); - return ret; } #else static int addr6_resolve(struct sockaddr_in6 *src_in, diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index b1371eb9f46c..efc94304dee3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -53,6 +53,7 @@ struct ib_update_work { struct work_struct work; struct ib_device *device; u8 port_num; + bool enforce_security; }; union ib_gid zgid; @@ -911,6 +912,26 @@ int ib_get_cached_pkey(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_pkey); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx) +{ + unsigned long flags; + int p; + + if (port_num < rdma_start_port(device) || + port_num > rdma_end_port(device)) + return -EINVAL; + + p = port_num - rdma_start_port(device); + read_lock_irqsave(&device->cache.lock, flags); + *sn_pfx = device->cache.ports[p].subnet_prefix; + read_unlock_irqrestore(&device->cache.lock, flags); + + return 0; +} +EXPORT_SYMBOL(ib_get_cached_subnet_prefix); + int ib_find_cached_pkey(struct ib_device *device, u8 port_num, u16 pkey, @@ -1022,7 +1043,8 @@ int ib_get_cached_port_state(struct ib_device *device, EXPORT_SYMBOL(ib_get_cached_port_state); static void ib_cache_update(struct ib_device *device, - u8 port) + u8 port, + bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; @@ -1108,8 +1130,15 @@ static void ib_cache_update(struct ib_device *device, device->cache.ports[port - rdma_start_port(device)].port_state = tprops->state; + device->cache.ports[port - rdma_start_port(device)].subnet_prefix = + tprops->subnet_prefix; write_unlock_irq(&device->cache.lock); + if (enforce_security) + ib_security_cache_change(device, + port, + tprops->subnet_prefix); + kfree(gid_cache); 
kfree(old_pkey_cache); kfree(tprops); @@ -1126,7 +1155,9 @@ static void ib_cache_task(struct work_struct *_work) struct ib_update_work *work = container_of(_work, struct ib_update_work, work); - ib_cache_update(work->device, work->port_num); + ib_cache_update(work->device, + work->port_num, + work->enforce_security); kfree(work); } @@ -1147,6 +1178,12 @@ static void ib_cache_event(struct ib_event_handler *handler, INIT_WORK(&work->work, ib_cache_task); work->device = event->device; work->port_num = event->element.port_num; + if (event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_GID_CHANGE) + work->enforce_security = true; + else + work->enforce_security = false; + queue_work(ib_wq, &work->work); } } @@ -1172,7 +1209,7 @@ int ib_cache_setup_one(struct ib_device *device) goto out; for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) - ib_cache_update(device, p + rdma_start_port(device)); + ib_cache_update(device, p + rdma_start_port(device), true); INIT_IB_EVENT_HANDLER(&device->cache.event_handler, device, ib_cache_event); diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index d92ab4eaa8f3..11ae67514e13 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -38,6 +38,16 @@ #include <linux/cgroup_rdma.h> #include <rdma/ib_verbs.h> +#include <rdma/ib_mad.h> +#include "mad_priv.h" + +struct pkey_index_qp_list { + struct list_head pkey_index_list; + u16 pkey_index; + /* Lock to hold while iterating the qp_list. */ + spinlock_t qp_list_lock; + struct list_head qp_list; +}; #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) int cma_configfs_init(void); @@ -186,4 +196,109 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, int ib_nl_handle_ip_res_resp(struct sk_buff *skb, struct netlink_callback *cb); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx); + +#ifdef CONFIG_SECURITY_INFINIBAND +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec); + +void ib_security_destroy_port_pkey_list(struct ib_device *device); + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix); + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata); + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_destroy_qp_security_begin(struct ib_qp_security *sec); +void ib_destroy_qp_security_abort(struct ib_qp_security *sec); +void ib_destroy_qp_security_end(struct ib_qp_security *sec); +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_close_shared_qp_security(struct ib_qp_security *sec); +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type); +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent); +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index); +#else +static inline int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + return 0; +} + +static inline void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ +} + +static inline void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ +} + +static inline int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + return qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); +} + 
+static inline int ib_create_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ +} + +static inline int ib_open_shared_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ +} + +static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + return 0; +} + +static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ +} + +static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, + u16 pkey_index) +{ + return 0; +} +#endif #endif /* _CORE_PRIV_H */ diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 81d447da0048..631eaa9daf65 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -39,6 +39,8 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/security.h> +#include <linux/notifier.h> #include <rdma/rdma_netlink.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -82,6 +84,14 @@ static LIST_HEAD(client_list); static DEFINE_MUTEX(device_mutex); static DECLARE_RWSEM(lists_rwsem); +static int ib_security_change(struct notifier_block *nb, unsigned long event, + void *lsm_data); +static void ib_policy_change_task(struct work_struct *work); +static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); + +static struct notifier_block ibdev_lsm_nb = { + .notifier_call = ib_security_change, +}; static int ib_device_check_mandatory(struct ib_device *device) { @@ -325,6 +335,64 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len) } EXPORT_SYMBOL(ib_get_device_fw_str); +static int setup_port_pkey_list(struct ib_device *device) +{ + int i; + + /** + * device->port_pkey_list is indexed directly by the port number, + * Therefore it is declared as a 1 based array with potential empty + * slots at the beginning. 
+ */ + device->port_pkey_list = kcalloc(rdma_end_port(device) + 1, + sizeof(*device->port_pkey_list), + GFP_KERNEL); + + if (!device->port_pkey_list) + return -ENOMEM; + + for (i = 0; i < (rdma_end_port(device) + 1); i++) { + spin_lock_init(&device->port_pkey_list[i].list_lock); + INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list); + } + + return 0; +} + +static void ib_policy_change_task(struct work_struct *work) +{ + struct ib_device *dev; + + down_read(&lists_rwsem); + list_for_each_entry(dev, &device_list, core_list) { + int i; + + for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) { + u64 sp; + int ret = ib_get_cached_subnet_prefix(dev, + i, + &sp); + + WARN_ONCE(ret, + "ib_get_cached_subnet_prefix err: %d, this should never happen here\n", + ret); + ib_security_cache_change(dev, i, sp); + } + } + up_read(&lists_rwsem); +} + +static int ib_security_change(struct notifier_block *nb, unsigned long event, + void *lsm_data) +{ + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + schedule_work(&ib_policy_change_work); + + return NOTIFY_OK; +} + /** * ib_register_device - Register an IB device with IB core * @device:Device to register @@ -385,6 +453,12 @@ int ib_register_device(struct ib_device *device, goto out; } + ret = setup_port_pkey_list(device); + if (ret) { + pr_warn("Couldn't create per port_pkey_list\n"); + goto out; + } + ret = ib_cache_setup_one(device); if (ret) { pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n"); @@ -468,6 +542,9 @@ void ib_unregister_device(struct ib_device *device) ib_device_unregister_sysfs(device); ib_cache_cleanup_one(device); + ib_security_destroy_port_pkey_list(device); + kfree(device->port_pkey_list); + down_write(&lists_rwsem); spin_lock_irqsave(&device->client_data_lock, flags); list_for_each_entry_safe(context, tmp, &device->client_data_list, list) @@ -1082,10 +1159,18 @@ static int __init ib_core_init(void) goto err_sa; } + ret = register_lsm_notifier(&ibdev_lsm_nb); + if (ret) { + pr_warn("Couldn't register LSM notifier. 
ret %d\n", ret); + goto err_ibnl_clients; + } + ib_cache_setup(); return 0; +err_ibnl_clients: + ib_remove_ibnl_clients(); err_sa: ib_sa_cleanup(); err_mad: @@ -1105,6 +1190,7 @@ err: static void __exit ib_core_cleanup(void) { + unregister_lsm_notifier(&ibdev_lsm_nb); ib_cache_cleanup(); ib_remove_ibnl_clients(); ib_sa_cleanup(); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 192ee3dafb80..f8f53bb90837 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -40,9 +40,11 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/security.h> #include <rdma/ib_cache.h> #include "mad_priv.h" +#include "core_priv.h" #include "mad_rmpp.h" #include "smi.h" #include "opa_smi.h" @@ -369,6 +371,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, atomic_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); + ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); + if (ret2) { + ret = ERR_PTR(ret2); + goto error4; + } + spin_lock_irqsave(&port_priv->reg_lock, flags); mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; @@ -386,7 +394,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (method) { if (method_in_use(&method, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, @@ -402,14 +410,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (is_vendor_method_in_use( vendor_class, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); } if (ret2) { ret = ERR_PTR(ret2); - goto error4; + goto error5; } } @@ -418,9 +426,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, spin_unlock_irqrestore(&port_priv->reg_lock, flags); return &mad_agent_priv->agent; - -error4: +error5: spin_unlock_irqrestore(&port_priv->reg_lock, flags); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); +error4: kfree(reg_req); error3: kfree(mad_agent_priv); @@ -491,6 +500,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, struct ib_mad_agent *ret; struct ib_mad_snoop_private *mad_snoop_priv; int qpn; + int err; /* Validate parameters */ if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || @@ -525,17 +535,25 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, mad_snoop_priv->agent.port_num = port_num; mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; init_completion(&mad_snoop_priv->comp); + + err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type); + if (err) { + ret = ERR_PTR(err); + goto error2; + } + mad_snoop_priv->snoop_index = register_snoop_agent( &port_priv->qp_info[qpn], mad_snoop_priv); if (mad_snoop_priv->snoop_index < 0) { ret = ERR_PTR(mad_snoop_priv->snoop_index); - goto error2; + goto error3; } atomic_set(&mad_snoop_priv->refcount, 1); return &mad_snoop_priv->agent; - +error3: + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); error2: kfree(mad_snoop_priv); error1: @@ -581,6 +599,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); + kfree(mad_agent_priv->reg_req); kfree(mad_agent_priv); } @@ -599,6 +619,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) deref_snoop_agent(mad_snoop_priv); 
wait_for_completion(&mad_snoop_priv->comp); + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); + kfree(mad_snoop_priv); } @@ -1215,12 +1237,16 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, /* Walk list of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) { - mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_agent_priv = mad_send_wr->mad_agent_priv; + ret = ib_mad_enforce_security(mad_agent_priv, + mad_send_wr->send_wr.pkey_index); + if (ret) + goto error; + if (!send_buf->mad_agent->send_handler || (send_buf->timeout_ms && !send_buf->mad_agent->recv_handler)) { @@ -1946,6 +1972,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; + int ret; + + ret = ib_mad_enforce_security(mad_agent_priv, + mad_recv_wc->wc->pkey_index); + if (ret) { + ib_free_recv_mad(mad_recv_wc); + deref_mad_agent(mad_agent_priv); + } INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); @@ -2003,6 +2037,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, mad_recv_wc); deref_mad_agent(mad_agent_priv); } + + return; } static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index fb7aec4047c8..70fa4cabe48e 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -759,8 +759,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, query->mad_buf->context[1] = NULL; /* Construct the family header first */ - header = (struct rdma_ls_resolve_header *) - skb_put(skb, NLMSG_ALIGN(sizeof(*header))); + header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); memcpy(header->device_name, query->port->agent->device->name, LS_DEVICE_NAME_MAX); header->port_num = query->port->port_num; diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c new file mode 100644 index 000000000000..3e8c38953912 --- /dev/null +++ b/drivers/infiniband/core/security.c @@ -0,0 +1,705 @@ +/* + * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifdef CONFIG_SECURITY_INFINIBAND + +#include <linux/security.h> +#include <linux/completion.h> +#include <linux/list.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_cache.h> +#include "core_priv.h" +#include "mad_priv.h" + +static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey = NULL; + struct pkey_index_qp_list *tmp_pkey; + struct ib_device *dev = pp->sec->dev; + + spin_lock(&dev->port_pkey_list[pp->port_num].list_lock); + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[pp->port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + pkey = tmp_pkey; + break; + } + } + spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock); + return pkey; +} + +static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp, + u16 *pkey, + u64 *subnet_prefix) +{ + struct ib_device *dev = pp->sec->dev; + int ret; + + ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix); + + return ret; +} + +static int enforce_qp_pkey_security(u16 pkey, + u64 subnet_prefix, + struct ib_qp_security *qp_sec) +{ + struct ib_qp_security *shared_qp_sec; + int ret; + + ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey); + if (ret) + return ret; + + if (qp_sec->qp == qp_sec->qp->real_qp) { + list_for_each_entry(shared_qp_sec, + &qp_sec->shared_qp_list, + shared_qp_list) { + ret = security_ib_pkey_access(shared_qp_sec->security, + subnet_prefix, + pkey); + if (ret) + return ret; + } + } + return 0; +} + +/* The caller of this function must hold the QP security + * mutex of the QP of the security structure in *pps. + * + * It takes separate ports_pkeys and security structure + * because in some cases the pps will be for a new settings + * or the pps will be for the real QP and security structure + * will be for a shared QP. + */ +static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps, + struct ib_qp_security *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret = 0; + + if (!pps) + return 0; + + if (pps->main.state != IB_PORT_PKEY_NOT_VALID) { + get_pkey_and_subnet_prefix(&pps->main, + &pkey, + &subnet_prefix); + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + } + if (ret) + return ret; + + if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) { + get_pkey_and_subnet_prefix(&pps->alt, + &pkey, + &subnet_prefix); + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + } + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void qp_to_error(struct ib_qp_security *sec) +{ + struct ib_qp_security *shared_qp_sec; + struct ib_qp_attr attr = { + .qp_state = IB_QPS_ERR + }; + struct ib_event event = { + .event = IB_EVENT_QP_FATAL + }; + + /* If the QP is in the process of being destroyed + * the qp pointer in the security structure is + * undefined. It cannot be modified now. 
+ */ + if (sec->destroying) + return; + + ib_modify_qp(sec->qp, + &attr, + IB_QP_STATE); + + if (sec->qp->event_handler && sec->qp->qp_context) { + event.element.qp = sec->qp; + sec->qp->event_handler(&event, + sec->qp->qp_context); + } + + list_for_each_entry(shared_qp_sec, + &sec->shared_qp_list, + shared_qp_list) { + struct ib_qp *qp = shared_qp_sec->qp; + + if (qp->event_handler && qp->qp_context) { + event.element.qp = qp; + event.device = qp->device; + qp->event_handler(&event, + qp->qp_context); + } + } +} + +static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, + struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct ib_port_pkey *pp, *tmp_pp; + bool comp; + LIST_HEAD(to_error_list); + u16 pkey_val; + + if (!ib_get_cached_pkey(device, + port_num, + pkey->pkey_index, + &pkey_val)) { + spin_lock(&pkey->qp_list_lock); + list_for_each_entry(pp, &pkey->qp_list, qp_list) { + if (atomic_read(&pp->sec->error_list_count)) + continue; + + if (enforce_qp_pkey_security(pkey_val, + subnet_prefix, + pp->sec)) { + atomic_inc(&pp->sec->error_list_count); + list_add(&pp->to_error_list, + &to_error_list); + } + } + spin_unlock(&pkey->qp_list_lock); + } + + list_for_each_entry_safe(pp, + tmp_pp, + &to_error_list, + to_error_list) { + mutex_lock(&pp->sec->mutex); + qp_to_error(pp->sec); + list_del(&pp->to_error_list); + atomic_dec(&pp->sec->error_list_count); + comp = pp->sec->destroying; + mutex_unlock(&pp->sec->mutex); + + if (comp) + complete(&pp->sec->error_complete); + } +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static int port_pkey_list_insert(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *tmp_pkey; + struct pkey_index_qp_list *pkey; + struct ib_device *dev; + u8 port_num = pp->port_num; + int ret = 0; + + if (pp->state != IB_PORT_PKEY_VALID) + return 0; + + dev = pp->sec->dev; + + pkey = get_pkey_idx_qp_list(pp); + + if (!pkey) { + bool found = false; + + pkey = kzalloc(sizeof(*pkey), GFP_KERNEL); + if (!pkey) + return -ENOMEM; + + spin_lock(&dev->port_pkey_list[port_num].list_lock); + /* Check for the PKey again. A racing process may + * have created it. + */ + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + kfree(pkey); + pkey = tmp_pkey; + found = true; + break; + } + } + + if (!found) { + pkey->pkey_index = pp->pkey_index; + spin_lock_init(&pkey->qp_list_lock); + INIT_LIST_HEAD(&pkey->qp_list); + list_add(&pkey->pkey_index_list, + &dev->port_pkey_list[port_num].pkey_list); + } + spin_unlock(&dev->port_pkey_list[port_num].list_lock); + } + + spin_lock(&pkey->qp_list_lock); + list_add(&pp->qp_list, &pkey->qp_list); + spin_unlock(&pkey->qp_list_lock); + + pp->state = IB_PORT_PKEY_LISTED; + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void port_pkey_list_remove(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey; + + if (pp->state != IB_PORT_PKEY_LISTED) + return; + + pkey = get_pkey_idx_qp_list(pp); + + spin_lock(&pkey->qp_list_lock); + list_del(&pp->qp_list); + spin_unlock(&pkey->qp_list_lock); + + /* The setting may still be valid, i.e. after + * a destroy has failed for example. + */ + pp->state = IB_PORT_PKEY_VALID; +} + +static void destroy_qp_security(struct ib_qp_security *sec) +{ + security_ib_free_security(sec->security); + kfree(sec->ports_pkeys); + kfree(sec); +} + +/* The caller of this function must hold the QP security + * mutex. 
+ */ +static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, + const struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + struct ib_ports_pkeys *new_pps; + struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys; + + new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL); + if (!new_pps) + return NULL; + + if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) { + if (!qp_pps) { + new_pps->main.port_num = qp_attr->port_num; + new_pps->main.pkey_index = qp_attr->pkey_index; + } else { + new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ? + qp_attr->port_num : + qp_pps->main.port_num; + + new_pps->main.pkey_index = + (qp_attr_mask & IB_QP_PKEY_INDEX) ? + qp_attr->pkey_index : + qp_pps->main.pkey_index; + } + new_pps->main.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->main.port_num = qp_pps->main.port_num; + new_pps->main.pkey_index = qp_pps->main.pkey_index; + if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) + new_pps->main.state = IB_PORT_PKEY_VALID; + } + + if (qp_attr_mask & IB_QP_ALT_PATH) { + new_pps->alt.port_num = qp_attr->alt_port_num; + new_pps->alt.pkey_index = qp_attr->alt_pkey_index; + new_pps->alt.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->alt.port_num = qp_pps->alt.port_num; + new_pps->alt.pkey_index = qp_pps->alt.pkey_index; + if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID) + new_pps->alt.state = IB_PORT_PKEY_VALID; + } + + new_pps->main.sec = qp->qp_sec; + new_pps->alt.sec = qp->qp_sec; + return new_pps; +} + +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + struct ib_qp *real_qp = qp->real_qp; + int ret; + + ret = ib_create_qp_security(qp, dev); + + if (ret) + return ret; + + mutex_lock(&real_qp->qp_sec->mutex); + ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, + qp->qp_sec); + + if (ret) + goto ret; + + if (qp != real_qp) + list_add(&qp->qp_sec->shared_qp_list, + &real_qp->qp_sec->shared_qp_list); +ret: + mutex_unlock(&real_qp->qp_sec->mutex); + if (ret) + destroy_qp_security(qp->qp_sec); + + return ret; +} + +void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ + struct ib_qp *real_qp = sec->qp->real_qp; + + mutex_lock(&real_qp->qp_sec->mutex); + list_del(&sec->shared_qp_list); + mutex_unlock(&real_qp->qp_sec->mutex); + + destroy_qp_security(sec); +} + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + int ret; + + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); + if (!qp->qp_sec) + return -ENOMEM; + + qp->qp_sec->qp = qp; + qp->qp_sec->dev = dev; + mutex_init(&qp->qp_sec->mutex); + INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list); + atomic_set(&qp->qp_sec->error_list_count, 0); + init_completion(&qp->qp_sec->error_complete); + ret = security_ib_alloc_security(&qp->qp_sec->security); + if (ret) + kfree(qp->qp_sec); + + return ret; +} +EXPORT_SYMBOL(ib_create_qp_security); + +void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ + mutex_lock(&sec->mutex); + + /* Remove the QP from the lists so it won't get added to + * a to_error_list during the destroy process. + */ + if (sec->ports_pkeys) { + port_pkey_list_remove(&sec->ports_pkeys->main); + port_pkey_list_remove(&sec->ports_pkeys->alt); + } + + /* If the QP is already in one or more of those lists + * the destroying flag will ensure the to error flow + * doesn't operate on an undefined QP. + */ + sec->destroying = true; + + /* Record the error list count to know how many completions + * to wait for. 
+ */ + sec->error_comps_pending = atomic_read(&sec->error_list_count); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ + int ret; + int i; + + /* If a concurrent cache update is in progress this + * QP security could be marked for an error state + * transition. Wait for this to complete. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + mutex_lock(&sec->mutex); + sec->destroying = false; + + /* Restore the position in the lists and verify + * access is still allowed in case a cache update + * occurred while attempting to destroy. + * + * Because these setting were listed already + * and removed during ib_destroy_qp_security_begin + * we know the pkey_index_qp_list for the PKey + * already exists so port_pkey_list_insert won't fail. + */ + if (sec->ports_pkeys) { + port_pkey_list_insert(&sec->ports_pkeys->main); + port_pkey_list_insert(&sec->ports_pkeys->alt); + } + + ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec); + if (ret) + qp_to_error(sec); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ + int i; + + /* If a concurrent cache update is occurring we must + * wait until this QP security structure is processed + * in the QP to error flow before destroying it because + * the to_error_list is in use. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + destroy_qp_security(sec); +} + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct pkey_index_qp_list *pkey; + + list_for_each_entry(pkey, + &device->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + check_pkey_qps(pkey, + device, + port_num, + subnet_prefix); + } +} + +void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ + struct pkey_index_qp_list *pkey, *tmp_pkey; + int i; + + for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { + spin_lock(&device->port_pkey_list[i].list_lock); + list_for_each_entry_safe(pkey, + tmp_pkey, + &device->port_pkey_list[i].pkey_list, + pkey_index_list) { + list_del(&pkey->pkey_index_list); + kfree(pkey); + } + spin_unlock(&device->port_pkey_list[i].list_lock); + } +} + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + int ret = 0; + struct ib_ports_pkeys *tmp_pps; + struct ib_ports_pkeys *new_pps; + bool special_qp = (qp->qp_type == IB_QPT_SMI || + qp->qp_type == IB_QPT_GSI || + qp->qp_type >= IB_QPT_RESERVED1); + bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || + (qp_attr_mask & IB_QP_ALT_PATH)); + + if (pps_change && !special_qp) { + mutex_lock(&qp->qp_sec->mutex); + new_pps = get_new_pps(qp, + qp_attr, + qp_attr_mask); + + /* Add this QP to the lists for the new port + * and pkey settings before checking for permission + * in case there is a concurrent cache update + * occurring. Walking the list for a cache change + * doesn't acquire the security mutex unless it's + * sending the QP to error. + */ + ret = port_pkey_list_insert(&new_pps->main); + + if (!ret) + ret = port_pkey_list_insert(&new_pps->alt); + + if (!ret) + ret = check_qp_port_pkey_settings(new_pps, + qp->qp_sec); + } + + if (!ret) + ret = qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); + + if (pps_change && !special_qp) { + /* Clean up the lists and free the appropriate + * ports_pkeys structure. 
+ */ + if (ret) { + tmp_pps = new_pps; + } else { + tmp_pps = qp->qp_sec->ports_pkeys; + qp->qp_sec->ports_pkeys = new_pps; + } + + if (tmp_pps) { + port_pkey_list_remove(&tmp_pps->main); + port_pkey_list_remove(&tmp_pps->alt); + } + kfree(tmp_pps); + mutex_unlock(&qp->qp_sec->mutex); + } + return ret; +} +EXPORT_SYMBOL(ib_security_modify_qp); + +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret; + + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix); + + if (ret) + return ret; + + return security_ib_pkey_access(sec, subnet_prefix, pkey); +} +EXPORT_SYMBOL(ib_security_pkey_access); + +static int ib_mad_agent_security_change(struct notifier_block *nb, + unsigned long event, + void *data) +{ + struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb); + + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security, + ag->device->name, + ag->port_num); + + return NOTIFY_OK; +} + +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + int ret; + + ret = security_ib_alloc_security(&agent->security); + if (ret) + return ret; + + if (qp_type != IB_QPT_SMI) + return 0; + + ret = security_ib_endport_manage_subnet(agent->security, + agent->device->name, + agent->port_num); + if (ret) + return ret; + + agent->lsm_nb.notifier_call = ib_mad_agent_security_change; + ret = register_lsm_notifier(&agent->lsm_nb); + if (ret) + return ret; + + agent->smp_allowed = true; + agent->lsm_nb_reg = true; + return 0; +} + +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ + security_ib_free_security(agent->security); + if (agent->lsm_nb_reg) + unregister_lsm_notifier(&agent->lsm_nb); +} + +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) +{ + int ret; + + if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) + return -EACCES; + + ret = ib_security_pkey_access(map->agent.device, + map->agent.port_num, + pkey_index, + map->agent.security); + + if (ret) + return ret; + + return 0; +} + +#endif /* CONFIG_SECURITY_INFINIBAND */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 70b7fb156414..8ba9bfb073d1 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1508,6 +1508,10 @@ static int create_qp(struct ib_uverbs_file *file, } if (cmd->qp_type != IB_QPT_XRC_TGT) { + ret = ib_create_qp_security(qp, device); + if (ret) + goto err_cb; + qp->real_qp = qp; qp->device = device; qp->pd = pd; @@ -1931,6 +1935,11 @@ static int modify_qp(struct ib_uverbs_file *file, goto out; } + if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) { + ret = -EINVAL; + goto release_qp; + } + attr->qp_state = cmd->base.qp_state; attr->cur_qp_state = cmd->base.cur_qp_state; attr->path_mtu = cmd->base.path_mtu; @@ -2002,14 +2011,17 @@ static int modify_qp(struct ib_uverbs_file *file, if (ret) goto release_qp; } - ret = qp->device->modify_qp(qp, attr, + ret = ib_security_modify_qp(qp, + attr, modify_qp_mask(qp->qp_type, cmd->base.attr_mask), udata); } else { - ret = ib_modify_qp(qp, attr, - modify_qp_mask(qp->qp_type, - cmd->base.attr_mask)); + ret = ib_security_modify_qp(qp, + attr, + modify_qp_mask(qp->qp_type, + cmd->base.attr_mask), + NULL); } release_qp: @@ -2541,6 +2553,9 @@ ssize_t 
ib_uverbs_create_ah(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) + return -EINVAL; + INIT_UDATA(&udata, buf + sizeof(cmd), (unsigned long)cmd.response + sizeof(resp), in_len - sizeof(cmd), out_len - sizeof(resp)); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 4792f5209ac2..c973a83c898b 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -44,6 +44,7 @@ #include <linux/in.h> #include <linux/in6.h> #include <net/addrconf.h> +#include <linux/security.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> @@ -713,12 +714,20 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, { struct ib_qp *qp; unsigned long flags; + int err; qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->real_qp = real_qp; + err = ib_open_shared_qp_security(qp, real_qp->device); + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + qp->real_qp = real_qp; atomic_inc(&real_qp->usecnt); qp->device = real_qp->device; qp->event_handler = event_handler; @@ -804,6 +813,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, if (IS_ERR(qp)) return qp; + ret = ib_create_qp_security(qp, device); + if (ret) { + ib_destroy_qp(qp); + return ERR_PTR(ret); + } + qp->device = device; qp->real_qp = qp; qp->uobject = NULL; @@ -1266,7 +1281,7 @@ int ib_modify_qp(struct ib_qp *qp, return ret; } - return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); + return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); @@ -1295,6 +1310,7 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; @@ -1335,6 +1351,7 @@ int ib_destroy_qp(struct ib_qp *qp) struct ib_cq *scq, *rcq; struct ib_srq *srq; struct ib_rwq_ind_table *ind_tbl; + struct ib_qp_security *sec; int ret; WARN_ON_ONCE(qp->mrs_used > 0); @@ -1350,6 +1367,9 @@ int ib_destroy_qp(struct ib_qp *qp) rcq = qp->recv_cq; srq = qp->srq; ind_tbl = qp->rwq_ind_tbl; + sec = qp->qp_sec; + if (sec) + ib_destroy_qp_security_begin(sec); if (!qp->uobject) rdma_rw_cleanup_mrs(qp); @@ -1366,6 +1386,11 @@ int ib_destroy_qp(struct ib_qp *qp) atomic_dec(&srq->usecnt); if (ind_tbl) atomic_dec(&ind_tbl->usecnt); + if (sec) + ib_destroy_qp_security_end(sec); + } else { + if (sec) + ib_destroy_qp_security_abort(sec); } return ret; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ebf7be8d4139..08772836fded 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -56,6 +56,10 @@ #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) +#define BNXT_RE_UD_QP_HW_STALL 0x400000 + +#define BNXT_RE_RQ_WQE_THRESHOLD 32 + struct bnxt_re_work { struct work_struct work; unsigned long event; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7ba9e699d7ab..c7bd68311d0c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -61,6 +61,48 @@ #include "ib_verbs.h" #include <rdma/bnxt_re-abi.h> +static int __from_ib_access_flags(int iflags) +{ + int qflags = 0; + + if (iflags & IB_ACCESS_LOCAL_WRITE) + qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; + if (iflags & IB_ACCESS_REMOTE_READ) + qflags |= 
BNXT_QPLIB_ACCESS_REMOTE_READ; + if (iflags & IB_ACCESS_REMOTE_WRITE) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; + if (iflags & IB_ACCESS_REMOTE_ATOMIC) + qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; + if (iflags & IB_ACCESS_MW_BIND) + qflags |= BNXT_QPLIB_ACCESS_MW_BIND; + if (iflags & IB_ZERO_BASED) + qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; + if (iflags & IB_ACCESS_ON_DEMAND) + qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; + return qflags; +}; + +static enum ib_access_flags __to_ib_access_flags(int qflags) +{ + enum ib_access_flags iflags = 0; + + if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) + iflags |= IB_ACCESS_LOCAL_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) + iflags |= IB_ACCESS_REMOTE_WRITE; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) + iflags |= IB_ACCESS_REMOTE_READ; + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) + iflags |= IB_ACCESS_REMOTE_ATOMIC; + if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) + iflags |= IB_ACCESS_MW_BIND; + if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) + iflags |= IB_ZERO_BASED; + if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) + iflags |= IB_ACCESS_ON_DEMAND; + return iflags; +}; + static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, struct bnxt_qplib_sge *sg_list, int num) { @@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->max_total_mcast_qp_attach = 0; ib_attr->max_ah = dev_attr->max_ah; - ib_attr->max_fmr = dev_attr->max_fmr; - ib_attr->max_map_per_fmr = 1; /* ? */ + ib_attr->max_fmr = 0; + ib_attr->max_map_per_fmr = 0; ib_attr->max_srq = dev_attr->max_srq; ib_attr->max_srq_wr = dev_attr->max_srq_wqes; @@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, return IB_LINK_LAYER_ETHERNET; } +#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) + +static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) +{ + struct bnxt_re_fence_data *fence = &pd->fence; + struct ib_mr *ib_mr = &fence->mr->ib_mr; + struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; + + memset(wqe, 0, sizeof(*wqe)); + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; + wqe->wr_id = BNXT_QPLIB_FENCE_WRID; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + wqe->bind.zero_based = false; + wqe->bind.parent_l_key = ib_mr->lkey; + wqe->bind.va = (u64)(unsigned long)fence->va; + wqe->bind.length = fence->size; + wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); + wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; + + /* Save the initial rkey in fence structure for now; + * wqe->bind.r_key will be set at (re)bind time. 
+ */ + fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); +} + +static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) +{ + struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, + qplib_qp); + struct ib_pd *ib_pd = qp->ib_qp.pd; + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; + struct bnxt_qplib_swqe wqe; + int rc; + + memcpy(&wqe, fence_wqe, sizeof(wqe)); + wqe.bind.r_key = fence->bind_rkey; + fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); + + dev_dbg(rdev_to_dev(qp->rdev), + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", + wqe.bind.r_key, qp->qplib_qp.id, pd); + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + if (rc) { + dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); + return rc; + } + bnxt_qplib_post_send_db(&qp->qplib_qp); + + return rc; +} + +static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) +{ + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = fence->mr; + + if (fence->mw) { + bnxt_re_dealloc_mw(fence->mw); + fence->mw = NULL; + } + if (mr) { + if (mr->ib_mr.rkey) + bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, + true); + if (mr->ib_mr.lkey) + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + kfree(mr); + fence->mr = NULL; + } + if (fence->dma_addr) { + dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + fence->dma_addr = 0; + } +} + +static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) +{ + int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = NULL; + dma_addr_t dma_addr = 0; + struct ib_mw *mw; + u64 pbl_tbl; + int rc; + + dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + rc = dma_mapping_error(dev, dma_addr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); + rc = -EIO; + fence->dma_addr = 0; + goto fail; + } + fence->dma_addr = dma_addr; + + /* Allocate a MR */ + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + rc = -ENOMEM; + goto fail; + } + fence->mr = mr; + mr->rdev = rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); + goto fail; + } + + /* Register MR */ + mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->qplib_mr.va = (u64)(unsigned long)fence->va; + mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; + pbl_tbl = dma_addr; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, + BNXT_RE_FENCE_PBL_SIZE, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); + goto fail; + } + mr->ib_mr.rkey = mr->qplib_mr.rkey; + + /* Create a fence MW only for kernel consumers */ + mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); + if (!mw) { + dev_err(rdev_to_dev(rdev), + "Failed to create fence-MW for PD: %p\n", pd); + rc = -EINVAL; + goto fail; + } + fence->mw = mw; + + bnxt_re_create_fence_wqe(pd); + return 0; + +fail: + bnxt_re_destroy_fence_mr(pd); + return rc; +} + /* Protection Domains */ int bnxt_re_dealloc_pd(struct ib_pd 
*ib_pd) { @@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) struct bnxt_re_dev *rdev = pd->rdev; int rc; + bnxt_re_destroy_fence_mr(pd); if (ib_pd->uobject && pd->dpi.dbr) { struct ib_ucontext *ib_uctx = ib_pd->uobject->context; struct bnxt_re_ucontext *ucntx; @@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, } } + if (!udata) + if (bnxt_re_create_fence_mr(pd)) + dev_warn(rdev_to_dev(rdev), + "Failed to create Fence-MR\n"); return &pd->ib_pd; dbfail: (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, @@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp /* Shadow QP SQ depth should be same as QP1 RQ depth */ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.sq.max_sge = 2; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.sq.q_full_delta = 1; qp->qplib_qp.scq = qp1_qp->scq; qp->qplib_qp.rcq = qp1_qp->rcq; qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.rq.q_full_delta = 1; qp->qplib_qp.mtu = qp1_qp->mtu; @@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false); - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); - qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; @@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_init_attr->cap.max_recv_wr; + qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); if (qp_init_attr->qp_type == IB_QPT_GSI) { + /* Allocate 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_init_attr->cap.max_send_wr; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, } } else { + /* Allocate 128 + 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; + + /* + * Reserving one slot for Phantom WQE. Application can + * post one extra entry in this case. 
But allowing this to avoid + * unexpected Queue full condition + */ + + qp->qplib_qp.sq.q_full_delta -= 1; + qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; if (udata) { @@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->ib_qp.qp_num = qp->qplib_qp.id; spin_lock_init(&qp->sq_lock); + spin_lock_init(&qp->rq_lock); if (udata) { struct bnxt_re_qp_resp resp; @@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) } } -static int __from_ib_access_flags(int iflags) -{ - int qflags = 0; - - if (iflags & IB_ACCESS_LOCAL_WRITE) - qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; - if (iflags & IB_ACCESS_REMOTE_READ) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; - if (iflags & IB_ACCESS_REMOTE_WRITE) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; - if (iflags & IB_ACCESS_REMOTE_ATOMIC) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; - if (iflags & IB_ACCESS_MW_BIND) - qflags |= BNXT_QPLIB_ACCESS_MW_BIND; - if (iflags & IB_ZERO_BASED) - qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; - if (iflags & IB_ACCESS_ON_DEMAND) - qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; - return qflags; -}; - -static enum ib_access_flags __to_ib_access_flags(int qflags) -{ - enum ib_access_flags iflags = 0; - - if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) - iflags |= IB_ACCESS_LOCAL_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) - iflags |= IB_ACCESS_REMOTE_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) - iflags |= IB_ACCESS_REMOTE_READ; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) - iflags |= IB_ACCESS_REMOTE_ATOMIC; - if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) - iflags |= IB_ACCESS_MW_BIND; - if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) - iflags |= IB_ZERO_BASED; - if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) - iflags |= IB_ACCESS_ON_DEMAND; - return iflags; -}; - static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp1_qp, int qp_attr_mask) @@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); qp->qplib_qp.sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_attr->cap.max_send_wr; + /* + * Reserving one slot for Phantom WQE. Some application can + * post one extra entry in this case. 
Allowing this to avoid + * unexpected Queue full condition + */ + qp->qplib_qp.sq.q_full_delta -= 1; qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; if (qp->qplib_qp.rq.max_wqe) { entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_attr->cap.max_recv_wr; qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; } else { /* SRQ was used prior, just ignore the RQ caps */ @@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, return payload_sz; } +static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) +{ + if ((qp->ib_qp.qp_type == IB_QPT_UD || + qp->ib_qp.qp_type == IB_QPT_GSI || + qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && + qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { + int qp_attr_mask; + struct ib_qp_attr qp_attr; + + qp_attr_mask = IB_QP_STATE; + qp_attr.qp_state = IB_QPS_RTS; + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); + qp->qplib_qp.wqe_cnt = 0; + } +} + static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, struct ib_send_wr *wr) @@ -1928,6 +2137,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; } @@ -2024,6 +2234,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; @@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; + unsigned long flags; + u32 count = 0; + spin_lock_irqsave(&qp->rq_lock, flags); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); @@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, *bad_wr = wr; break; } + + /* Ring DB if the RQEs posted reaches a threshold value */ + if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { + bnxt_qplib_post_recv_db(&qp->qplib_qp); + count = 0; + } + wr = wr->next; } - bnxt_qplib_post_recv_db(&qp->qplib_qp); + + if (count) + bnxt_qplib_post_recv_db(&qp->qplib_qp); + + spin_unlock_irqrestore(&qp->rq_lock, flags); + return rc; } @@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc, wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; } +static int send_phantom_wqe(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&qp->sq_lock, flags); + + rc = bnxt_re_bind_fence_mw(lib_qp); + if (!rc) { + lib_qp->sq.phantom_wqe_cnt++; + dev_dbg(&lib_qp->sq.hwq.pdev->dev, + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", + lib_qp->id, lib_qp->sq.hwq.prod, + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), + lib_qp->sq.phantom_wqe_cnt); + } + + spin_unlock_irqrestore(&qp->sq_lock, flags); + return rc; +} + int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); struct bnxt_re_qp *qp; struct bnxt_qplib_cqe *cqe; int i, ncqe, budget; + struct bnxt_qplib_q *sq; + struct bnxt_qplib_qp *lib_qp; u32 tbl_idx; struct bnxt_re_sqp_entries *sqp_entry = NULL; unsigned long flags; @@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) } cqe = &cq->cql[0]; while (budget) { - ncqe = 
bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); + lib_qp = NULL; + ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); + if (lib_qp) { + sq = &lib_qp->sq; + if (sq->send_phantom) { + qp = container_of(lib_qp, + struct bnxt_re_qp, qplib_qp); + if (send_phantom_wqe(qp) == -ENOMEM) + dev_err(rdev_to_dev(cq->rdev), + "Phantom failed! Scheduled to send again\n"); + else + sq->send_phantom = false; + } + } + if (!ncqe) break; @@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) struct bnxt_re_dev *rdev = mr->rdev; int rc; + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } + if (mr->npages && mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); @@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) mr->npages = 0; mr->pages = NULL; } - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (!IS_ERR_OR_NULL(mr->ib_umem)) ib_umem_release(mr->ib_umem); @@ -2914,97 +3182,52 @@ fail: return ERR_PTR(rc); } -/* Fast Memory Regions */ -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr) +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; - struct bnxt_re_fmr *fmr; + struct bnxt_re_mw *mw; int rc; - if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || - fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { - dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) return ERR_PTR(-ENOMEM); - } - fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); - if (!fmr) - return ERR_PTR(-ENOMEM); - - fmr->rdev = rdev; - fmr->qplib_fmr.pd = &pd->qplib_pd; - fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + mw->rdev = rdev; + mw->qplib_mw.pd = &pd->qplib_pd; - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) + mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); goto fail; + } + mw->ib_mw.rkey = mw->qplib_mw.rkey; - fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); - fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; - fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; + atomic_inc(&rdev->mw_count); + return &mw->ib_mw; - atomic_inc(&rdev->mr_count); - return &fmr->ib_fmr; fail: - kfree(fmr); + kfree(mw); return ERR_PTR(rc); } -int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, - u64 iova) +int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) { - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; + struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); + struct bnxt_re_dev *rdev = mw->rdev; int rc; - fmr->qplib_fmr.va = iova; - fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; - - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, - list_len, true); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", - fmr->ib_fmr.lkey); - return rc; -} - -int bnxt_re_unmap_fmr(struct list_head *fmr_list) -{ - struct bnxt_re_dev *rdev; - struct bnxt_re_fmr *fmr; - struct ib_fmr *ib_fmr; - int rc = 0; - - /* Validate each FMRs inside the fmr_list */ - list_for_each_entry(ib_fmr, fmr_list, list) { - fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); - rdev = fmr->rdev; - - if (rdev) { - rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, - &fmr->qplib_fmr, true); - if (rc) - break; - } + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); + return rc; } - return rc; -} - -int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) -{ - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; - int rc; - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to free FMR"); - - kfree(fmr); - atomic_dec(&rdev->mr_count); + kfree(mw); + atomic_dec(&rdev->mw_count); return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 5c3d71765454..6c160f6a5398 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx { u32 refcnt; }; +#define BNXT_RE_FENCE_BYTES 64 +struct bnxt_re_fence_data { + u32 size; + u8 va[BNXT_RE_FENCE_BYTES]; + dma_addr_t dma_addr; + struct bnxt_re_mr *mr; + struct ib_mw *mw; + struct bnxt_qplib_swqe bind_wqe; + u32 bind_rkey; +}; + struct bnxt_re_pd { struct bnxt_re_dev *rdev; struct ib_pd ib_pd; struct bnxt_qplib_pd qplib_pd; struct bnxt_qplib_dpi dpi; + struct bnxt_re_fence_data fence; }; struct bnxt_re_ah { @@ -62,6 +74,7 @@ struct bnxt_re_qp { struct bnxt_re_dev *rdev; struct ib_qp ib_qp; spinlock_t sq_lock; /* protect sq */ + spinlock_t rq_lock; /* protect rq */ struct bnxt_qplib_qp qplib_qp; struct ib_umem *sumem; struct ib_umem *rumem; @@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, u32 max_num_sg); int bnxt_re_dereg_mr(struct ib_mr *mr); -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr); -int 
bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, - u64 iova); -int bnxt_re_unmap_fmr(struct list_head *fmr_list); -int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata); +int bnxt_re_dealloc_mw(struct ib_mw *mw); struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 5d355401179b..1fce5e73216b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) ibdev->dereg_mr = bnxt_re_dereg_mr; ibdev->alloc_mr = bnxt_re_alloc_mr; ibdev->map_mr_sg = bnxt_re_map_mr_sg; - ibdev->alloc_fmr = bnxt_re_alloc_fmr; - ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; - ibdev->unmap_fmr = bnxt_re_unmap_fmr; - ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; ibdev->reg_user_mr = bnxt_re_reg_user_mr; ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 43d08b5e9085..f05500bcdcf1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_qp1 req; - struct creq_create_qp1_resp *resp; + struct creq_create_qp1_resp resp; struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_q *sq = &qp->sq; struct bnxt_qplib_q *rq = &qp->rq; @@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp1_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); - rc = -EINVAL; - goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - qp->id = le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; struct cmdq_create_qp req; - struct creq_create_qp_resp *resp; + struct creq_create_qp_resp resp; struct bnxt_qplib_pbl *pbl; struct sq_psn_search **psn_search_ptr; unsigned long int psn_search, poff = 0; @@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) } req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP 
send failed"); - rc = -EINVAL; - goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - qp->id = le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; - struct creq_modify_qp_resp *resp; + struct creq_modify_qp_resp resp; u16 cmd_flags = 0, pkey; u32 temp32[4]; u32 bmask; + int rc; RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); @@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); - resp = (struct creq_modify_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; qp->cur_qp_state = qp->state; return 0; } @@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_query_qp req; - struct creq_query_qp_resp *resp; + struct creq_query_qp_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; struct creq_query_qp_resp_sb *sb; u16 cmd_flags = 0; u32 temp32[4]; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) + return -ENOMEM; + sb = sbuf->sb; + req.qp_cid = cpu_to_le32(qp->id); req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - (void **)&sb, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void 
*)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; /* Extract the context from the side buffer */ qp->state = sb->en_sqd_async_notify_state & CREQ_QUERY_QP_RESP_SB_STATE_MASK; @@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); memcpy(qp->smac, sb->src_mac, 6); qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); - return 0; +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) @@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_qp req; - struct creq_destroy_qp_resp *resp; + struct creq_destroy_qp_resp resp; unsigned long flags; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); req.qp_cid = cpu_to_le32(qp->id); - resp = (struct creq_destroy_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; /* Must walk the associated CQs to nullified the QP ptr */ spin_lock_irqsave(&qp->scq->hwq.lock, flags); @@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == - HWQ_CMP(sq->hwq.cons, &sq->hwq)) { + + if (bnxt_qplib_queue_full(sq)) { + dev_err(&sq->hwq.pdev->dev, + "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", + sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, + sq->q_full_delta); rc = -ENOMEM; goto done; } @@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, } sq->hwq.prod++; + + qp->wqe_cnt++; + done: return rc; } @@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == - HWQ_CMP(rq->hwq.cons, &rq->hwq)) { + if (bnxt_qplib_queue_full(rq)) { dev_err(&rq->hwq.pdev->dev, "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); rc = -EINVAL; @@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_cq req; - struct creq_create_cq_resp *resp; + struct creq_create_cq_resp resp; struct bnxt_qplib_pbl *pbl; u16 cmd_flags = 0; int rc; @@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << CMDQ_CREATE_CQ_CNQ_ID_SFT); - resp = (struct creq_create_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - 
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - cq->id = le32_to_cpu(resp->xid); + + cq->id = le32_to_cpu(resp.xid); cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; init_waitqueue_head(&cq->waitq); @@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_cq req; - struct creq_destroy_cq_resp *resp; + struct creq_destroy_cq_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); req.cq_cid = cpu_to_le32(cq->id); - resp = (struct creq_destroy_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; bnxt_qplib_free_hwq(res->pdev, &cq->hwq); return 0; } @@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, return rc; } +/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) + * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 + */ +static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, + u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) +{ + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_swq *swq; + u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; + struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; + struct cq_req *peek_req_hwcqe; + struct bnxt_qplib_qp *peek_qp; + struct bnxt_qplib_q *peek_sq; + int i, rc = 0; + + /* Normal mode */ + /* Check for the psn_search marking before completing */ + swq = &sq->swq[sw_sq_cons]; + if (swq->psn_search && + le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { + /* Unmark */ + swq->psn_search->flags_next_psn = cpu_to_le32 + (le32_to_cpu(swq->psn_search->flags_next_psn) + & ~0x80000000); + dev_dbg(&cq->hwq.pdev->dev, + "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + sq->condition = true; + sq->send_phantom = true; + + /* TODO: Only ARM if the previous SQE is ARMALL */ + bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); + + rc = -EAGAIN; + goto out; + } + if (sq->condition) { + /* Peek at the completions */ + peek_raw_cq_cons = cq->hwq.cons; + peek_sw_cq_cons = cq_cons; + i = cq->hwq.max_elements; + while (i--) { + peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); + peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; + peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] + [CQE_IDX(peek_sw_cq_cons)]; + /* If the next hwcqe is VALID */ + if 
(CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, + cq->hwq.max_elements)) { + /* If the next hwcqe is a REQ */ + if ((peek_hwcqe->cqe_type_toggle & + CQ_BASE_CQE_TYPE_MASK) == + CQ_BASE_CQE_TYPE_REQ) { + peek_req_hwcqe = (struct cq_req *) + peek_hwcqe; + peek_qp = (struct bnxt_qplib_qp *) + ((unsigned long) + le64_to_cpu + (peek_req_hwcqe->qp_handle)); + peek_sq = &peek_qp->sq; + peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( + peek_req_hwcqe->sq_cons_idx) - 1 + , &sq->hwq); + /* If the hwcqe's sq's wr_id matches */ + if (peek_sq == sq && + sq->swq[peek_sq_cons_idx].wr_id == + BNXT_QPLIB_FENCE_WRID) { + /* + * Unbreak only if the phantom + * comes back + */ + dev_dbg(&cq->hwq.pdev->dev, + "FP:Got Phantom CQE"); + sq->condition = false; + sq->single = true; + rc = 0; + goto out; + } + } + /* Valid but not the phantom, so keep looping */ + } else { + /* Not valid yet, just exit and wait */ + rc = -EINVAL; + goto out; + } + peek_sw_cq_cons++; + peek_raw_cq_cons++; + } + dev_err(&cq->hwq.pdev->dev, + "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + rc = -EINVAL; + } +out: + return rc; +} + static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, struct cq_req *hwcqe, - struct bnxt_qplib_cqe **pcqe, int *budget) + struct bnxt_qplib_cqe **pcqe, int *budget, + u32 cq_cons, struct bnxt_qplib_qp **lib_qp) { struct bnxt_qplib_qp *qp; struct bnxt_qplib_q *sq; struct bnxt_qplib_cqe *cqe; - u32 sw_cons, cqe_cons; + u32 sw_sq_cons, cqe_sq_cons; + struct bnxt_qplib_swq *swq; int rc = 0; qp = (struct bnxt_qplib_qp *)((unsigned long) @@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, } sq = &qp->sq; - cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); - if (cqe_cons > sq->hwq.max_elements) { + cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); + if (cqe_sq_cons > sq->hwq.max_elements) { dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process req reported "); dev_err(&cq->hwq.pdev->dev, "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", - cqe_cons, sq->hwq.max_elements); + cqe_sq_cons, sq->hwq.max_elements); return -EINVAL; } /* If we were in the middle of flushing the SQ, continue */ @@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Require to walk the sq's swq to fabricate CQEs for all previously * signaled SWQEs due to CQE aggregation from the current sq cons - * to the cqe_cons + * to the cqe_sq_cons */ cqe = *pcqe; while (*budget) { - sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); - if (sw_cons == cqe_cons) + sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); + if (sw_sq_cons == cqe_sq_cons) + /* Done */ break; + + swq = &sq->swq[sw_sq_cons]; memset(cqe, 0, sizeof(*cqe)); cqe->opcode = CQ_BASE_CQE_TYPE_REQ; cqe->qp_handle = (u64)(unsigned long)qp; cqe->src_qp = qp->id; - cqe->wr_id = sq->swq[sw_cons].wr_id; - cqe->type = sq->swq[sw_cons].type; + cqe->wr_id = swq->wr_id; + if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) + goto skip; + cqe->type = swq->type; /* For the last CQE, check for status. 
For errors, regardless * of the request being signaled or not, it must complete with * the hwcqe error status */ - if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && + if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && hwcqe->status != CQ_REQ_STATUS_OK) { cqe->status = hwcqe->status; dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Req "); dev_err(&cq->hwq.pdev->dev, "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", - sw_cons, cqe->wr_id, cqe->status); + sw_sq_cons, cqe->wr_id, cqe->status); cqe++; (*budget)--; sq->flush_in_progress = true; /* Must block new posting of SQ and RQ */ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; + sq->condition = false; + sq->single = false; } else { - if (sq->swq[sw_cons].flags & - SQ_SEND_FLAGS_SIGNAL_COMP) { + if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; } } +skip: sq->hwq.cons++; + if (sq->single) + break; } +out: *pcqe = cqe; - if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { + if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { /* Out of budget */ rc = -EAGAIN; goto done; } + /* + * Back to normal completion mode only after it has completed all of + * the WC for this CQE + */ + sq->single = false; if (!sq->flush_in_progress) goto done; flush: @@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, } int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, - int num_cqes) + int num_cqes, struct bnxt_qplib_qp **lib_qp) { struct cq_base *hw_cqe, **hw_cqe_ptr; unsigned long flags; @@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, case CQ_BASE_CQE_TYPE_REQ: rc = bnxt_qplib_cq_process_req(cq, (struct cq_req *)hw_cqe, - &cqe, &budget); + &cqe, &budget, + sw_cons, lib_qp); break; case CQ_BASE_CQE_TYPE_RES_RC: rc = bnxt_qplib_cq_process_res_rc(cq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index f0150f8da1e3..36b7b7db0e3f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -88,6 +88,7 @@ struct bnxt_qplib_swq { struct bnxt_qplib_swqe { /* General */ +#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ u64 wr_id; u8 reqs_type; u8 type; @@ -216,9 +217,16 @@ struct bnxt_qplib_q { struct scatterlist *sglist; u32 nmap; u32 max_wqe; + u16 q_full_delta; u16 max_sge; u32 psn; bool flush_in_progress; + bool condition; + bool single; + bool send_phantom; + u32 phantom_wqe_cnt; + u32 phantom_cqe_cnt; + u32 next_cq_cons; }; struct bnxt_qplib_qp { @@ -242,6 +250,7 @@ struct bnxt_qplib_qp { u8 timeout; u8 retry_cnt; u8 rnr_retry; + u64 wqe_cnt; u32 min_rnr_timer; u32 max_rd_atomic; u32 max_dest_rd_atomic; @@ -301,6 +310,13 @@ struct bnxt_qplib_qp { (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ !((raw_cons) & (cp_bit))) +static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) +{ + return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), + &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, + &qplib_q->hwq); +} + struct bnxt_qplib_cqe { u8 status; u8 type; @@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe 
*cqe, - int num); + int num, struct bnxt_qplib_qp **qp); void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 23fb7260662b..16e42754dbec 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -39,72 +39,55 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/prefetch.h> +#include <linux/delay.h> + #include "roce_hsi.h" #include "qplib_res.h" #include "qplib_rcfw.h" static void bnxt_qplib_service_creq(unsigned long data); /* Hardware communication channel */ -int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { u16 cbit; int rc; - cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; - if (!test_bit(cbit, rcfw->cmdq_bitmap)) - dev_warn(&rcfw->pdev->dev, - "QPLIB: CMD bit %d for cookie 0x%x is not set?", - cbit, cookie); - rc = wait_event_timeout(rcfw->waitq, !test_bit(cbit, rcfw->cmdq_bitmap), msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); - if (!rc) { - dev_warn(&rcfw->pdev->dev, - "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n", - RCFW_CMD_WAIT_TIME_MS, cookie); - } - - return rc; + return rc ? 0 : -ETIMEDOUT; }; -int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) +static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) { - u32 count = -1; + u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; u16 cbit; - cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; if (!test_bit(cbit, rcfw->cmdq_bitmap)) goto done; do { + mdelay(1); /* 1m sec */ bnxt_qplib_service_creq((unsigned long)rcfw); } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); done: - return count; + return count ? 
0 : -ETIMEDOUT; }; -void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, - struct cmdq_base *req, void **crsbe, - u8 is_block) +static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, + struct creq_base *resp, void *sb, u8 is_block) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_hwq *crsb = &rcfw->crsb; - struct bnxt_qplib_crsqe *crsqe = NULL; - struct bnxt_qplib_crsbe **crsb_ptr; + struct bnxt_qplib_crsq *crsqe; u32 sw_prod, cmdq_prod; - u8 retry_cnt = 0xFF; - dma_addr_t dma_addr; unsigned long flags; u32 size, opcode; u16 cookie, cbit; int pg, idx; u8 *preq; -retry: opcode = req->opcode; if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && @@ -112,63 +95,50 @@ retry: dev_err(&rcfw->pdev->dev, "QPLIB: RCFW not initialized, reject opcode 0x%x", opcode); - return NULL; + return -EINVAL; } if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); - return NULL; + return -EINVAL; } /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ spin_lock_irqsave(&cmdq->lock, flags); - if (req->cmd_size > cmdq->max_elements - - ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) & - (cmdq->max_elements - 1))) { + if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); spin_unlock_irqrestore(&cmdq->lock, flags); - - if (!retry_cnt--) - return NULL; - goto retry; + return -EAGAIN; } - retry_cnt = 0xFF; - cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; + cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; if (is_block) cookie |= RCFW_CMD_IS_BLOCKING; + + set_bit(cbit, rcfw->cmdq_bitmap); req->cookie = cpu_to_le16(cookie); - if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW MAX outstanding cmd reached!"); - atomic_dec(&rcfw->seq_num); + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp) { spin_unlock_irqrestore(&cmdq->lock, flags); - - if (!retry_cnt--) - return NULL; - goto retry; + return -EBUSY; } - /* Reserve a resp buffer slot if requested */ - if (req->resp_size && crsbe) { - spin_lock(&crsb->lock); - sw_prod = HWQ_CMP(crsb->prod, crsb); - crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; - *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] - [get_crsb_idx(sw_prod)]; - bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); - req->resp_addr = cpu_to_le64(dma_addr); - crsb->prod++; - spin_unlock(&crsb->lock); - - req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + - BNXT_QPLIB_CMDQE_UNITS - 1) / - BNXT_QPLIB_CMDQE_UNITS; + memset(resp, 0, sizeof(*resp)); + crsqe->resp = (struct creq_qp_event *)resp; + crsqe->resp->cookie = req->cookie; + crsqe->req_size = req->cmd_size; + if (req->resp_size && sb) { + struct bnxt_qplib_rcfw_sbuf *sbuf = sb; + + req->resp_addr = cpu_to_le64(sbuf->dma_addr); + req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / + BNXT_QPLIB_CMDQE_UNITS; } + cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; preq = (u8 *)req; size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; @@ -190,23 +160,24 @@ retry: preq += min_t(u32, size, sizeof(*cmdqe)); size -= min_t(u32, size, sizeof(*cmdqe)); cmdq->prod++; + rcfw->seq_num++; } while (size > 0); + rcfw->seq_num++; + cmdq_prod = cmdq->prod; if (rcfw->flags & 
FIRMWARE_FIRST_FLAG) { - /* The very first doorbell write is required to set this flag - * which prompts the FW to reset its internal pointers + /* The very first doorbell write + * is required to set this flag + * which prompts the FW to reset + * its internal pointers */ cmdq_prod |= FIRMWARE_FIRST_FLAG; rcfw->flags &= ~FIRMWARE_FIRST_FLAG; } - sw_prod = HWQ_CMP(crsq->prod, crsq); - crsqe = &crsq->crsq[sw_prod]; - memset(crsqe, 0, sizeof(*crsqe)); - crsq->prod++; - crsqe->req_size = req->cmd_size; /* ring CMDQ DB */ + wmb(); writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + rcfw->cmdq_bar_reg_prod_off); writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + @@ -214,9 +185,56 @@ retry: done: spin_unlock_irqrestore(&cmdq->lock, flags); /* Return the CREQ response pointer */ - return crsqe ? &crsqe->qp_event : NULL; + return 0; } +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, + struct creq_base *resp, + void *sb, u8 is_block) +{ + struct creq_qp_event *evnt = (struct creq_qp_event *)resp; + u16 cookie; + u8 opcode, retry_cnt = 0xFF; + int rc = 0; + + do { + opcode = req->opcode; + rc = __send_message(rcfw, req, resp, sb, is_block); + cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; + if (!rc) + break; + + if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { + /* send failed */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", + cookie, opcode); + return rc; + } + is_block ? mdelay(1) : usleep_range(500, 1000); + + } while (retry_cnt--); + + if (is_block) + rc = __block_for_resp(rcfw, cookie); + else + rc = __wait_for_resp(rcfw, cookie); + if (rc) { + /* timed out */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", + cookie, opcode, RCFW_CMD_WAIT_TIME_MS); + return rc; + } + + if (evnt->status) { + /* failed with status */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", + cookie, opcode, evnt->status); + rc = -EFAULT; + } + + return rc; +} /* Completions */ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, struct creq_func_event *func_event) @@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, struct creq_qp_event *qp_event) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_crsqe *crsqe; - u16 cbit, cookie, blocked = 0; + struct bnxt_qplib_crsq *crsqe; unsigned long flags; - u32 sw_cons; + u16 cbit, blocked = 0; + u16 cookie; + __le16 mcookie; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: @@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, default: /* Command Response */ spin_lock_irqsave(&cmdq->lock, flags); - sw_cons = HWQ_CMP(crsq->cons, crsq); - crsqe = &crsq->crsq[sw_cons]; - crsq->cons++; - memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); - - cookie = le16_to_cpu(crsqe->qp_event.cookie); + cookie = le16_to_cpu(qp_event->cookie); + mcookie = qp_event->cookie; blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp && + crsqe->resp->cookie == mcookie) { + memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); + crsqe->resp = NULL; + } else { + dev_err(&rcfw->pdev->dev, + "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", + crsqe->resp ? "mismatch" : "collision", + crsqe->resp ? 
crsqe->resp->cookie : 0, mcookie); + } if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) dev_warn(&rcfw->pdev->dev, "QPLIB: CMD bit %d was not requested", cbit); - cmdq->cons += crsqe->req_size; - spin_unlock_irqrestore(&cmdq->lock, flags); + crsqe->req_size = 0; + if (!blocked) wake_up(&rcfw->waitq); - break; + spin_unlock_irqrestore(&cmdq->lock, flags); } return 0; } @@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data) struct creq_base *creqe, **creq_ptr; u32 sw_cons, raw_cons; unsigned long flags; - u32 type; + u32 type, budget = CREQ_ENTRY_POLL_BUDGET; - /* Service the CREQ until empty */ + /* Service the CREQ until budget is over */ spin_lock_irqsave(&creq->lock, flags); raw_cons = creq->cons; - while (1) { + while (budget > 0) { sw_cons = HWQ_CMP(raw_cons, creq); creq_ptr = (struct creq_base **)creq->pbl_ptr; creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; @@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data) type = creqe->type & CREQ_BASE_TYPE_MASK; switch (type) { case CREQ_BASE_TYPE_QP_EVENT: - if (!bnxt_qplib_process_qp_event - (rcfw, (struct creq_qp_event *)creqe)) - rcfw->creq_qp_event_processed++; - else { - dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); - dev_warn(&rcfw->pdev->dev, - "QPLIB: type = 0x%x not handled", - type); - } + bnxt_qplib_process_qp_event + (rcfw, (struct creq_qp_event *)creqe); + rcfw->creq_qp_event_processed++; break; case CREQ_BASE_TYPE_FUNC_EVENT: if (!bnxt_qplib_process_func_event @@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data) break; } raw_cons++; + budget--; } + if (creq->cons != raw_cons) { creq->cons = raw_cons; CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, @@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) /* RCFW */ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) { - struct creq_deinitialize_fw_resp *resp; struct cmdq_deinitialize_fw req; + struct creq_deinitialize_fw_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); - resp = (struct creq_deinitialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) - return -EINVAL; - - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) - return -ETIMEDOUT; - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) - return -EFAULT; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; @@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl) int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn) { - struct creq_initialize_fw_resp *resp; struct cmdq_initialize_fw req; + struct creq_initialize_fw_resp resp; u16 cmd_flags = 0, level; + int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); @@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, skip_ctx_setup: req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); - resp = (struct creq_initialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != 
le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW failed"); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; } void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); - kfree(rcfw->crsq.crsq); + kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); - rcfw->pdev = NULL; } @@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, goto fail; } - rcfw->crsq.max_elements = rcfw->cmdq.max_elements; - rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, - sizeof(*rcfw->crsq.crsq), GFP_KERNEL); - if (!rcfw->crsq.crsq) + rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, + sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); + if (!rcfw->crsqe_tbl) goto fail; - rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; - if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, - &rcfw->crsb.max_elements, - BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, - HWQ_TYPE_CTX)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: HW channel CRSB allocation failed"); - goto fail; - } return 0; fail: @@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, int rc; /* General */ - atomic_set(&rcfw->seq_num, 0); + rcfw->seq_num = 0; rcfw->flags = FIRMWARE_FIRST_FLAG; bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * sizeof(unsigned long)); @@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; - /* CRSQ */ - rcfw->crsq.prod = 0; - rcfw->crsq.cons = 0; - /* CREQ */ rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); @@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); return 0; } + +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size) +{ + struct bnxt_qplib_rcfw_sbuf *sbuf; + + sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); + if (!sbuf) + return NULL; + + sbuf->size = size; + sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, + &sbuf->dma_addr, GFP_ATOMIC); + if (!sbuf->sb) + goto bail; + + return sbuf; +bail: + kfree(sbuf); + return NULL; +} + +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf) +{ + if (sbuf->sb) + dma_free_coherent(&rcfw->pdev->dev, sbuf->size, + sbuf->sb, sbuf->dma_addr); + kfree(sbuf); +} diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index d3567d75bf58..09ce121770cd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -73,6 +73,7 @@ #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT #define RCFW_MAX_COOKIE_VALUE 0x7FFF #define RCFW_CMD_IS_BLOCKING 0x8000 +#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 /* Cmdq contains a fix number of a 16-Byte slots */ struct bnxt_qplib_cmdqe { @@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe { u8 data[1024]; }; -/* CRSQ SB */ -#define BNXT_QPLIB_CRSBE_MAX_CNT 4 -#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) -#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) - -#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) -#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) - -static inline u32 
get_crsb_pg(u32 val) -{ - return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; -} - -static inline u32 get_crsb_idx(u32 val) -{ - return val & MAX_CRSB_IDX_PER_PG; -} - -static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, - u32 prod, dma_addr_t *dma_addr) -{ - *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; - *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * - BNXT_QPLIB_CRSBE_UNITS; -} - /* CREQ */ /* Allocate 1 per QP for async error notification for now */ #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) @@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val) #define CREQ_DB(db, raw_cons, cp_bit) \ writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) +#define CREQ_ENTRY_POLL_BUDGET 0x100 + /* HWQ */ -struct bnxt_qplib_crsqe { - struct creq_qp_event qp_event; + +struct bnxt_qplib_crsq { + struct creq_qp_event *resp; u32 req_size; }; -struct bnxt_qplib_crsq { - struct bnxt_qplib_crsqe *crsq; - u32 prod; - u32 cons; - u32 max_elements; +struct bnxt_qplib_rcfw_sbuf { + void *sb; + dma_addr_t dma_addr; + u32 size; }; /* RCFW Communication Channels */ @@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw { wait_queue_head_t waitq; int (*aeq_handler)(struct bnxt_qplib_rcfw *, struct creq_func_event *); - atomic_t seq_num; + u32 seq_num; /* Bar region info */ void __iomem *cmdq_bar_reg_iomem; @@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw { /* Actual Cmd and Resp Queues */ struct bnxt_qplib_hwq cmdq; - struct bnxt_qplib_crsq crsq; - struct bnxt_qplib_hwq crsb; + struct bnxt_qplib_crsq *crsqe_tbl; }; void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); @@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, (struct bnxt_qplib_rcfw *, struct creq_func_event *)); -int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, - struct cmdq_base *req, void **crsbe, - u8 is_block); +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size); +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf); +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, struct creq_base *resp, + void *sbuf, u8 is_block); int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 6277d802ca4b..2e4855509719 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) +#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ + ((HWQ_CMP(hwq->prod, hwq)\ + - HWQ_CMP(hwq->cons, hwq))\ + & (hwq->max_elements - 1))) enum bnxt_qplib_hwq_type { HWQ_TYPE_CTX, HWQ_TYPE_QUEUE, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 7b31eccedf11..fde18cf0e406 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr) { struct cmdq_query_func req; - struct creq_query_func_resp *resp; + struct creq_query_func_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; 
struct creq_query_func_resp_sb *sb; u16 cmd_flags = 0; u32 temp; u8 *tqm_alloc; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); - req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_func_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, - 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) { dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; + "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); + return -ENOMEM; } + + sb = sbuf->sb; + req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; + /* Extract the context from the side buffer */ attr->max_qp = le32_to_cpu(sb->max_qp); attr->max_qp_rd_atom = @@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); + /* + * 128 WQEs needs to be reserved for the HW (8916). Prevent + * reporting the max number + */ + attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; attr->max_qp_sges = sb->max_sge; attr->max_cq = le32_to_cpu(sb->max_cq); attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); @@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); } - return 0; + +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } /* SGID */ @@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, /* Remove GID from the SGID table */ if (update) { struct cmdq_delete_gid req; - struct creq_delete_gid_resp *resp; + struct creq_delete_gid_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); if (sgid_tbl->hw_id[index] == 0xFFFF) { @@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return -EINVAL; } req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); - resp = (struct creq_delete_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, - 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; } memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero)); @@ -234,7 +215,7 @@ int 
bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_res, sgid_tbl); struct bnxt_qplib_rcfw *rcfw = res->rcfw; - int i, free_idx, rc = 0; + int i, free_idx; if (!sgid_tbl) { dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); @@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } if (update) { struct cmdq_add_gid req; - struct creq_add_gid_resp *resp; + struct creq_add_gid_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; RCFW_CMD_PREP(req, ADD_GID, cmd_flags); @@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, req.src_mac[1] = cpu_to_be16(temp16[1]); req.src_mac[2] = cpu_to_be16(temp16[2]); - resp = (struct creq_add_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: ADD_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPIB: SP: ADD_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); } /* Add GID to the sgid_tbl */ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); @@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, *index = free_idx; /* unlock */ - return rc; + return 0; } /* pkeys */ @@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_ah req; - struct creq_create_ah_resp *resp; + struct creq_create_ah_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); @@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) req.dest_mac[1] = cpu_to_le16(temp16[1]); req.dest_mac[2] = cpu_to_le16(temp16[2]); - resp = (struct creq_create_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - ah->id = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; + + ah->id = le32_to_cpu(resp.xid); return 0; } @@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_ah req; - struct creq_destroy_ah_resp *resp; + struct creq_destroy_ah_resp resp; u16 cmd_flags = 0; + int rc; /* 
Clean up the AH table in the device */ RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); req.ah_cid = cpu_to_le32(ah->id); - resp = (struct creq_destroy_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; return 0; } @@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deallocate_key req; - struct creq_deallocate_key_resp *resp; + struct creq_deallocate_key_resp resp; u16 cmd_flags = 0; + int rc; if (mrw->lkey == 0xFFFFFFFF) { dev_info(&res->pdev->dev, @@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) else req.key = cpu_to_le32(mrw->lkey); - resp = (struct creq_deallocate_key_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; + /* Free the qplib's MRW memory */ if (mrw->hwq.max_elements) bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); @@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_allocate_mrw req; - struct creq_allocate_mrw_resp *resp; + struct creq_allocate_mrw_resp resp; u16 cmd_flags = 0; unsigned long tmp; + int rc; RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); @@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) tmp = (unsigned long)mrw; req.mrw_handle = cpu_to_le64(tmp); - resp = (struct creq_allocate_mrw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = 
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) - mrw->rkey = le32_to_cpu(resp->xid); + mrw->rkey = le32_to_cpu(resp.xid); else - mrw->lkey = le32_to_cpu(resp->xid); + mrw->lkey = le32_to_cpu(resp.xid); return 0; } @@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deregister_mr req; - struct creq_deregister_mr_resp *resp; + struct creq_deregister_mr_resp resp; u16 cmd_flags = 0; int rc; RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); req.lkey = cpu_to_le32(mrw->lkey); - resp = (struct creq_deregister_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); - return -EINVAL; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) + return rc; /* Free the qplib's MR memory */ if (mrw->hwq.max_elements) { @@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_register_mr req; - struct creq_register_mr_resp *resp; + struct creq_register_mr_resp resp; u16 cmd_flags = 0, level; int pg_ptrs, pages, i, rc; dma_addr_t **pbl_ptr; @@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, req.key = cpu_to_le32(mr->lkey); req.mr_size = cpu_to_le64(mr->total_size); - resp = (struct creq_register_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&res->pdev->dev, "SP: REG_MR send failed"); - rc = -EINVAL; - goto fail; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "SP: REG_MR timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) goto fail; - } + return 0; fail: @@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_map_tc_to_cos req; - struct creq_map_tc_to_cos_resp *resp; + struct creq_map_tc_to_cos_resp resp; u16 cmd_flags = 0; - int tleft; + int rc = 0; RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); req.cos0 = 
cpu_to_le16(cids[0]); req.cos1 = cpu_to_le16(cids[1]); - resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); - return -EINVAL; - } - - tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); - if (!tleft) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); - return -ETIMEDOUT; - } - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 1442a617e968..a543f959098b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -40,6 +40,8 @@ #ifndef __BNXT_QPLIB_SP_H__ #define __BNXT_QPLIB_SP_H__ +#define BNXT_QPLIB_RESERVED_QP_WRS 128 + struct bnxt_qplib_dev_attr { char fw_ver[32]; u16 max_sgid; diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 558d6a03375d..3eff6541bd6f 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -142,8 +142,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) pr_debug("%s alloc_skb failed\n", __func__); return -ENOMEM; } - wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); - memset(wqe, 0, sizeof(*wqe)); + wqe = skb_put_zero(skb, sizeof(*wqe)); build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7, T3_SOPEOP); @@ -561,8 +560,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p) ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) | V_EC_TYPE(0) | V_EC_GEN(1) | V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32; - wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); - memset(wqe, 0, sizeof(*wqe)); + wqe = skb_put_zero(skb, sizeof(*wqe)); build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0, T3_CTL_QP_TID, 7, T3_SOPEOP); wqe->flags = cpu_to_be32(MODQP_WRITE_EC); @@ -837,7 +835,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) if (!skb) return -ENOMEM; pr_debug("%s rdev_p %p\n", __func__, rdev_p); - wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT)); wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) | V_FW_RIWR_LEN(sizeof(*wqe) >> 3)); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index b61630eba912..86975370a4c0 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -175,7 +175,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) skb = get_skb(skb, sizeof *req, GFP_KERNEL); if (!skb) return; - req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); skb->priority = CPL_PRIORITY_SETUP; @@ -190,7 +190,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep) if (!skb) return -ENOMEM; - req = (struct cpl_set_tcb_field *) skb_put(skb, 
sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); @@ -211,7 +211,7 @@ int iwch_resume_tid(struct iwch_ep *ep) if (!skb) return -ENOMEM; - req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); @@ -398,7 +398,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) } skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, arp_failure_discard); - req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); @@ -417,8 +417,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) } skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, abort_arp_failure); - req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = skb_put_zero(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); @@ -456,7 +455,7 @@ static int send_connect(struct iwch_ep *ep) skb->priority = CPL_PRIORITY_SETUP; set_arp_failure_handler(skb, act_open_req_arp_failure); - req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid)); req->local_port = ep->com.local_addr.sin_port; @@ -514,7 +513,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) set_arp_failure_handler(skb, arp_failure_discard); skb_reset_transport_header(skb); len = skb->len; - req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); + req = skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); req->wr_lo = htonl(V_WR_TID(ep->hwtid)); req->len = htonl(len); @@ -547,7 +546,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) return -ENOMEM; } skb_reserve(skb, sizeof(*req)); - mpa = (struct mpa_message *) skb_put(skb, mpalen); + mpa = skb_put(skb, mpalen); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = MPA_REJECT; @@ -565,7 +564,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, arp_failure_discard); skb_reset_transport_header(skb); - req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); + req = skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); req->wr_lo = htonl(V_WR_TID(ep->hwtid)); req->len = htonl(mpalen); @@ -597,7 +596,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) } skb->priority = CPL_PRIORITY_DATA; skb_reserve(skb, sizeof(*req)); - mpa = (struct mpa_message *) skb_put(skb, mpalen); + mpa = skb_put(skb, mpalen); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = (ep->mpa_attr.crc_enabled ? 
MPA_CRC : 0) | @@ -616,7 +615,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) set_arp_failure_handler(skb, arp_failure_discard); skb_reset_transport_header(skb); len = skb->len; - req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); + req = skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); req->wr_lo = htonl(V_WR_TID(ep->hwtid)); req->len = htonl(len); @@ -801,7 +800,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits) return 0; } - req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); @@ -1206,7 +1205,7 @@ static int listen_start(struct iwch_listen_ep *ep) return -ENOMEM; } - req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid)); req->local_port = ep->com.local_addr.sin_port; @@ -1247,7 +1246,7 @@ static int listen_stop(struct iwch_listen_ep *ep) pr_err("%s - failed to alloc skb\n", __func__); return -ENOMEM; } - req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); req->cpu_idx = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); @@ -1615,7 +1614,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) goto out; } rpl_skb->priority = CPL_PRIORITY_DATA; - rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); + rpl = skb_put(rpl_skb, sizeof(*rpl)); rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index ba6d5d281b03..7f633da0185d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -670,8 +670,7 @@ int iwch_post_zb_read(struct iwch_ep *ep) pr_err("%s cannot send zb_read!!\n", __func__); return -ENOMEM; } - wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr)); - memset(wqe, 0, sizeof(struct t3_rdma_read_wr)); + wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr)); wqe->read.rdmaop = T3_READ_REQ; wqe->read.reserved[0] = 0; wqe->read.reserved[1] = 0; @@ -702,8 +701,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) pr_err("%s cannot send TERMINATE!\n", __func__); return -ENOMEM; } - wqe = (union t3_wr *)skb_put(skb, 40); - memset(wqe, 0, 40); + wqe = skb_put_zero(skb, 40); wqe->send.rdmaop = T3_TERMINATE; /* immediate data length */ diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 0910faf3587b..e49b34c3b136 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -398,7 +398,8 @@ void _c4iw_free_ep(struct kref *kref) (const u32 *)&sin6->sin6_addr.s6_addr, 1); } - cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); + cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, + ep->com.local_addr.ss_family); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); if (ep->mpa_skb) @@ -596,7 +597,7 @@ static int send_flowc(struct c4iw_ep *ep) else nparams = 9; - flowc = (struct 
fw_flowc_wr *)__skb_put(skb, FLOWC_LEN); + flowc = __skb_put(skb, FLOWC_LEN); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); @@ -786,18 +787,16 @@ static int send_connect(struct c4iw_ep *ep) if (ep->com.remote_addr.ss_family == AF_INET) { switch (CHELSIO_CHIP_VERSION(adapter_type)) { case CHELSIO_T4: - req = (struct cpl_act_open_req *)skb_put(skb, wrlen); + req = skb_put(skb, wrlen); INIT_TP_WR(req, 0); break; case CHELSIO_T5: - t5req = (struct cpl_t5_act_open_req *)skb_put(skb, - wrlen); + t5req = skb_put(skb, wrlen); INIT_TP_WR(t5req, 0); req = (struct cpl_act_open_req *)t5req; break; case CHELSIO_T6: - t6req = (struct cpl_t6_act_open_req *)skb_put(skb, - wrlen); + t6req = skb_put(skb, wrlen); INIT_TP_WR(t6req, 0); req = (struct cpl_act_open_req *)t6req; t5req = (struct cpl_t5_act_open_req *)t6req; @@ -838,18 +837,16 @@ static int send_connect(struct c4iw_ep *ep) } else { switch (CHELSIO_CHIP_VERSION(adapter_type)) { case CHELSIO_T4: - req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); + req6 = skb_put(skb, wrlen); INIT_TP_WR(req6, 0); break; case CHELSIO_T5: - t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb, - wrlen); + t5req6 = skb_put(skb, wrlen); INIT_TP_WR(t5req6, 0); req6 = (struct cpl_act_open_req6 *)t5req6; break; case CHELSIO_T6: - t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb, - wrlen); + t6req6 = skb_put(skb, wrlen); INIT_TP_WR(t6req6, 0); req6 = (struct cpl_act_open_req6 *)t6req6; t5req6 = (struct cpl_t5_act_open_req6 *)t6req6; @@ -926,8 +923,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); - memset(req, 0, wrlen); + req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | @@ -1033,8 +1029,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); - memset(req, 0, wrlen); + req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | @@ -1114,8 +1109,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); - memset(req, 0, wrlen); + req = skb_put_zero(skb, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL_F | @@ -1199,7 +1193,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) /* setup the hwtid for this connection */ ep->hwtid = tid; - cxgb4_insert_tid(t, ep, tid); + cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family); insert_ep_tid(ep); ep->snd_seq = be32_to_cpu(req->snd_isn); @@ -1906,8 +1900,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) int win; skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); - req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = __skb_put_zero(skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.filter = cpu_to_be32(cxgb4_select_ntuple( @@ -2304,7 +2297,8 @@ fail: (const u32 *)&sin6->sin6_addr.s6_addr, 1); } if (status && act_open_has_tid(status)) - cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); + 
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl), + ep->com.local_addr.ss_family); remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); cxgb4_free_atid(t, atid); @@ -2581,7 +2575,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); init_timer(&child_ep->timer); - cxgb4_insert_tid(t, child_ep, hwtid); + cxgb4_insert_tid(t, child_ep, hwtid, + child_ep->com.local_addr.ss_family); insert_ep_tid(child_ep); if (accept_cr(child_ep, skb, req)) { c4iw_put_ep(&parent_ep->com); @@ -2849,7 +2844,8 @@ out: 1); } remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); - cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); + cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, + ep->com.local_addr.ss_family); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_reconnect(ep); @@ -3752,9 +3748,9 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) */ memset(&tmp_opt, 0, sizeof(tmp_opt)); tcp_clear_options(&tmp_opt); - tcp_parse_options(skb, &tmp_opt, 0, NULL); + tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL); - req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); + req = __skb_push(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); req->l2info = cpu_to_be16(SYN_INTF_V(intf) | SYN_MAC_IDX_V(RX_MACIDX_G( @@ -3806,8 +3802,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); if (!req_skb) return; - req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = __skb_put_zero(req_skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 14de5bde1b63..e16fcaf6b5a3 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -44,8 +44,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, wr_len = sizeof *res_wr + sizeof *res; set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); - memset(res_wr, 0, wr_len); + res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | @@ -114,8 +113,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); - memset(res_wr, 0, wr_len); + res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(1) | diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index f96a96dbcf1f..ae0b79aeea2e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, kfree(entry); } - list_for_each_safe(pos, nxt, &uctx->qpids) { + list_for_each_safe(pos, nxt, &uctx->cqids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); kfree(entry); @@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); if (!rdev->free_workq) { err = -ENOMEM; - goto err_free_status_page; + goto 
err_free_status_page_and_wr_log; } rdev->status_page->db_off = 0; return 0; -err_free_status_page: +err_free_status_page_and_wr_log: + if (c4iw_wr_log && rdev->wr_log) + kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); destroy_ocqp_pool: c4iw_ocqp_pool_destroy(rdev); @@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) { destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); + c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); + c4iw_ocqp_pool_destroy(rdev); c4iw_destroy_resource(&rdev->resource); } diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 3ee7f43e419a..5332f06b99ba 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -81,8 +81,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - req = (struct ulp_mem_io *)__skb_put(skb, wr_len); - memset(req, 0, wr_len); + req = __skb_put_zero(skb, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | (wait ? FW_WR_COMPL_F : 0)); @@ -142,8 +141,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - req = (struct ulp_mem_io *)__skb_put(skb, wr_len); - memset(req, 0, wr_len); + req = __skb_put_zero(skb, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); if (i == (num_wqe-1)) { diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 8e4154b4253e..bfc77596acbe 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -293,8 +293,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, } set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); - memset(res_wr, 0, wr_len); + res_wr = __skb_put_zero(skb, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | FW_RI_RES_WR_NRES_V(2) | @@ -1228,7 +1227,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); - wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); wqe->flowid_len16 = cpu_to_be32( @@ -1350,7 +1349,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | @@ -1419,7 +1418,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) } set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); - wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + wqe = __skb_put(skb, sizeof(*wqe)); memset(wqe, 0, sizeof *wqe); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 90e7b77d68e8..2d19f9bb434d 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1779,7 +1779,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->alloc_hw_stats = alloc_hw_stats; ibdev->get_hw_stats = get_hw_stats; ibdev->alloc_rdma_netdev = hfi1_vnic_alloc_rn; - ibdev->free_rdma_netdev = hfi1_vnic_free_rn; /* keep process mad in the driver */ 
ibdev->process_mad = hfi1_process_mad; diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h index e2c455299b53..4a621cde4abb 100644 --- a/drivers/infiniband/hw/hfi1/vnic.h +++ b/drivers/infiniband/hw/hfi1/vnic.h @@ -176,7 +176,6 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *)); -void hfi1_vnic_free_rn(struct net_device *netdev); int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx, struct hfi1_vnic_vport_info *vinfo, struct sk_buff *skb, u64 pbc, u8 plen); diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index b601c2929f8f..339f0cdd56d6 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -833,6 +833,15 @@ static const struct net_device_ops hfi1_netdev_ops = { .ndo_get_stats64 = hfi1_vnic_get_stats64, }; +static void hfi1_vnic_free_rn(struct net_device *netdev) +{ + struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); + + hfi1_vnic_deinit(vinfo); + mutex_destroy(&vinfo->lock); + free_netdev(netdev); +} + struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, u8 port_num, enum rdma_netdev_t type, @@ -864,6 +873,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, vinfo->num_tx_q = dd->chip_sdma_engines; vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT; vinfo->netdev = netdev; + rn->free_rdma_netdev = hfi1_vnic_free_rn; rn->set_id = hfi1_vnic_set_vesw_id; netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG; @@ -892,12 +902,3 @@ init_fail: free_netdev(netdev); return ERR_PTR(rc); } - -void hfi1_vnic_free_rn(struct net_device *netdev) -{ - struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); - - hfi1_vnic_deinit(vinfo); - mutex_destroy(&vinfo->lock); - free_netdev(netdev); -} diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index a3f18a22f5ed..e0f47cc2effc 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1939,7 +1939,7 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev, bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev) { struct i40iw_device *iwdev; - wait_queue_t wait; + wait_queue_entry_t wait; iwdev = dev->back_dev; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 521d0def2d9e..75b2f7d4cd95 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -61,8 +61,7 @@ #include <rdma/mlx4-abi.h> #define DRV_NAME MLX4_IB_DRV_NAME -#define DRV_VERSION "2.2-1" -#define DRV_RELDATE "Feb 2014" +#define DRV_VERSION "4.0-0" #define MLX4_IB_FLOW_MAX_PRIO 0xFFF #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF @@ -79,7 +78,7 @@ MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_ass static const char mlx4_ib_version[] = DRV_NAME ": Mellanox ConnectX InfiniBand driver v" - DRV_VERSION " (" DRV_RELDATE ")\n"; + DRV_VERSION "\n"; static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 94c049b62c2f..a384d72ea3cd 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -788,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont; - *cqb = mlx5_vzalloc(*inlen); + *cqb = kvzalloc(*inlen, GFP_KERNEL); if 
(!*cqb) { err = -ENOMEM; goto err_db; @@ -884,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages; - *cqb = mlx5_vzalloc(*inlen); + *cqb = kvzalloc(*inlen, GFP_KERNEL); if (!*cqb) { err = -ENOMEM; goto err_buf; @@ -1314,7 +1314,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) inlen = MLX5_ST_SZ_BYTES(modify_cq_in) + MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto ex_resize; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index f1b56de64871..95db929bdc34 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -218,7 +218,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num, (struct ib_pma_portcounters_ext *)(out_mad->data + 40); int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out); - out_cnt = mlx5_vzalloc(sz); + out_cnt = kvzalloc(sz, GFP_KERNEL); if (!out_cnt) return IB_MAD_RESULT_FAILURE; @@ -231,7 +231,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num, (struct ib_pma_portcounters *)(out_mad->data + 40); int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - out_cnt = mlx5_vzalloc(sz); + out_cnt = kvzalloc(sz, GFP_KERNEL); if (!out_cnt) return IB_MAD_RESULT_FAILURE; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 0c79983c8b1a..a7f2e60085c4 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -60,8 +60,7 @@ #include "cmd.h" #define DRIVER_NAME "mlx5_ib" -#define DRIVER_VERSION "2.2-1" -#define DRIVER_RELDATE "Feb 2014" +#define DRIVER_VERSION "5.0-0" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); @@ -70,7 +69,7 @@ MODULE_VERSION(DRIVER_VERSION); static char mlx5_version[] = DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" - DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; + DRIVER_VERSION "\n"; enum { MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3, @@ -224,8 +223,8 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, return 0; } -static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, - struct ib_port_attr *props) +static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, + struct ib_port_attr *props) { struct mlx5_ib_dev *dev = to_mdev(device); struct mlx5_core_dev *mdev = dev->mdev; @@ -233,12 +232,14 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, enum ib_mtu ndev_ib_mtu; u16 qkey_viol_cntr; u32 eth_prot_oper; + int err; /* Possible bad flows are checked before filling out props so in case * of an error it will still be zeroed out. 
*/ - if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num)) - return; + err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num); + if (err) + return err; translate_eth_proto_oper(eth_prot_oper, &props->active_speed, &props->active_width); @@ -259,7 +260,7 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, ndev = mlx5_ib_get_netdev(device, port_num); if (!ndev) - return; + return 0; if (mlx5_lag_is_active(dev->mdev)) { rcu_read_lock(); @@ -282,75 +283,49 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, dev_put(ndev); props->active_mtu = min(props->max_mtu, ndev_ib_mtu); + return 0; } -static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid, - const struct ib_gid_attr *attr, - void *mlx5_addr) +static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num, + unsigned int index, const union ib_gid *gid, + const struct ib_gid_attr *attr) { -#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v) - char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, - source_l3_address); - void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, - source_mac_47_32); - - if (!gid) - return; + enum ib_gid_type gid_type = IB_GID_TYPE_IB; + u8 roce_version = 0; + u8 roce_l3_type = 0; + bool vlan = false; + u8 mac[ETH_ALEN]; + u16 vlan_id = 0; - ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr); + if (gid) { + gid_type = attr->gid_type; + ether_addr_copy(mac, attr->ndev->dev_addr); - if (is_vlan_dev(attr->ndev)) { - MLX5_SET_RA(mlx5_addr, vlan_valid, 1); - MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev)); + if (is_vlan_dev(attr->ndev)) { + vlan = true; + vlan_id = vlan_dev_vlan_id(attr->ndev); + } } - switch (attr->gid_type) { + switch (gid_type) { case IB_GID_TYPE_IB: - MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1); + roce_version = MLX5_ROCE_VERSION_1; break; case IB_GID_TYPE_ROCE_UDP_ENCAP: - MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2); + roce_version = MLX5_ROCE_VERSION_2; + if (ipv6_addr_v4mapped((void *)gid)) + roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4; + else + roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6; break; default: - WARN_ON(true); + mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type); } - if (attr->gid_type != IB_GID_TYPE_IB) { - if (ipv6_addr_v4mapped((void *)gid)) - MLX5_SET_RA(mlx5_addr, roce_l3_type, - MLX5_ROCE_L3_TYPE_IPV4); - else - MLX5_SET_RA(mlx5_addr, roce_l3_type, - MLX5_ROCE_L3_TYPE_IPV6); - } - - if ((attr->gid_type == IB_GID_TYPE_IB) || - !ipv6_addr_v4mapped((void *)gid)) - memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid)); - else - memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4); -} - -static int set_roce_addr(struct ib_device *device, u8 port_num, - unsigned int index, - const union ib_gid *gid, - const struct ib_gid_attr *attr) -{ - struct mlx5_ib_dev *dev = to_mdev(device); - u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0}; - void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address); - enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num); - - if (ll != IB_LINK_LAYER_ETHERNET) - return -EINVAL; - - ib_gid_to_mlx5_roce_addr(gid, attr, in_addr); - - MLX5_SET(set_roce_address_in, in, roce_address_index, index); - MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS); - return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); + return mlx5_core_roce_gid_set(dev->mdev, index, roce_version, + roce_l3_type, gid->raw, mac, vlan, + vlan_id); } static 
int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, @@ -358,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, const struct ib_gid_attr *attr, __always_unused void **context) { - return set_roce_addr(device, port_num, index, gid, attr); + return set_roce_addr(to_mdev(device), port_num, index, gid, attr); } static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num, unsigned int index, __always_unused void **context) { - return set_roce_addr(device, port_num, index, NULL, NULL); + return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL); } __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, @@ -440,7 +415,7 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev, u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); u8 atomic_req_8B_endianness_mode = - MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode); + MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode); /* Check if HW supports 8 bytes standard atomic operations and capable * of host endianness respond @@ -979,20 +954,31 @@ out: int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { + unsigned int count; + int ret; + switch (mlx5_get_vport_access_method(ibdev)) { case MLX5_VPORT_ACCESS_METHOD_MAD: - return mlx5_query_mad_ifc_port(ibdev, port, props); + ret = mlx5_query_mad_ifc_port(ibdev, port, props); + break; case MLX5_VPORT_ACCESS_METHOD_HCA: - return mlx5_query_hca_port(ibdev, port, props); + ret = mlx5_query_hca_port(ibdev, port, props); + break; case MLX5_VPORT_ACCESS_METHOD_NIC: - mlx5_query_port_roce(ibdev, port, props); - return 0; + ret = mlx5_query_port_roce(ibdev, port, props); + break; default: - return -EINVAL; + ret = -EINVAL; + } + + if (!ret && props) { + count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev); + props->gid_tbl_len -= count; } + return ret; } static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, @@ -2263,7 +2249,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, if (!is_valid_attr(dev->mdev, flow_attr)) return ERR_PTR(-EINVAL); - spec = mlx5_vzalloc(sizeof(*spec)); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); handler = kzalloc(sizeof(*handler), GFP_KERNEL); if (!handler || !spec) { err = -ENOMEM; @@ -3468,7 +3454,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev, __be32 val; int ret, i; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -3497,7 +3483,7 @@ static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev, int ret, i; int offset = port->cnts.num_q_counters; - out = mlx5_vzalloc(outlen); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -3542,6 +3528,11 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, return num_counters; } +static void mlx5_ib_free_rdma_netdev(struct net_device *netdev) +{ + return mlx5_rdma_netdev_free(netdev); +} + static struct net_device* mlx5_ib_alloc_rdma_netdev(struct ib_device *hca, u8 port_num, @@ -3550,16 +3541,19 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca, unsigned char name_assign_type, void (*setup)(struct net_device *)) { + struct net_device *netdev; + struct rdma_netdev *rn; + if (type != RDMA_NETDEV_IPOIB) return ERR_PTR(-EOPNOTSUPP); - return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca, - name, setup); -} - -static void mlx5_ib_free_rdma_netdev(struct net_device *netdev) -{ - return mlx5_rdma_netdev_free(netdev); + 
netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca, + name, setup); + if (likely(!IS_ERR_OR_NULL(netdev))) { + rn = netdev_priv(netdev); + rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev; + } + return netdev; } static void *mlx5_ib_add(struct mlx5_core_dev *mdev) @@ -3692,8 +3686,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; - dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; + if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) + dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; + if (mlx5_core_is_pf(mdev)) { dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 366433f71b58..763bb5b36144 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1110,7 +1110,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*pas) * ((npages + 1) / 2) * 2; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_1; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ebb6768684de..0889ff367c86 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -823,7 +823,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; - *in = mlx5_vzalloc(*inlen); + *in = kvzalloc(*inlen, GFP_KERNEL); if (!*in) { err = -ENOMEM; goto err_umem; @@ -931,7 +931,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; - *in = mlx5_vzalloc(*inlen); + *in = kvzalloc(*inlen, GFP_KERNEL); if (!*in) { err = -ENOMEM; goto err_buf; @@ -1060,7 +1060,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, return err; inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_umem; @@ -1140,7 +1140,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, u32 rq_pas_size = get_rq_pas_size(qpc); inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1193,7 +1193,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1372,7 +1372,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, } inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1633,7 +1633,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (err) return err; } else { - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2164,7 +2164,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_tis_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, 
GFP_KERNEL); if (!in) return -ENOMEM; @@ -2189,7 +2189,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_tis_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2434,7 +2434,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -2479,7 +2479,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(modify_sq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -4281,7 +4281,7 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(query_sq_out); - out = mlx5_vzalloc(inlen); + out = kvzalloc(inlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -4308,7 +4308,7 @@ static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, int err; inlen = MLX5_ST_SZ_BYTES(query_rq_out); - out = mlx5_vzalloc(inlen); + out = kvzalloc(inlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -4612,7 +4612,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, dev = to_mdev(pd->device); inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -4842,7 +4842,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, return ERR_PTR(-ENOMEM); inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err; @@ -4921,7 +4921,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, return -EOPNOTSUPP; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); - in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 7cb145f9a6db..43707b101f47 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -127,7 +127,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, goto err_umem; } - in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont); + in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL); if (!in->pas) { err = -ENOMEM; goto err_umem; @@ -189,7 +189,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, } mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift); - in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages); + in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL); if (!in->pas) { err = -ENOMEM; goto err_buf; diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 5b9601014f0c..a30aa6527f7e 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -815,7 +815,7 @@ static struct pci_driver nes_pci_driver = { .remove = nes_remove, }; -static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) +static ssize_t adapter_show(struct device_driver *ddp, char *buf) { unsigned int devfn = 0xffffffff; unsigned char bus_number = 0xff; @@ -834,7 +834,7 @@ static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn); } -static ssize_t nes_store_adapter(struct device_driver *ddp, +static ssize_t adapter_store(struct device_driver *ddp, const char 
*buf, size_t count) { char *p = (char *)buf; @@ -843,7 +843,7 @@ static ssize_t nes_store_adapter(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf) +static ssize_t eeprom_cmd_show(struct device_driver *ddp, char *buf) { u32 eeprom_cmd = 0xdead; u32 i = 0; @@ -859,7 +859,7 @@ static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd); } -static ssize_t nes_store_ee_cmd(struct device_driver *ddp, +static ssize_t eeprom_cmd_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -880,7 +880,7 @@ static ssize_t nes_store_ee_cmd(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf) +static ssize_t eeprom_data_show(struct device_driver *ddp, char *buf) { u32 eeprom_data = 0xdead; u32 i = 0; @@ -897,7 +897,7 @@ static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data); } -static ssize_t nes_store_ee_data(struct device_driver *ddp, +static ssize_t eeprom_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -918,7 +918,7 @@ static ssize_t nes_store_ee_data(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf) +static ssize_t flash_cmd_show(struct device_driver *ddp, char *buf) { u32 flash_cmd = 0xdead; u32 i = 0; @@ -935,7 +935,7 @@ static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd); } -static ssize_t nes_store_flash_cmd(struct device_driver *ddp, +static ssize_t flash_cmd_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -956,7 +956,7 @@ static ssize_t nes_store_flash_cmd(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf) +static ssize_t flash_data_show(struct device_driver *ddp, char *buf) { u32 flash_data = 0xdead; u32 i = 0; @@ -973,7 +973,7 @@ static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data); } -static ssize_t nes_store_flash_data(struct device_driver *ddp, +static ssize_t flash_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -994,12 +994,12 @@ static ssize_t nes_store_flash_data(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf) +static ssize_t nonidx_addr_show(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr); } -static ssize_t nes_store_nonidx_addr(struct device_driver *ddp, +static ssize_t nonidx_addr_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1010,7 +1010,7 @@ static ssize_t nes_store_nonidx_addr(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf) +static ssize_t nonidx_data_show(struct device_driver *ddp, char *buf) { u32 nonidx_data = 0xdead; u32 i = 0; @@ -1027,7 +1027,7 @@ static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data); } -static ssize_t nes_store_nonidx_data(struct device_driver *ddp, +static ssize_t 
nonidx_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1048,12 +1048,12 @@ static ssize_t nes_store_nonidx_data(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf) +static ssize_t idx_addr_show(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr); } -static ssize_t nes_store_idx_addr(struct device_driver *ddp, +static ssize_t idx_addr_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1064,7 +1064,7 @@ static ssize_t nes_store_idx_addr(struct device_driver *ddp, return strnlen(buf, count); } -static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf) +static ssize_t idx_data_show(struct device_driver *ddp, char *buf) { u32 idx_data = 0xdead; u32 i = 0; @@ -1081,7 +1081,7 @@ static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data); } -static ssize_t nes_store_idx_data(struct device_driver *ddp, +static ssize_t idx_data_store(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; @@ -1102,11 +1102,7 @@ static ssize_t nes_store_idx_data(struct device_driver *ddp, return strnlen(buf, count); } - -/** - * nes_show_wqm_quanta - */ -static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) +static ssize_t wqm_quanta_show(struct device_driver *ddp, char *buf) { u32 wqm_quanta_value = 0xdead; u32 i = 0; @@ -1123,12 +1119,8 @@ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value); } - -/** - * nes_store_wqm_quanta - */ -static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, - const char *buf, size_t count) +static ssize_t wqm_quanta_store(struct device_driver *ddp, const char *buf, + size_t count) { unsigned long wqm_quanta_value; u32 wqm_config1; @@ -1153,26 +1145,16 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, return strnlen(buf, count); } -static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR, - nes_show_adapter, nes_store_adapter); -static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR, - nes_show_ee_cmd, nes_store_ee_cmd); -static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR, - nes_show_ee_data, nes_store_ee_data); -static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR, - nes_show_flash_cmd, nes_store_flash_cmd); -static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR, - nes_show_flash_data, nes_store_flash_data); -static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR, - nes_show_nonidx_addr, nes_store_nonidx_addr); -static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR, - nes_show_nonidx_data, nes_store_nonidx_data); -static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR, - nes_show_idx_addr, nes_store_idx_addr); -static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR, - nes_show_idx_data, nes_store_idx_data); -static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR, - nes_show_wqm_quanta, nes_store_wqm_quanta); +static DRIVER_ATTR_RW(adapter); +static DRIVER_ATTR_RW(eeprom_cmd); +static DRIVER_ATTR_RW(eeprom_data); +static DRIVER_ATTR_RW(flash_cmd); +static DRIVER_ATTR_RW(flash_data); +static DRIVER_ATTR_RW(nonidx_addr); +static DRIVER_ATTR_RW(nonidx_data); +static DRIVER_ATTR_RW(idx_addr); +static DRIVER_ATTR_RW(idx_data); +static DRIVER_ATTR_RW(wqm_quanta); static int nes_create_driver_sysfs(struct pci_driver *drv) { diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 
30b256a2c54e..de4025deaa4a 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, if (type == NES_TIMER_TYPE_SEND) { new_send->seq_num = ntohl(tcp_hdr(skb)->seq); - atomic_inc(&new_send->skb->users); + refcount_inc(&new_send->skb->users); spin_lock_irqsave(&cm_node->retrans_list_lock, flags); cm_node->send_entry = new_send; add_ref_cm_node(cm_node); @@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass) flags); break; } - atomic_inc(&send_entry->skb->users); + refcount_inc(&send_entry->skb->users); cm_packets_retrans++; nes_debug(NES_DBG_CM, "Retransmitting send_entry %p " "for node %p, jiffies = %lu, time to send = " diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 6a72095d6c7a..b5851fd67d4f 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -37,7 +37,7 @@ #include <linux/iommu.h> #include <linux/pci.h> #include <net/addrconf.h> -#include <linux/qed/qede_roce.h> + #include <linux/qed/qed_chain.h> #include <linux/qed/qed_if.h> #include "qedr.h" @@ -276,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) QED_CHAIN_CNT_TYPE_U16, n_entries, sizeof(struct regpair *), - &cnq->pbl); + &cnq->pbl, NULL); if (rc) goto err4; @@ -886,9 +886,9 @@ static void qedr_mac_address_change(struct qedr_dev *dev) memcpy(&sgid->raw[8], guid, sizeof(guid)); /* Update LL2 */ - rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev, - dev->gsi_ll2_mac_address, - dev->ndev->dev_addr); + rc = dev->ops->ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, + dev->ndev->dev_addr); ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); @@ -902,7 +902,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) * initialization done before RoCE driver notifies * event to stack. 
*/ -static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) +static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event) { switch (event) { case QEDE_UP: @@ -931,12 +931,12 @@ static struct qedr_driver qedr_drv = { static int __init qedr_init_module(void) { - return qede_roce_register_driver(&qedr_drv); + return qede_rdma_register_driver(&qedr_drv); } static void __exit qedr_exit_module(void) { - qede_roce_unregister_driver(&qedr_drv); + qede_rdma_unregister_driver(&qedr_drv); } module_init(qedr_init_module); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa08c76a4245..ab7784bfdac6 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -36,8 +36,8 @@ #include <rdma/ib_addr.h> #include <linux/qed/qed_if.h> #include <linux/qed/qed_chain.h> -#include <linux/qed/qed_roce_if.h> -#include <linux/qed/qede_roce.h> +#include <linux/qed/qed_rdma_if.h> +#include <linux/qed/qede_rdma.h> #include <linux/qed/roce_common.h> #include "qedr_hsi_rdma.h" @@ -58,7 +58,10 @@ #define QEDR_MSG_QP " QP" #define QEDR_MSG_GSI " GSI" -#define QEDR_CQ_MAGIC_NUMBER (0x11223344) +#define QEDR_CQ_MAGIC_NUMBER (0x11223344) + +#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) +#define FW_PAGE_SHIFT (12) struct qedr_dev; @@ -150,6 +153,8 @@ struct qedr_dev { u32 dp_module; u8 dp_level; u8 num_hwfns; + u8 gsi_ll2_handle; + uint wq_multiplier; u8 gsi_ll2_mac_address[ETH_ALEN]; int gsi_qp_created; diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index d86dbe814d98..4689e802b332 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -44,7 +44,7 @@ #include <rdma/ib_cache.h> #include <linux/qed/qed_if.h> -#include <linux/qed/qed_roce_if.h> +#include <linux/qed/qed_rdma_if.h> #include "qedr.h" #include "verbs.h" #include <rdma/qedr-abi.h> @@ -64,9 +64,14 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, dev->gsi_qp = qp; } -void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) +void qedr_ll2_complete_tx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet) { - struct qedr_dev *dev = (struct qedr_dev *)_qdev; + struct qedr_dev *dev = (struct qedr_dev *)cxt; + struct qed_roce_ll2_packet *pkt = cookie; struct qedr_cq *cq = dev->gsi_sqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; @@ -88,20 +93,26 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } -void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, - struct qed_roce_ll2_rx_params *params) +void qedr_ll2_complete_rx_packet(void *cxt, + struct qed_ll2_comp_rx_data *data) { - struct qedr_dev *dev = (struct qedr_dev *)_dev; + struct qedr_dev *dev = (struct qedr_dev *)cxt; struct qedr_cq *cq = dev->gsi_rqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; spin_lock_irqsave(&qp->q_lock, flags); - qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc; - qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id; - qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len; - ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac); + qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? + -EINVAL : 0; + qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; + /* note: length stands for data length i.e. 
GRH is excluded */ + qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = + data->length.data_length; + *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) = + ntohl(data->opaque_data_0); + *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) = + ntohs((u16)data->opaque_data_1); qedr_inc_sw_gsi_cons(&qp->rq); @@ -111,6 +122,14 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } +void qedr_ll2_release_rx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, bool b_last_packet) +{ + /* Do nothing... */ +} + static void qedr_destroy_gsi_cq(struct qedr_dev *dev, struct ib_qp_init_attr *attrs) { @@ -159,27 +178,159 @@ static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev, return 0; } +static int qedr_ll2_post_tx(struct qedr_dev *dev, + struct qed_roce_ll2_packet *pkt) +{ + enum qed_ll2_roce_flavor_type roce_flavor; + struct qed_ll2_tx_pkt_info ll2_tx_pkt; + int rc; + int i; + + memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt)); + + roce_flavor = (pkt->roce_mode == ROCE_V1) ? + QED_LL2_ROCE : QED_LL2_RROCE; + + if (pkt->roce_mode == ROCE_V2_IPV4) + ll2_tx_pkt.enable_ip_cksum = 1; + + ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg; + ll2_tx_pkt.vlan = 0; + ll2_tx_pkt.tx_dest = pkt->tx_dest; + ll2_tx_pkt.qed_roce_flavor = roce_flavor; + ll2_tx_pkt.first_frag = pkt->header.baddr; + ll2_tx_pkt.first_frag_len = pkt->header.len; + ll2_tx_pkt.cookie = pkt; + + /* tx header */ + rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx, + dev->gsi_ll2_handle, + &ll2_tx_pkt, 1); + if (rc) { + /* TX failed while posting header - release resources */ + dma_free_coherent(&dev->pdev->dev, pkt->header.len, + pkt->header.vaddr, pkt->header.baddr); + kfree(pkt); + + DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc); + return rc; + } + + /* tx payload */ + for (i = 0; i < pkt->n_seg; i++) { + rc = dev->ops->ll2_set_fragment_of_tx_packet( + dev->rdma_ctx, + dev->gsi_ll2_handle, + pkt->payload[i].baddr, + pkt->payload[i].len); + + if (rc) { + /* if failed not much to do here, partial packet has + * been posted we can't free memory, will need to wait + * for completion + */ + DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc); + return rc; + } + } + + return 0; +} + +int qedr_ll2_stop(struct qedr_dev *dev) +{ + int rc; + + if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE) + return 0; + + /* remove LL2 MAC address filter */ + rc = dev->ops->ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, NULL); + + rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx, + dev->gsi_ll2_handle); + if (rc) + DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc); + + dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); + + dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE; + + return rc; +} + +int qedr_ll2_start(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs, struct qedr_qp *qp) +{ + struct qed_ll2_acquire_data data; + struct qed_ll2_cbs cbs; + int rc; + + /* configure and start LL2 */ + cbs.rx_comp_cb = qedr_ll2_complete_rx_packet; + cbs.tx_comp_cb = qedr_ll2_complete_tx_packet; + cbs.rx_release_cb = qedr_ll2_release_rx_packet; + cbs.tx_release_cb = qedr_ll2_complete_tx_packet; + cbs.cookie = dev; + + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_ROCE; + data.input.mtu = dev->ndev->mtu; + data.input.rx_num_desc = attrs->cap.max_recv_wr; + data.input.rx_drop_ttl0_flg = true; + data.input.rx_vlan_removal_en = false; + data.input.tx_num_desc = attrs->cap.max_send_wr; + 
data.input.tx_tc = 0; + data.input.tx_dest = QED_LL2_TX_DEST_NW; + data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET; + data.input.ai_err_no_buf = QED_LL2_DROP_PACKET; + data.input.gsi_enable = 1; + data.p_connection_handle = &dev->gsi_ll2_handle; + data.cbs = &cbs; + + rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data); + if (rc) { + DP_ERR(dev, + "ll2 start: failed to acquire LL2 connection (rc=%d)\n", + rc); + return rc; + } + + rc = dev->ops->ll2_establish_connection(dev->rdma_ctx, + dev->gsi_ll2_handle); + if (rc) { + DP_ERR(dev, + "ll2 start: failed to establish LL2 connection (rc=%d)\n", + rc); + goto err1; + } + + rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr); + if (rc) + goto err2; + + return 0; + +err2: + dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle); +err1: + dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); + + return rc; +} + struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct qedr_qp *qp) { - struct qed_roce_ll2_params ll2_params; int rc; rc = qedr_check_gsi_qp_attrs(dev, attrs); if (rc) return ERR_PTR(rc); - /* configure and start LL2 */ - memset(&ll2_params, 0, sizeof(ll2_params)); - ll2_params.max_tx_buffers = attrs->cap.max_send_wr; - ll2_params.max_rx_buffers = attrs->cap.max_recv_wr; - ll2_params.cbs.tx_cb = qedr_ll2_tx_cb; - ll2_params.cbs.rx_cb = qedr_ll2_rx_cb; - ll2_params.cb_cookie = (void *)dev; - ll2_params.mtu = dev->ndev->mtu; - ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr); - rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params); + rc = qedr_ll2_start(dev, attrs, qp); if (rc) { DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc); return ERR_PTR(rc); @@ -214,7 +365,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, err: kfree(qp->rqe_wr_id); - rc = dev->ops->roce_ll2_stop(dev->cdev); + rc = qedr_ll2_stop(dev); if (rc) DP_ERR(dev, "create gsi qp: failed destroy on create\n"); @@ -223,15 +374,7 @@ err: int qedr_destroy_gsi_qp(struct qedr_dev *dev) { - int rc; - - rc = dev->ops->roce_ll2_stop(dev->cdev); - if (rc) - DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc); - else - DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n"); - - return rc; + return qedr_ll2_stop(dev); } #define QEDR_MAX_UD_HEADER_SIZE (100) @@ -421,7 +564,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, { struct qed_roce_ll2_packet *pkt = NULL; struct qedr_qp *qp = get_qedr_qp(ibqp); - struct qed_roce_ll2_tx_params params; struct qedr_dev *dev = qp->dev; unsigned long flags; int rc; @@ -449,8 +591,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto err; } - memset(&params, 0, sizeof(params)); - spin_lock_irqsave(&qp->q_lock, flags); rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); @@ -459,7 +599,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto err; } - rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params); + rc = qedr_ll2_post_tx(dev, pkt); + if (!rc) { qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; qedr_inc_sw_prod(&qp->sq); @@ -467,17 +608,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n", wr->opcode, in_irq(), irqs_disabled(), wr->wr_id); } else { - if (rc == 
QED_ROCE_TX_FRAG_FAILURE) { - /* NTD since TX failed while posting a fragment. We will - * release the resources on TX callback - */ - } - DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc); rc = -EAGAIN; *bad_wr = wr; @@ -504,10 +634,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, { struct qedr_dev *dev = get_qedr_dev(ibqp->device); struct qedr_qp *qp = get_qedr_qp(ibqp); - struct qed_roce_ll2_buffer buf; unsigned long flags; - int status = 0; - int rc; + int rc = 0; if ((qp->state != QED_ROCE_QP_STATE_RTR) && (qp->state != QED_ROCE_QP_STATE_RTS)) { @@ -518,8 +646,6 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, return -EINVAL; } - memset(&buf, 0, sizeof(buf)); - spin_lock_irqsave(&qp->q_lock, flags); while (wr) { @@ -530,10 +656,12 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, goto err; } - buf.baddr = wr->sg_list[0].addr; - buf.len = wr->sg_list[0].length; - - rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1); + rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx, + dev->gsi_ll2_handle, + wr->sg_list[0].addr, + wr->sg_list[0].length, + 0 /* cookie */, + 1 /* notify_fw */); if (rc) { DP_ERR(dev, "gsi post recv: failed to post rx buffer (rc=%d)\n", @@ -553,7 +681,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_unlock_irqrestore(&qp->q_lock, flags); - return status; + return rc; err: spin_unlock_irqrestore(&qp->q_lock, flags); *bad_wr = wr; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 17685cfea6a2..548e4d1e998f 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev, static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, struct qedr_pbl *pbl, - struct qedr_pbl_info *pbl_info) + struct qedr_pbl_info *pbl_info, u32 pg_shift) { int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; + u32 fw_pg_cnt, fw_pg_per_umem_pg; struct qedr_pbl *pbl_tbl; struct scatterlist *sg; struct regpair *pbe; + u64 pg_addr; int entry; - u32 addr; if (!pbl_info->num_pbes) return; @@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, shift = umem->page_shift; + fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift); + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { pages = sg_dma_len(sg) >> shift; + pg_addr = sg_dma_address(sg); for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { - /* store the page address in pbe */ - pbe->lo = cpu_to_le32(sg_dma_address(sg) + - (pg_cnt << shift)); - addr = upper_32_bits(sg_dma_address(sg) + - (pg_cnt << shift)); - pbe->hi = cpu_to_le32(addr); - pbe_cnt++; - total_num_pbes++; - pbe++; - - if (total_num_pbes == pbl_info->num_pbes) - return; - - /* If the given pbl is full storing the pbes, - * move to next pbl. - */ - if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { - pbl_tbl++; - pbe = (struct regpair *)pbl_tbl->va; - pbe_cnt = 0; + for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { + pbe->lo = cpu_to_le32(pg_addr); + pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); + + pg_addr += BIT(pg_shift); + pbe_cnt++; + total_num_pbes++; + pbe++; + + if (total_num_pbes == pbl_info->num_pbes) + return; + + /* If the given pbl is full storing the pbes, + * move to next pbl. 
+ */ + if (pbe_cnt == + (pbl_info->pbl_size / sizeof(u64))) { + pbl_tbl++; + pbe = (struct regpair *)pbl_tbl->va; + pbe_cnt = 0; + } + + fw_pg_cnt++; } } } @@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, u64 buf_addr, size_t buf_len, int access, int dmasync) { - int page_cnt; + u32 fw_pages; int rc; q->buf_addr = buf_addr; @@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, return PTR_ERR(q->umem); } - page_cnt = ib_umem_page_count(q->umem); - rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); + fw_pages = ib_umem_page_count(q->umem) << + (q->umem->page_shift - FW_PAGE_SHIFT); + + rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); if (rc) goto err0; @@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, goto err0; } - qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); + qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, + FW_PAGE_SHIFT); return 0; @@ -925,7 +935,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev, QED_CHAIN_CNT_TYPE_U32, chain_entries, sizeof(union rdma_cqe), - &cq->pbl); + &cq->pbl, NULL); if (rc) goto err1; @@ -1413,7 +1423,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev, QED_CHAIN_CNT_TYPE_U32, n_sq_elems, QEDR_SQE_ELEMENT_SIZE, - &qp->sq.pbl); + &qp->sq.pbl, NULL); if (rc) return rc; @@ -1427,7 +1437,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev, QED_CHAIN_CNT_TYPE_U32, n_rq_elems, QEDR_RQE_ELEMENT_SIZE, - &qp->rq.pbl); + &qp->rq.pbl, NULL); if (rc) return rc; @@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, goto err1; qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, - &mr->info.pbl_info); + &mr->info.pbl_info, mr->umem->page_shift); rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { @@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp, case IB_WC_REG_MR: qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; break; + case IB_WC_RDMA_READ: + case IB_WC_SEND: + wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; + break; default: break; } diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index ecdba2fce083..1ac5b8551a4d 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -68,6 +68,7 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe, u32 crc, void *next, size_t len) { + u32 retval; int err; SHASH_DESC_ON_STACK(shash, rxe->tfm); @@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe, return crc32_le(crc, next, len); } - return *(u32 *)shash_desc_ctx(shash); + retval = *(u32 *)shash_desc_ctx(shash); + barrier_data(shash_desc_ctx(shash)); + return retval; } int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 83d709e74dfb..073e66783f1d 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, sge = ibwr->sg_list; for (i = 0; i < num_sge; i++, sge++) { - if (qp->is_user && copy_from_user(p, (__user void *) - (uintptr_t)sge->addr, sge->length)) - return -EFAULT; - - else if (!qp->is_user) - memcpy(p, (void *)(uintptr_t)sge->addr, - sge->length); + memcpy(p, (void *)(uintptr_t)sge->addr, + sge->length); p += sge->length; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 
0060b2f9f659..efe7402f4885 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -863,7 +863,6 @@ dev_stop: set_bit(IPOIB_STOP_REAPER, &priv->flags); cancel_delayed_work(&priv->ah_reap_task); set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - napi_enable(&priv->napi); ipoib_ib_dev_stop(dev); return -1; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index a115c0b7a310..6e86eeee370e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -681,7 +681,7 @@ static void push_pseudo_header(struct sk_buff *skb, const char *daddr) { struct ipoib_pseudo_header *phdr; - phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr)); + phdr = skb_push(skb, sizeof(*phdr)); memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); } @@ -1129,7 +1129,7 @@ static int ipoib_hard_header(struct sk_buff *skb, { struct ipoib_header *header; - header = (struct ipoib_header *) skb_push(skb, sizeof *header); + header = skb_push(skb, sizeof *header); header->proto = htons(type); header->reserved = 0; @@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev) ipoib_transport_dev_cleanup(dev); + netif_napi_del(&priv->napi); + ipoib_cm_dev_cleanup(dev); kfree(priv->rx_ring); @@ -1649,6 +1651,7 @@ out_rx_ring_cleanup: kfree(priv->rx_ring); out: + netif_napi_del(&priv->napi); return -ENOMEM; } @@ -1890,6 +1893,7 @@ static struct net_device rn->send = ipoib_send; rn->attach_mcast = ipoib_mcast_attach; rn->detach_mcast = ipoib_mcast_detach; + rn->free_rdma_netdev = free_netdev; rn->hca = hca; dev->netdev_ops = &ipoib_netdev_default_pf; @@ -2237,6 +2241,7 @@ event_failed: device_init_failed: free_netdev(priv->dev); + kfree(priv); alloc_mem_failed: return ERR_PTR(result); @@ -2277,13 +2282,15 @@ static void ipoib_add_one(struct ib_device *device) static void ipoib_remove_one(struct ib_device *device, void *client_data) { - struct ipoib_dev_priv *priv, *tmp; + struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; struct list_head *dev_list = client_data; if (!dev_list) return; list_for_each_entry_safe(priv, tmp, dev_list, list) { + struct rdma_netdev *rn = netdev_priv(priv->dev); + ib_unregister_event_handler(&priv->event_handler); flush_workqueue(ipoib_workqueue); @@ -2300,7 +2307,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) flush_workqueue(priv->wq); unregister_netdev(priv->dev); - free_netdev(priv->dev); + rn->free_rdma_netdev(priv->dev); + + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) + kfree(cpriv); + kfree(priv); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index 28884781311b..3e44087935ae 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -64,8 +64,9 @@ nla_put_failure: return -EMSGSIZE; } -static int ipoib_changelink(struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) +static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) { u16 mode, umcast; int ret = 0; @@ -93,7 +94,8 @@ out_err: } static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) { struct net_device *pdev; struct ipoib_dev_priv *ppriv; @@ -133,7 +135,7 @@ static int 
ipoib_new_child_link(struct net *src_net, struct net_device *dev, child_pkey, IPOIB_RTNL_CHILD); if (!err && data) - err = ipoib_changelink(dev, tb, data); + err = ipoib_changelink(dev, tb, data, extack); return err; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 36dc4fcaa3cd..081b33deff1b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) snprintf(intf_name, sizeof intf_name, "%s.%04x", ppriv->dev->name, pkey); + if (!rtnl_trylock()) + return restart_syscall(); + priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); if (!priv) return -ENOMEM; - if (!rtnl_trylock()) - return restart_syscall(); - down_write(&ppriv->vlan_rwsem); /* @@ -167,8 +167,10 @@ out: rtnl_unlock(); - if (result) + if (result) { free_netdev(priv->dev); + kfree(priv); + } return result; } @@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) if (dev) { free_netdev(dev); + kfree(priv); return 0; } diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c index 2e8fee982436..afa938bd26d6 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c @@ -460,7 +460,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb) sc = opa_vnic_get_sc(info, skb); l4_hdr = info->vesw.vesw_id; - mdata = (struct opa_vnic_skb_mdata *)skb_push(skb, sizeof(*mdata)); + mdata = skb_push(skb, sizeof(*mdata)); mdata->vl = opa_vnic_get_vl(adapter, skb); mdata->entropy = entropy; mdata->flags = 0; diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c index d66540e24885..62390e9e0023 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c @@ -146,15 +146,15 @@ static void vnic_get_ethtool_stats(struct net_device *netdev, int i; memset(&vstats, 0, sizeof(vstats)); - mutex_lock(&adapter->stats_lock); + spin_lock(&adapter->stats_lock); adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats); + spin_unlock(&adapter->stats_lock); for (i = 0; i < VNIC_STATS_LEN; i++) { char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset; data[i] = (vnic_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } - mutex_unlock(&adapter->stats_lock); } /* vnic_get_strings - get strings */ diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h index 6bba886bec1f..ca29e6d5aedc 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h @@ -214,7 +214,7 @@ struct opa_vnic_adapter { struct mutex mactbl_lock; /* Lock used to protect access to vnic counters */ - struct mutex stats_lock; + spinlock_t stats_lock; u8 flow_tbl[OPA_VNIC_FLOW_TBL_SIZE]; diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c index 905f39dda5aa..1a3c25364b64 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c @@ -69,9 +69,9 @@ static void opa_vnic_get_stats64(struct net_device *netdev, struct opa_vnic_stats vstats; memset(&vstats, 0, sizeof(vstats)); - mutex_lock(&adapter->stats_lock); + spin_lock(&adapter->stats_lock); adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats); - mutex_unlock(&adapter->stats_lock); + spin_unlock(&adapter->stats_lock); memcpy(stats, &vstats.netstats, sizeof(*stats)); } @@ -103,7 +103,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, int rc; /* pass entropy and vl as metadata in skb */ - mdata = (struct opa_vnic_skb_mdata *)skb_push(skb, sizeof(*mdata)); + mdata = skb_push(skb, sizeof(*mdata)); mdata->entropy = opa_vnic_calc_entropy(adapter, skb); mdata->vl = opa_vnic_get_vl(adapter, skb); rc = adapter->rn_ops->ndo_select_queue(netdev, skb, @@ -323,13 +323,13 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, else if (IS_ERR(netdev)) return ERR_CAST(netdev); + rn = netdev_priv(netdev); adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { rc = -ENOMEM; goto adapter_err; } - rn = netdev_priv(netdev); rn->clnt_priv = adapter; rn->hca = ibdev; rn->port_num = port_num; @@ -344,7 +344,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM; mutex_init(&adapter->lock); mutex_init(&adapter->mactbl_lock); - mutex_init(&adapter->stats_lock); + spin_lock_init(&adapter->stats_lock); SET_NETDEV_DEV(netdev, ibdev->dev.parent); @@ -364,10 +364,9 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, netdev_err: mutex_destroy(&adapter->lock); mutex_destroy(&adapter->mactbl_lock); - mutex_destroy(&adapter->stats_lock); kfree(adapter); adapter_err: - ibdev->free_rdma_netdev(netdev); + rn->free_rdma_netdev(netdev); return ERR_PTR(rc); } @@ -376,14 +375,13 @@ adapter_err: void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct ib_device *ibdev = adapter->ibdev; + struct rdma_netdev *rn = netdev_priv(netdev); v_info("removing\n"); unregister_netdev(netdev); opa_vnic_release_mac_tbl(adapter); mutex_destroy(&adapter->lock); mutex_destroy(&adapter->mactbl_lock); - mutex_destroy(&adapter->stats_lock); kfree(adapter); - ibdev->free_rdma_netdev(netdev); + rn->free_rdma_netdev(netdev); } diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c index 875694f9a7f9..cf768dd78d1b 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c @@ -794,7 +794,7 @@ void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter, send_buf = 
ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0, IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA, - GFP_KERNEL, OPA_MGMT_BASE_VERSION); + GFP_ATOMIC, OPA_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) { c_err("%s:Couldn't allocate send buf\n", __func__); goto err_sndbuf; diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c index a51bf977f4d6..c2733964379c 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c @@ -89,9 +89,9 @@ void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter, u64 *src; memset(&vstats, 0, sizeof(vstats)); - mutex_lock(&adapter->stats_lock); + spin_lock(&adapter->stats_lock); adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats); - mutex_unlock(&adapter->stats_lock); + spin_unlock(&adapter->stats_lock); cntrs->vp_instance = cpu_to_be16(adapter->vport_num); cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id); @@ -128,9 +128,9 @@ void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter, struct opa_vnic_stats vstats; memset(&vstats, 0, sizeof(vstats)); - mutex_lock(&adapter->stats_lock); + spin_lock(&adapter->stats_lock); adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats); - mutex_unlock(&adapter->stats_lock); + spin_unlock(&adapter->stats_lock); cntrs->vp_instance = cpu_to_be16(adapter->vport_num); cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
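The qedr GSI hunks above move the driver onto the generic qed LL2 layer: qedr_ll2_start() acquires a connection, establishes it, then installs the MAC filter, unwinding in reverse order via err2/err1 on failure, and qedr_ll2_stop() removes the filter, terminates and releases the connection. The following is a minimal, self-contained userspace sketch of that ordering and its goto-based unwind; the ops structure and stub names are illustrative stand-ins, not the real qed interface.

/* Userspace model of the qedr_ll2_start()/qedr_ll2_stop() sequencing.
 * Only the ordering and the error unwind mirror the patch; everything
 * else is a stub.
 */
#include <stdio.h>

struct ll2_ops {
	int (*acquire)(void);
	int (*establish)(void);
	int (*set_mac_filter)(int enable);
	int (*terminate)(void);
	void (*release)(void);
};

static int stub_acquire(void)          { puts("acquire connection");   return 0; }
static int stub_establish(void)        { puts("establish connection"); return 0; }
static int stub_set_mac_filter(int on) { printf("%s MAC filter\n", on ? "add" : "remove"); return 0; }
static int stub_terminate(void)        { puts("terminate connection"); return 0; }
static void stub_release(void)         { puts("release connection"); }

static int ll2_start(const struct ll2_ops *ops)
{
	int rc;

	rc = ops->acquire();
	if (rc)
		return rc;              /* nothing acquired, nothing to unwind */

	rc = ops->establish();
	if (rc)
		goto err_release;       /* acquired but never established */

	rc = ops->set_mac_filter(1);
	if (rc)
		goto err_terminate;     /* established: terminate, then release */

	return 0;

err_terminate:
	ops->terminate();
err_release:
	ops->release();
	return rc;
}

static int ll2_stop(const struct ll2_ops *ops)
{
	int rc;

	ops->set_mac_filter(0);         /* drop the MAC filter first */
	rc = ops->terminate();          /* then tear the connection down */
	ops->release();                 /* finally hand the handle back */
	return rc;
}

int main(void)
{
	const struct ll2_ops ops = {
		.acquire        = stub_acquire,
		.establish      = stub_establish,
		.set_mac_filter = stub_set_mac_filter,
		.terminate      = stub_terminate,
		.release        = stub_release,
	};

	if (!ll2_start(&ops))
		ll2_stop(&ops);
	return 0;
}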
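In the verbs.c hunks, qedr_populate_pbls() now takes a target page shift and emits BIT(umem->page_shift - pg_shift) firmware-sized page entries per umem page, while qedr_init_user_queue() scales the PBE count the same way (ib_umem_page_count() shifted left by page_shift - FW_PAGE_SHIFT). Below is a standalone sketch of that arithmetic only; the 4 KiB firmware page (shift 12) is an assumed value for illustration, not taken from the driver headers.

/* Standalone model of the PBL page-splitting arithmetic: one umem page of
 * 2^umem_shift bytes becomes 2^(umem_shift - fw_shift) firmware-page
 * entries.  fw_shift = 12 (4 KiB) is an assumption for the example.
 */
#include <stdint.h>
#include <stdio.h>

static void emit_pbes(uint64_t dma_addr, unsigned int umem_shift,
		      unsigned int fw_shift)
{
	uint64_t fw_pgs_per_umem_pg = 1ULL << (umem_shift - fw_shift);
	uint64_t pg_addr = dma_addr;

	for (uint64_t i = 0; i < fw_pgs_per_umem_pg; i++) {
		/* lo/hi split mirrors the regpair written into the PBL */
		uint32_t lo = (uint32_t)pg_addr;
		uint32_t hi = (uint32_t)(pg_addr >> 32);

		printf("pbe %2llu: hi=0x%08x lo=0x%08x\n",
		       (unsigned long long)i, hi, lo);
		pg_addr += 1ULL << fw_shift;
	}
}

int main(void)
{
	/* one 64 KiB umem page at an arbitrary DMA address:
	 * 64 KiB / 4 KiB = 16 firmware page entries
	 */
	emit_pbes(0x1234560000ULL, 16, 12);
	return 0;
}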
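The IPoIB and opa_vnic hunks change who frees an rdma_netdev: the default IPoIB path records free_netdev in rn->free_rdma_netdev, teardown paths call that per-netdev destructor instead of going through the ib_device, and the separately allocated priv is released with kfree(). The sketch below models only the ownership rule (the allocator records the matching free routine, teardown calls it blindly); the structure and function names are made up for the example and are not the kernel API.

/* Userspace model of "free through the object's own destructor".
 * All names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_netdev {
	void (*free_netdev)(struct fake_netdev *);	/* set by the allocator */
	char name[16];
};

static void default_free(struct fake_netdev *nd)
{
	printf("default free of %s\n", nd->name);
	free(nd);
}

static struct fake_netdev *alloc_default_netdev(const char *name)
{
	struct fake_netdev *nd = calloc(1, sizeof(*nd));

	if (!nd)
		return NULL;
	snprintf(nd->name, sizeof(nd->name), "%s", name);
	nd->free_netdev = default_free;	/* allocator records its own destructor */
	return nd;
}

static void remove_netdev(struct fake_netdev *nd)
{
	/* teardown never assumes how the object was allocated */
	nd->free_netdev(nd);
}

int main(void)
{
	struct fake_netdev *nd = alloc_default_netdev("ib0");

	if (nd)
		remove_netdev(nd);
	return 0;
}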
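The opa_vnic stats hunks convert stats_lock from a mutex to a spinlock and, in vnic_get_ethtool_stats(), release the lock immediately after the ndo_get_stats64() snapshot instead of holding it across the formatting loop. A small pthread model of that "snapshot under the lock, format outside it" shape follows, under the assumption that the snapshot path must stay short and non-sleeping; the counter layout is invented for the example.

/* Userspace model: copy the counters inside a short spinlock section,
 * then format the copy with the lock already dropped.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats_snapshot {
	uint64_t rx_packets;
	uint64_t tx_packets;
};

static pthread_spinlock_t stats_lock;
static struct stats_snapshot live = { 1234, 5678 };	/* stand-in counters */

static void get_stats(struct stats_snapshot *out)
{
	pthread_spin_lock(&stats_lock);
	memcpy(out, &live, sizeof(*out));	/* short, non-sleeping critical section */
	pthread_spin_unlock(&stats_lock);
}

int main(void)
{
	struct stats_snapshot snap;

	pthread_spin_init(&stats_lock, PTHREAD_PROCESS_PRIVATE);

	get_stats(&snap);
	/* formatting happens outside the lock, as in the ethtool path */
	printf("rx=%llu tx=%llu\n",
	       (unsigned long long)snap.rx_packets,
	       (unsigned long long)snap.tx_packets);

	pthread_spin_destroy(&stats_lock);
	return 0;
}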