Diffstat (limited to 'drivers/infiniband/hw/mlx4')
 -rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c |   4
 -rw-r--r--  drivers/infiniband/hw/mlx4/cm.c         |   2
 -rw-r--r--  drivers/infiniband/hw/mlx4/cq.c         |   9
 -rw-r--r--  drivers/infiniband/hw/mlx4/mad.c        |  18
 -rw-r--r--  drivers/infiniband/hw/mlx4/main.c       | 277
 -rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c        |   8
 -rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h    |  26
 -rw-r--r--  drivers/infiniband/hw/mlx4/mr.c         | 290
 -rw-r--r--  drivers/infiniband/hw/mlx4/qp.c         | 162
 -rw-r--r--  drivers/infiniband/hw/mlx4/sysfs.c      |   2
10 files changed, 338 insertions, 460 deletions
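
The largest block of changes below is in main.c: the driver moves from the old mlx4_interface add/remove/event callbacks to an auxiliary-bus driver plus a notifier block for core events. For orientation, this is a minimal skeleton of the plain auxiliary-bus pattern with hypothetical example_* names; the real code below wraps it in struct mlx4_adrv and registers it with mlx4_register_auxiliary_driver(), and builds the id_table name from MLX4_ADEV_NAME (the literal string used here is an assumption).

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int example_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	/* allocate and register the IB device here, then save it for remove() */
	auxiliary_set_drvdata(adev, NULL);
	return 0;
}

static void example_remove(struct auxiliary_device *adev)
{
	void *ibdev = auxiliary_get_drvdata(adev);

	/* unregister and free whatever probe() saved */
	(void)ibdev;
}

static const struct auxiliary_device_id example_id_table[] = {
	/* the diff builds this as MLX4_ADEV_NAME ".ib"; literal value assumed */
	{ .name = "mlx4_core.ib" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, example_id_table);

static struct auxiliary_driver example_adrv = {
	.name = "ib",
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_id_table,
};
module_auxiliary_driver(example_adrv);

MODULE_LICENSE("GPL");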
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 111fa88a3be4..d7327735b8d0 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -829,7 +829,6 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) { - char alias_wq_name[15]; int ret = 0; int i, j; union ib_gid gid; @@ -875,9 +874,8 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; dev->sriov.alias_guid.ports_guid[i].port = i; - snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i); dev->sriov.alias_guid.ports_guid[i].wq = - alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM); + alloc_ordered_workqueue("alias_guid%d", WQ_MEM_RECLAIM, i); if (!dev->sriov.alias_guid.ports_guid[i].wq) { ret = -ENOMEM; goto err_thread; diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index 12b481d138cf..03aacd526860 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -591,7 +591,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave) int mlx4_ib_cm_init(void) { - cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0); + cm_wq = alloc_workqueue("mlx4_ib_cm", WQ_PERCPU, 0); if (!cm_wq) return -ENOMEM; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 4cd738aae53c..c592374f4a58 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -150,8 +150,12 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, return PTR_ERR(*umem); shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n); - err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt); + if (shift < 0) { + err = shift; + goto err_buf; + } + err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt); if (err) goto err_buf; @@ -172,8 +176,9 @@ err_buf: #define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, - struct ib_udata *udata) + struct uverbs_attr_bundle *attrs) { + struct ib_udata *udata = &attrs->driver_udata; struct ib_device *ibdev = ibcq->device; int entries = attr->cqe; int vector = attr->comp_vector; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index a37cfac5e23f..91c714f72099 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1836,9 +1836,9 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); if (IS_ERR(tun_qp->qp)) { ret = PTR_ERR(tun_qp->qp); + pr_err("Couldn't create %s QP (%pe)\n", + create_tun ? "tunnel" : "special", tun_qp->qp); tun_qp->qp = NULL; - pr_err("Couldn't create %s QP (%d)\n", - create_tun ? 
"tunnel" : "special", ret); return ret; } @@ -2017,14 +2017,14 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, NULL, ctx, &cq_attr); if (IS_ERR(ctx->cq)) { ret = PTR_ERR(ctx->cq); - pr_err("Couldn't create tunnel CQ (%d)\n", ret); + pr_err("Couldn't create tunnel CQ (%pe)\n", ctx->cq); goto err_buf; } ctx->pd = ib_alloc_pd(ctx->ib_dev, 0); if (IS_ERR(ctx->pd)) { ret = PTR_ERR(ctx->pd); - pr_err("Couldn't create tunnel PD (%d)\n", ret); + pr_err("Couldn't create tunnel PD (%pe)\n", ctx->pd); goto err_cq; } @@ -2158,7 +2158,6 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, struct mlx4_ib_demux_ctx *ctx, int port) { - char name[12]; int ret = 0; int i; @@ -2194,24 +2193,21 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, goto err_mcg; } - snprintf(name, sizeof(name), "mlx4_ibt%d", port); - ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + ctx->wq = alloc_ordered_workqueue("mlx4_ibt%d", WQ_MEM_RECLAIM, port); if (!ctx->wq) { pr_err("Failed to create tunnelling WQ for port %d\n", port); ret = -ENOMEM; goto err_wq; } - snprintf(name, sizeof(name), "mlx4_ibwi%d", port); - ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + ctx->wi_wq = alloc_ordered_workqueue("mlx4_ibwi%d", WQ_MEM_RECLAIM, port); if (!ctx->wi_wq) { pr_err("Failed to create wire WQ for port %d\n", port); ret = -ENOMEM; goto err_wiwq; } - snprintf(name, sizeof(name), "mlx4_ibud%d", port); - ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + ctx->ud_wq = alloc_ordered_workqueue("mlx4_ibud%d", WQ_MEM_RECLAIM, port); if (!ctx->ud_wq) { pr_err("Failed to create up/down WQ for port %d\n", port); ret = -ENOMEM; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index dceebcd885bb..dd35e03402ab 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -82,6 +82,8 @@ static const char mlx4_ib_version[] = static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num); +static int mlx4_ib_event(struct notifier_block *this, unsigned long event, + void *param); static struct workqueue_struct *wq; @@ -125,14 +127,16 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u32 port_num) { struct mlx4_ib_dev *ibdev = to_mdev(device); - struct net_device *dev; + struct net_device *dev, *ret = NULL; rcu_read_lock(); - dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); + for_each_netdev_rcu(&init_net, dev) { + if (dev->dev.parent != ibdev->ib_dev.dev.parent || + dev->dev_port + 1 != port_num) + continue; - if (dev) { if (mlx4_is_bonded(ibdev->dev)) { - struct net_device *upper = NULL; + struct net_device *upper; upper = netdev_master_upper_dev_get_rcu(dev); if (upper) { @@ -143,11 +147,14 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, dev = active; } } + + dev_hold(dev); + ret = dev; + break; } - dev_hold(dev); rcu_read_unlock(); - return dev; + return ret; } static int mlx4_ib_update_gids_v1(struct gid_entry *gids, @@ -254,7 +261,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context) int ret = 0; int hw_update = 0; int i; - struct gid_entry *gids = NULL; + struct gid_entry *gids; u16 vlan_id = 0xffff; u8 mac[ETH_ALEN]; @@ -293,8 +300,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context) ret = -ENOMEM; } else { *context = port_gid_table->gids[free].ctx; - 
memcpy(&port_gid_table->gids[free].gid, - &attr->gid, sizeof(attr->gid)); + port_gid_table->gids[free].gid = attr->gid; port_gid_table->gids[free].gid_type = attr->gid_type; port_gid_table->gids[free].vlan_id = vlan_id; port_gid_table->gids[free].ctx->real_index = free; @@ -383,10 +389,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context) } spin_unlock_bh(&iboe->lock); - if (!ret && hw_update) { + if (gids) ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); - kfree(gids); - } + + kfree(gids); return ret; } @@ -431,8 +437,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, struct ib_udata *uhw) { struct mlx4_ib_dev *dev = to_mdev(ibdev); - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int err; int have_ib_ports; struct mlx4_uverbs_ex_query_device cmd; @@ -649,8 +655,8 @@ mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num) static int ib_link_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props, int netw_view) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int ext_active_speed; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; @@ -827,8 +833,8 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port, int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid, int netw_view) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int err = -ENOMEM; struct mlx4_ib_dev *dev = to_mdev(ibdev); int clear = 0; @@ -892,8 +898,8 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port, u64 *sl2vl_tbl) { union sl2vl_tbl_to_u64 sl2vl64; - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; int jj; @@ -952,8 +958,8 @@ static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev) int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey, int netw_view) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; @@ -1968,8 +1974,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) static int init_node_data(struct mlx4_ib_dev *dev) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; + struct ib_smp *in_mad; + struct ib_smp *out_mad; int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; int err = -ENOMEM; @@ -2319,61 +2325,54 @@ unlock: mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); } -static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, - struct net_device *dev, - unsigned long event) +static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + unsigned long event) { - struct mlx4_ib_iboe *iboe; - int update_qps_port = -1; - int port; + struct mlx4_ib_iboe *iboe = &ibdev->iboe; ASSERT_RTNL(); - iboe = &ibdev->iboe; + if (dev->dev.parent != ibdev->ib_dev.dev.parent) + return; spin_lock_bh(&iboe->lock); - mlx4_foreach_ib_transport_port(port, ibdev->dev) { - iboe->netdevs[port - 1] = - mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); + iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? 
dev : NULL; - if (dev == iboe->netdevs[port - 1] && - (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || - event == NETDEV_UP || event == NETDEV_CHANGE)) - update_qps_port = port; + spin_unlock_bh(&iboe->lock); - if (dev == iboe->netdevs[port - 1] && - (event == NETDEV_UP || event == NETDEV_DOWN)) { - enum ib_port_state port_state; - struct ib_event ibev = { }; + if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER) + mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1); +} - if (ib_get_cached_port_state(&ibdev->ib_dev, port, - &port_state)) - continue; +static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev, + unsigned long event) +{ + struct mlx4_ib_dev *mlx4_ibdev = + container_of(ibdev, struct mlx4_ib_dev, ib_dev); + struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe; - if (event == NETDEV_UP && - (port_state != IB_PORT_ACTIVE || - iboe->last_port_state[port - 1] != IB_PORT_DOWN)) - continue; - if (event == NETDEV_DOWN && - (port_state != IB_PORT_DOWN || - iboe->last_port_state[port - 1] != IB_PORT_ACTIVE)) - continue; - iboe->last_port_state[port - 1] = port_state; + if (!net_eq(dev_net(ndev), &init_net)) + return; - ibev.device = &ibdev->ib_dev; - ibev.element.port_num = port; - ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE : - IB_EVENT_PORT_ERR; - ib_dispatch_event(&ibev); - } + ASSERT_RTNL(); + + if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent) + return; + + spin_lock_bh(&iboe->lock); + + iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL; + + if (event == NETDEV_UP || event == NETDEV_DOWN) + ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev); - } spin_unlock_bh(&iboe->lock); - if (update_qps_port > 0) - mlx4_ib_update_qps(ibdev, dev, update_qps_port); + if (event == NETDEV_UP || event == NETDEV_CHANGE) + mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1); } static int mlx4_ib_netdev_event(struct notifier_block *this, @@ -2386,7 +2385,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, return NOTIFY_DONE; ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); - mlx4_ib_scan_netdevs(ibdev, dev, event); + mlx4_ib_scan_netdev(ibdev, dev, event); return NOTIFY_DONE; } @@ -2571,6 +2570,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = { .req_notify_cq = mlx4_ib_arm_cq, .rereg_user_mr = mlx4_ib_rereg_user_mr, .resize_cq = mlx4_ib_resize_cq, + .report_port_event = mlx4_ib_port_event, INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq), @@ -2610,8 +2610,11 @@ static const struct ib_device_ops mlx4_ib_dev_fs_ops = { .destroy_flow = mlx4_ib_destroy_flow, }; -static void *mlx4_ib_add(struct mlx4_dev *dev) +static int mlx4_ib_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; struct mlx4_ib_dev *ibdev; int num_ports = 0; int i, j; @@ -2621,7 +2624,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) int num_req_counters; int allocated; u32 counter_index; - struct counter_index *new_counter_index = NULL; + struct counter_index *new_counter_index; pr_info_once("%s", mlx4_ib_version); @@ -2631,27 +2634,31 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) /* No point in registering a device with no ports... 
*/ if (num_ports == 0) - return NULL; + return -ENODEV; ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev); if (!ibdev) { dev_err(&dev->persist->pdev->dev, "Device struct alloc failed\n"); - return NULL; + return -ENOMEM; } iboe = &ibdev->iboe; - if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) + err = mlx4_pd_alloc(dev, &ibdev->priv_pdn); + if (err) goto err_dealloc; - if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) + err = mlx4_uar_alloc(dev, &ibdev->priv_uar); + if (err) goto err_pd; ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); - if (!ibdev->uar_map) + if (!ibdev->uar_map) { + err = -ENOMEM; goto err_uar; + } MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); ibdev->dev = dev; @@ -2695,7 +2702,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) spin_lock_init(&iboe->lock); - if (init_node_data(ibdev)) + err = init_node_data(ibdev); + if (err) goto err_map; mlx4_init_sl2vl_tbl(ibdev); @@ -2727,6 +2735,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL); if (!new_counter_index) { + err = -ENOMEM; if (allocated) mlx4_counter_free(ibdev->dev, counter_index); goto err_counter; @@ -2744,8 +2753,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) new_counter_index = kmalloc(sizeof(struct counter_index), GFP_KERNEL); - if (!new_counter_index) + if (!new_counter_index) { + err = -ENOMEM; goto err_counter; + } new_counter_index->index = counter_index; new_counter_index->allocated = 0; list_add_tail(&new_counter_index->list, @@ -2774,8 +2785,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, GFP_KERNEL); - if (!ibdev->ib_uc_qpns_bitmap) + if (!ibdev->ib_uc_qpns_bitmap) { + err = -ENOMEM; goto err_steer_qp_release; + } if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) { bitmap_zero(ibdev->ib_uc_qpns_bitmap, @@ -2795,17 +2808,21 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) for (j = 1; j <= ibdev->dev->caps.num_ports; j++) atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); - if (mlx4_ib_alloc_diag_counters(ibdev)) + err = mlx4_ib_alloc_diag_counters(ibdev); + if (err) goto err_steer_free_bitmap; - if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", - &dev->persist->pdev->dev)) + err = ib_register_device(&ibdev->ib_dev, "mlx4_%d", + &dev->persist->pdev->dev); + if (err) goto err_diag_counters; - if (mlx4_ib_mad_init(ibdev)) + err = mlx4_ib_mad_init(ibdev); + if (err) goto err_reg; - if (mlx4_ib_init_sriov(ibdev)) + err = mlx4_ib_init_sriov(ibdev); + if (err) goto err_mad; if (!iboe->nb.notifier_call) { @@ -2839,7 +2856,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) do_slave_init(ibdev, j, 1); } } - return ibdev; + + /* register mlx4 core notifier */ + ibdev->mlx_nb.notifier_call = mlx4_ib_event; + err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb); + WARN(err, "failed to register mlx4 event notifier (%d)", err); + + auxiliary_set_drvdata(adev, ibdev); + return 0; err_notif: if (ibdev->iboe.nb.notifier_call) { @@ -2883,7 +2907,7 @@ err_pd: err_dealloc: ib_dealloc_device(&ibdev->ib_dev); - return NULL; + return err; } int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) @@ -2923,7 +2947,7 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, { int err; size_t flow_size; - struct ib_flow_attr *flow = NULL; + struct ib_flow_attr *flow; struct ib_flow_spec_ib *ib_spec; if (is_attach) { @@ -2943,19 +2967,23 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, err 
= __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC, MLX4_FS_REGULAR, &mqp->reg_id); - } else { - err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); + kfree(flow); + return err; } - kfree(flow); - return err; + + return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); } -static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) +static void mlx4_ib_remove(struct auxiliary_device *adev) { - struct mlx4_ib_dev *ibdev = ibdev_ptr; + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; + struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev); int p; int i; + mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb); + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); ibdev->ib_active = false; @@ -2992,7 +3020,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) { - struct mlx4_ib_demux_work **dm = NULL; + struct mlx4_ib_demux_work **dm; struct mlx4_dev *dev = ibdev->dev; int i; unsigned long flags; @@ -3176,11 +3204,13 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, } } -static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, - enum mlx4_dev_event event, unsigned long param) +static int mlx4_ib_event(struct notifier_block *this, unsigned long event, + void *param) { + struct mlx4_ib_dev *ibdev = + container_of(this, struct mlx4_ib_dev, mlx_nb); + struct mlx4_dev *dev = ibdev->dev; struct ib_event ibev; - struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); struct mlx4_eqe *eqe = NULL; struct ib_event_work *ew; int p = 0; @@ -3190,22 +3220,28 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, (event == MLX4_DEV_EVENT_PORT_DOWN))) { ew = kmalloc(sizeof(*ew), GFP_ATOMIC); if (!ew) - return; + return NOTIFY_DONE; INIT_WORK(&ew->work, handle_bonded_port_state_event); ew->ib_dev = ibdev; queue_work(wq, &ew->work); - return; + return NOTIFY_DONE; } - if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) + switch (event) { + case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: + break; + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: eqe = (struct mlx4_eqe *)param; - else - p = (int) param; + break; + default: + p = *(int *)param; + break; + } switch (event) { case MLX4_DEV_EVENT_PORT_UP: if (p > ibdev->num_ports) - return; + return NOTIFY_DONE; if (!mlx4_is_slave(dev) && rdma_port_get_link_layer(&ibdev->ib_dev, p) == IB_LINK_LAYER_INFINIBAND) { @@ -3220,7 +3256,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_DOWN: if (p > ibdev->num_ports) - return; + return NOTIFY_DONE; ibev.event = IB_EVENT_PORT_ERR; break; @@ -3233,7 +3269,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - return; + return NOTIFY_DONE; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); @@ -3243,7 +3279,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, queue_work(wq, &ew->work); else handle_port_mgmt_change_event(&ew->work); - return; + return NOTIFY_DONE; case MLX4_DEV_EVENT_SLAVE_INIT: /* here, p is the slave id */ @@ -3259,7 +3295,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, 1); } } - return; + return NOTIFY_DONE; case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: if (mlx4_is_master(dev)) { @@ -3275,22 +3311,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void 
*ibdev_ptr, } /* here, p is the slave id */ do_slave_init(ibdev, p, 0); - return; + return NOTIFY_DONE; default: - return; + return NOTIFY_DONE; } - ibev.device = ibdev_ptr; + ibev.device = &ibdev->ib_dev; ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; ib_dispatch_event(&ibev); + return NOTIFY_DONE; } -static struct mlx4_interface mlx4_ib_interface = { - .add = mlx4_ib_add, - .remove = mlx4_ib_remove, - .event = mlx4_ib_event, +static const struct auxiliary_device_id mlx4_ib_id_table[] = { + { .name = MLX4_ADEV_NAME ".ib" }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx4_ib_id_table); + +static struct mlx4_adrv mlx4_ib_adrv = { + .adrv = { + .name = "ib", + .probe = mlx4_ib_probe, + .remove = mlx4_ib_remove, + .id_table = mlx4_ib_id_table, + }, .protocol = MLX4_PROT_IB_IPV6, .flags = MLX4_INTFF_BONDING }; @@ -3303,6 +3350,10 @@ static int __init mlx4_ib_init(void) if (!wq) return -ENOMEM; + err = mlx4_ib_qp_event_init(); + if (err) + goto clean_qp_event; + err = mlx4_ib_cm_init(); if (err) goto clean_wq; @@ -3311,7 +3362,7 @@ static int __init mlx4_ib_init(void) if (err) goto clean_cm; - err = mlx4_register_interface(&mlx4_ib_interface); + err = mlx4_register_auxiliary_driver(&mlx4_ib_adrv); if (err) goto clean_mcg; @@ -3324,15 +3375,19 @@ clean_cm: mlx4_ib_cm_destroy(); clean_wq: + mlx4_ib_qp_event_cleanup(); + +clean_qp_event: destroy_workqueue(wq); return err; } static void __exit mlx4_ib_cleanup(void) { - mlx4_unregister_interface(&mlx4_ib_interface); + mlx4_unregister_auxiliary_driver(&mlx4_ib_adrv); mlx4_ib_mcg_destroy(); mlx4_ib_cm_destroy(); + mlx4_ib_qp_event_cleanup(); destroy_workqueue(wq); } diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index 33f525b744f2..e279e69b9a51 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -43,7 +43,7 @@ #define MAX_VFS 80 #define MAX_PEND_REQS_PER_FUNC 4 -#define MAD_TIMEOUT_MS 2000 +#define MAD_TIMEOUT_SEC 2 #define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg) #define mcg_error(fmt, arg...) 
pr_err(fmt, ##arg) @@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad if (!ret) { /* calls mlx4_ib_mcg_timeout_handler */ queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, - msecs_to_jiffies(MAD_TIMEOUT_MS)); + secs_to_jiffies(MAD_TIMEOUT_SEC)); } return ret; @@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state) if (!ret) { /* calls mlx4_ib_mcg_timeout_handler */ queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, - msecs_to_jiffies(MAD_TIMEOUT_MS)); + secs_to_jiffies(MAD_TIMEOUT_SEC)); } return ret; @@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy for (i = 0; i < MAX_VFS; ++i) clean_vf_mcast(ctx, i); - end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000); + end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3); do { count = 0; mutex_lock(&ctx->mcg_table_lock); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6a3b0f121045..5df5b955114e 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -38,6 +38,7 @@ #include <linux/list.h> #include <linux/mutex.h> #include <linux/idr.h> +#include <linux/notifier.h> #include <rdma/ib_verbs.h> #include <rdma/ib_umem.h> @@ -644,6 +645,7 @@ struct mlx4_ib_dev { spinlock_t reset_flow_resource_lock; struct list_head qp_list; struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES]; + struct notifier_block mlx_nb; }; struct ib_event_work { @@ -665,6 +667,9 @@ struct mlx4_uverbs_ex_query_device { __u32 reserved; }; +/* 4k - 4G */ +#define MLX4_PAGE_SIZE_SUPPORTED ((unsigned long)GENMASK_ULL(31, 12)) + static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) { return container_of(ibdev, struct mlx4_ib_dev, ib_dev); @@ -754,6 +759,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, struct ib_umem *umem); struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, + struct ib_dmah *dmah, struct ib_udata *udata); int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata); int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata); @@ -765,7 +771,7 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, - struct ib_udata *udata); + struct uverbs_attr_bundle *attrs); int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); @@ -934,10 +940,24 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table) { return 0; } -int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, - int *num_of_mtts); +static inline int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, + u64 start, + int *num_of_mtts) +{ + unsigned long pg_sz; + + pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, start); + if (!pg_sz) + return -EOPNOTSUPP; + + *num_of_mtts = ib_umem_num_dma_blocks(umem, pg_sz); + return order_base_2(pg_sz); +} int mlx4_ib_cm_init(void); void mlx4_ib_cm_destroy(void); +int mlx4_ib_qp_event_init(void); +void mlx4_ib_qp_event_cleanup(void); + #endif /* MLX4_IB_H */ diff 
--git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index a40bf58bcdd3..94464f1694d9 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -87,286 +87,20 @@ err_free: return ERR_PTR(err); } -enum { - MLX4_MAX_MTT_SHIFT = 31 -}; - -static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev, - struct mlx4_mtt *mtt, - u64 mtt_size, u64 mtt_shift, u64 len, - u64 cur_start_addr, u64 *pages, - int *start_index, int *npages) -{ - u64 cur_end_addr = cur_start_addr + len; - u64 cur_end_addr_aligned = 0; - u64 mtt_entries; - int err = 0; - int k; - - len += (cur_start_addr & (mtt_size - 1ULL)); - cur_end_addr_aligned = round_up(cur_end_addr, mtt_size); - len += (cur_end_addr_aligned - cur_end_addr); - if (len & (mtt_size - 1ULL)) { - pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n", - len, mtt_size); - return -EINVAL; - } - - mtt_entries = (len >> mtt_shift); - - /* - * Align the MTT start address to the mtt_size. - * Required to handle cases when the MR starts in the middle of an MTT - * record. Was not required in old code since the physical addresses - * provided by the dma subsystem were page aligned, which was also the - * MTT size. - */ - cur_start_addr = round_down(cur_start_addr, mtt_size); - /* A new block is started ... */ - for (k = 0; k < mtt_entries; ++k) { - pages[*npages] = cur_start_addr + (mtt_size * k); - (*npages)++; - /* - * Be friendly to mlx4_write_mtt() and pass it chunks of - * appropriate size. - */ - if (*npages == PAGE_SIZE / sizeof(u64)) { - err = mlx4_write_mtt(dev->dev, mtt, *start_index, - *npages, pages); - if (err) - return err; - - (*start_index) += *npages; - *npages = 0; - } - } - - return 0; -} - -static inline u64 alignment_of(u64 ptr) -{ - return ilog2(ptr & (~(ptr - 1))); -} - -static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start, - u64 current_block_end, - u64 block_shift) -{ - /* Check whether the alignment of the new block is aligned as well as - * the previous block. - * Block address must start with zeros till size of entity_size. - */ - if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0) - /* - * It is not as well aligned as the previous block-reduce the - * mtt size accordingly. Here we take the last right bit which - * is 1. - */ - block_shift = alignment_of(next_block_start); - - /* - * Check whether the alignment of the end of previous block - is it - * aligned as well as the start of the block - */ - if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0) - /* - * It is not as well aligned as the start of the block - - * reduce the mtt size accordingly. - */ - block_shift = alignment_of(current_block_end); - - return block_shift; -} - int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, struct ib_umem *umem) { - u64 *pages; - u64 len = 0; - int err = 0; - u64 mtt_size; - u64 cur_start_addr = 0; - u64 mtt_shift; - int start_index = 0; - int npages = 0; - struct scatterlist *sg; - int i; - - pages = (u64 *) __get_free_page(GFP_KERNEL); - if (!pages) - return -ENOMEM; - - mtt_shift = mtt->page_shift; - mtt_size = 1ULL << mtt_shift; - - for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { - if (cur_start_addr + len == sg_dma_address(sg)) { - /* still the same block */ - len += sg_dma_len(sg); - continue; - } - /* - * A new block is started ... 
- * If len is malaligned, write an extra mtt entry to cover the - * misaligned area (round up the division) - */ - err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size, - mtt_shift, len, - cur_start_addr, - pages, &start_index, - &npages); - if (err) - goto out; - - cur_start_addr = sg_dma_address(sg); - len = sg_dma_len(sg); - } + struct ib_block_iter biter; + int err, i = 0; + u64 addr; - /* Handle the last block */ - if (len > 0) { - /* - * If len is malaligned, write an extra mtt entry to cover - * the misaligned area (round up the division) - */ - err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size, - mtt_shift, len, - cur_start_addr, pages, - &start_index, &npages); + rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) { + addr = rdma_block_iter_dma_address(&biter); + err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr); if (err) - goto out; + return err; } - - if (npages) - err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages); - -out: - free_page((unsigned long) pages); - return err; -} - -/* - * Calculate optimal mtt size based on contiguous pages. - * Function will return also the number of pages that are not aligned to the - * calculated mtt_size to be added to total number of pages. For that we should - * check the first chunk length & last chunk length and if not aligned to - * mtt_size we should increment the non_aligned_pages number. All chunks in the - * middle already handled as part of mtt shift calculation for both their start - * & end addresses. - */ -int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, - int *num_of_mtts) -{ - u64 block_shift = MLX4_MAX_MTT_SHIFT; - u64 min_shift = PAGE_SHIFT; - u64 last_block_aligned_end = 0; - u64 current_block_start = 0; - u64 first_block_start = 0; - u64 current_block_len = 0; - u64 last_block_end = 0; - struct scatterlist *sg; - u64 current_block_end; - u64 misalignment_bits; - u64 next_block_start; - u64 total_len = 0; - int i; - - *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE); - - for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { - /* - * Initialization - save the first chunk start as the - * current_block_start - block means contiguous pages. - */ - if (current_block_len == 0 && current_block_start == 0) { - current_block_start = sg_dma_address(sg); - first_block_start = current_block_start; - /* - * Find the bits that are different between the physical - * address and the virtual address for the start of the - * MR. - * umem_get aligned the start_va to a page boundary. - * Therefore, we need to align the start va to the same - * boundary. - * misalignment_bits is needed to handle the case of a - * single memory region. In this case, the rest of the - * logic will not reduce the block size. If we use a - * block size which is bigger than the alignment of the - * misalignment bits, we might use the virtual page - * number instead of the physical page number, resulting - * in access to the wrong data. - */ - misalignment_bits = - (start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^ - current_block_start; - block_shift = min(alignment_of(misalignment_bits), - block_shift); - } - - /* - * Go over the scatter entries and check if they continue the - * previous scatter entry. - */ - next_block_start = sg_dma_address(sg); - current_block_end = current_block_start + current_block_len; - /* If we have a split (non-contig.) 
between two blocks */ - if (current_block_end != next_block_start) { - block_shift = mlx4_ib_umem_calc_block_mtt - (next_block_start, - current_block_end, - block_shift); - - /* - * If we reached the minimum shift for 4k page we stop - * the loop. - */ - if (block_shift <= min_shift) - goto end; - - /* - * If not saved yet we are in first block - we save the - * length of first block to calculate the - * non_aligned_pages number at the end. - */ - total_len += current_block_len; - - /* Start a new block */ - current_block_start = next_block_start; - current_block_len = sg_dma_len(sg); - continue; - } - /* The scatter entry is another part of the current block, - * increase the block size. - * An entry in the scatter can be larger than 4k (page) as of - * dma mapping which merge some blocks together. - */ - current_block_len += sg_dma_len(sg); - } - - /* Account for the last block in the total len */ - total_len += current_block_len; - /* Add to the first block the misalignment that it suffers from. */ - total_len += (first_block_start & ((1ULL << block_shift) - 1ULL)); - last_block_end = current_block_start + current_block_len; - last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift); - total_len += (last_block_aligned_end - last_block_end); - - if (total_len & ((1ULL << block_shift) - 1ULL)) - pr_warn("misaligned total length detected (%llu, %llu)!", - total_len, block_shift); - - *num_of_mtts = total_len >> block_shift; -end: - if (block_shift < min_shift) { - /* - * If shift is less than the min we set a warning and return the - * min shift. - */ - pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift); - - block_shift = min_shift; - } - return block_shift; + return 0; } static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start, @@ -405,6 +139,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start, struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, + struct ib_dmah *dmah, struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(pd->device); @@ -413,6 +148,9 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int err; int n; + if (dmah) + return ERR_PTR(-EOPNOTSUPP); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); @@ -424,6 +162,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, } shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); + if (shift < 0) { + err = shift; + goto err_umem; + } err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length, convert_access(access_flags), n, shift, &mr->mmr); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index b17d6ebc5b70..f2887ae6390e 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -102,6 +102,14 @@ enum mlx4_ib_source_type { MLX4_IB_RWQ_SRC = 1, }; +struct mlx4_ib_qp_event_work { + struct work_struct work; + struct mlx4_qp *qp; + enum mlx4_event type; +}; + +static struct workqueue_struct *mlx4_ib_qp_event_wq; + static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { if (!mlx4_is_master(dev->dev)) @@ -200,50 +208,77 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) } } +static void mlx4_ib_handle_qp_event(struct work_struct *_work) +{ + struct mlx4_ib_qp_event_work *qpe_work = + container_of(_work, struct mlx4_ib_qp_event_work, work); + struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; + struct ib_event event = 
{}; + + event.device = ibqp->device; + event.element.qp = ibqp; + + switch (qpe_work->type) { + case MLX4_EVENT_TYPE_PATH_MIG: + event.event = IB_EVENT_PATH_MIG; + break; + case MLX4_EVENT_TYPE_COMM_EST: + event.event = IB_EVENT_COMM_EST; + break; + case MLX4_EVENT_TYPE_SQ_DRAINED: + event.event = IB_EVENT_SQ_DRAINED; + break; + case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: + event.event = IB_EVENT_QP_LAST_WQE_REACHED; + break; + case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case MLX4_EVENT_TYPE_PATH_MIG_FAILED: + event.event = IB_EVENT_PATH_MIG_ERR; + break; + case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + default: + pr_warn("Unexpected event type %d on QP %06x\n", + qpe_work->type, qpe_work->qp->qpn); + goto out; + } + + ibqp->event_handler(&event, ibqp->qp_context); + +out: + mlx4_put_qp(qpe_work->qp); + kfree(qpe_work); +} + static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) { - struct ib_event event; struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; + struct mlx4_ib_qp_event_work *qpe_work; if (type == MLX4_EVENT_TYPE_PATH_MIG) to_mibqp(qp)->port = to_mibqp(qp)->alt_port; - if (ibqp->event_handler) { - event.device = ibqp->device; - event.element.qp = ibqp; - switch (type) { - case MLX4_EVENT_TYPE_PATH_MIG: - event.event = IB_EVENT_PATH_MIG; - break; - case MLX4_EVENT_TYPE_COMM_EST: - event.event = IB_EVENT_COMM_EST; - break; - case MLX4_EVENT_TYPE_SQ_DRAINED: - event.event = IB_EVENT_SQ_DRAINED; - break; - case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: - event.event = IB_EVENT_QP_LAST_WQE_REACHED; - break; - case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: - event.event = IB_EVENT_QP_FATAL; - break; - case MLX4_EVENT_TYPE_PATH_MIG_FAILED: - event.event = IB_EVENT_PATH_MIG_ERR; - break; - case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: - event.event = IB_EVENT_QP_REQ_ERR; - break; - case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: - event.event = IB_EVENT_QP_ACCESS_ERR; - break; - default: - pr_warn("Unexpected event type %d " - "on QP %06x\n", type, qp->qpn); - return; - } + if (!ibqp->event_handler) + goto out_no_handler; - ibqp->event_handler(&event, ibqp->qp_context); - } + qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC); + if (!qpe_work) + goto out_no_handler; + + qpe_work->qp = qp; + qpe_work->type = type; + INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event); + queue_work(mlx4_ib_qp_event_wq, &qpe_work->work); + return; + +out_no_handler: + mlx4_put_qp(qp); } static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) @@ -412,9 +447,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) { + u32 cnt; + /* Sanity check SQ size before proceeding */ - if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes || - ucmd->log_sq_stride > + if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || + cnt > dev->dev->caps.max_wqes) + return -EINVAL; + if (ucmd->log_sq_stride > ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) || ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) return -EINVAL; @@ -526,15 +565,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, return (-EOPNOTSUPP); } - if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | - MLX4_IB_RX_HASH_DST_IPV4 | - MLX4_IB_RX_HASH_SRC_IPV6 | - MLX4_IB_RX_HASH_DST_IPV6 | - MLX4_IB_RX_HASH_SRC_PORT_TCP | - MLX4_IB_RX_HASH_DST_PORT_TCP | - MLX4_IB_RX_HASH_SRC_PORT_UDP | - 
MLX4_IB_RX_HASH_DST_PORT_UDP | - MLX4_IB_RX_HASH_INNER)) { + if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 | + MLX4_IB_RX_HASH_DST_IPV4 | + MLX4_IB_RX_HASH_SRC_IPV6 | + MLX4_IB_RX_HASH_DST_IPV6 | + MLX4_IB_RX_HASH_SRC_PORT_TCP | + MLX4_IB_RX_HASH_DST_PORT_TCP | + MLX4_IB_RX_HASH_SRC_PORT_UDP | + MLX4_IB_RX_HASH_DST_PORT_UDP | + MLX4_IB_RX_HASH_INNER)) { pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", ucmd->rx_hash_fields_mask); return (-EOPNOTSUPP); @@ -886,8 +925,12 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, } shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); - err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); + if (shift < 0) { + err = shift; + goto err_buf; + } + err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); if (err) goto err_buf; @@ -1069,8 +1112,12 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, } shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); - err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); + if (shift < 0) { + err = shift; + goto err_buf; + } + err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); if (err) goto err_buf; @@ -1605,7 +1652,8 @@ int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, sqp->roce_v2_gsi = ib_create_qp(pd, init_attr); if (IS_ERR(sqp->roce_v2_gsi)) { - pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi)); + pr_err("Failed to create GSI QP for RoCEv2 (%pe)\n", + sqp->roce_v2_gsi); sqp->roce_v2_gsi = NULL; } else { to_mqp(sqp->roce_v2_gsi)->flags |= @@ -4468,3 +4516,17 @@ void mlx4_ib_drain_rq(struct ib_qp *qp) handle_drain_completion(cq, &rdrain, dev); } + +int mlx4_ib_qp_event_init(void) +{ + mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0); + if (!mlx4_ib_qp_event_wq) + return -ENOMEM; + + return 0; +} + +void mlx4_ib_qp_event_cleanup(void) +{ + destroy_workqueue(mlx4_ib_qp_event_wq); +} diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 24ee79aa2122..88f534cf690e 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, static int add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; - char buff[11]; + char buff[12]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 ; struct ib_port_attr attr; |
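
The mr.c rewrite above drops most of the hand-rolled MTT sizing code in favour of the RDMA core helpers: mlx4_ib_umem_calc_optimal_mtt_size() becomes a thin wrapper around ib_umem_find_best_pgsz()/ib_umem_num_dma_blocks(), and mlx4_ib_umem_write_mtt() walks the umem with rdma_umem_for_each_dma_block(). A minimal sketch of that pattern, with a hypothetical write_entry() callback standing in for mlx4_write_mtt():

#include <linux/bits.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Page-size bitmap mirroring MLX4_PAGE_SIZE_SUPPORTED in the diff above. */
#define EXAMPLE_PAGE_SIZE_SUPPORTED ((unsigned long)GENMASK_ULL(31, 12))

/*
 * Pick the largest page size the umem layout allows, then program one
 * translation entry per DMA block of that size.  write_entry() is a
 * hypothetical stand-in for the device-specific call.
 */
static int example_write_translation(struct ib_umem *umem, u64 start_va,
				     int (*write_entry)(int index, u64 addr))
{
	struct ib_block_iter biter;
	unsigned long pg_sz;
	int i = 0, err;

	pg_sz = ib_umem_find_best_pgsz(umem, EXAMPLE_PAGE_SIZE_SUPPORTED,
				       start_va);
	if (!pg_sz)
		return -EOPNOTSUPP;	/* no supported page size fits */

	rdma_umem_for_each_dma_block(umem, &biter, pg_sz) {
		err = write_entry(i++, rdma_block_iter_dma_address(&biter));
		if (err)
			return err;
	}
	return 0;
}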

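One smaller cleanup recurs through the diff (alias_GUID.c and mad.c): the local snprintf buffers go away because alloc_ordered_workqueue() already accepts a printf-style format string plus arguments for the queue name. A before/after sketch with hypothetical helpers:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Old pattern: format the name into a local buffer first. */
static struct workqueue_struct *port_wq_old(int port)
{
	char name[16];

	snprintf(name, sizeof(name), "alias_guid%d", port);
	return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
}

/* New pattern: let alloc_ordered_workqueue() format the name itself. */
static struct workqueue_struct *port_wq_new(int port)
{
	return alloc_ordered_workqueue("alias_guid%d", WQ_MEM_RECLAIM, port);
}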