Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_cq.c')
-rw-r--r--   drivers/net/ethernet/mellanox/mlx4/en_cq.c   150
1 file changed, 94 insertions(+), 56 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d5047cdb3..0e92956e84cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,30 +44,45 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv,
-		      struct mlx4_en_cq *cq,
-		      int entries, int ring, enum cq_type mode)
+		      struct mlx4_en_cq **pcq,
+		      int entries, int ring, enum cq_type mode,
+		      int node)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
 	int err;
 
+	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
+	if (!cq) {
+		en_err(priv, "Failed to allocate CQ structure\n");
+		return -ENOMEM;
+	}
+
 	cq->size = entries;
 	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
 
 	cq->ring = ring;
-	cq->is_tx = mode;
-	spin_lock_init(&cq->lock);
+	cq->type = mode;
+	cq->vector = mdev->dev->caps.num_comp_vectors;
 
+	/* Allocate HW buffers on provided NUMA node.
+	 * dev->numa_node is used in mtt range allocation flow.
+	 */
+	set_dev_node(&mdev->dev->persist->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
-				cq->buf_size, 2 * PAGE_SIZE);
+				cq->buf_size);
+	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
 	if (err)
-		return err;
+		goto err_cq;
 
-	err = mlx4_en_map_buffer(&cq->wqres.buf);
-	if (err)
-		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	else
-		cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+	*pcq = cq;
 
+	return 0;
+
+err_cq:
+	kfree(cq);
+	*pcq = NULL;
 	return err;
 }
 
@@ -75,15 +90,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 			int cq_idx)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int err = 0;
-	char name[25];
+	int irq, err = 0;
 	int timestamp_en = 0;
-	struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
-		priv->dev->rx_cpu_rmap;
-#else
-		NULL;
-#endif
+	bool assigned_eq = false;
 
 	cq->dev = mdev->pndev[priv->port];
 	cq->mcq.set_ci_db  = cq->wqres.db.db;
@@ -92,79 +101,110 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	*cq->mcq.arm_db    = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
-	if (cq->is_tx == RX) {
-		if (mdev->dev->caps.comp_pool) {
-			if (!cq->vector) {
-				sprintf(name, "%s-%d", priv->dev->name,
-					cq->ring);
-				/* Set IRQ for specific name (per ring) */
-				if (mlx4_assign_eq(mdev->dev, name, rmap,
-						   &cq->vector)) {
-					cq->vector = (cq->ring + 1 + priv->port)
-					    % mdev->dev->caps.num_comp_vectors;
-					mlx4_warn(mdev, "Failed Assigning an EQ to "
-						  "%s ,Falling back to legacy EQ's\n",
-						  name);
-				}
+	if (cq->type == RX) {
+		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+					     cq->vector)) {
+			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+			err = mlx4_assign_eq(mdev->dev, priv->port,
+					     &cq->vector);
+			if (err) {
+				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
+					 cq->vector);
+				goto free_eq;
 			}
-		} else {
-			cq->vector = (cq->ring + 1 + priv->port) %
-				mdev->dev->caps.num_comp_vectors;
+
+			assigned_eq = true;
 		}
+		irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
+		cq->aff_mask = irq_get_effective_affinity_mask(irq);
 	} else {
 		/* For TX we use the same irq per ring we assigned for the RX */
 		struct mlx4_en_cq *rx_cq;
 		cq_idx = cq_idx % priv->rx_ring_num;
-		rx_cq = &priv->rx_cq[cq_idx];
+		rx_cq = priv->rx_cq[cq_idx];
 		cq->vector = rx_cq->vector;
+		irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
 	}
 
-	if (!cq->is_tx)
-		cq->size = priv->rx_ring[cq->ring].actual_size;
+	if (cq->type == RX)
+		cq->size = priv->rx_ring[cq->ring]->actual_size;
 
-	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
-	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
+	if ((cq->type != RX && priv->hwtstamp_config.tx_type) ||
+	    (cq->type == RX && priv->hwtstamp_config.rx_filter))
 		timestamp_en = 1;
 
+	cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
 			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
-			    cq->vector, 0, timestamp_en);
+			    cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
 	if (err)
-		return err;
+		goto free_eq;
 
-	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+	cq->cq_idx = cq_idx;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (!cq->is_tx) {
-		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-		napi_hash_add(&cq->napi);
+	switch (cq->type) {
+	case TX:
+		cq->mcq.comp = mlx4_en_tx_irq;
+		netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
+		netif_napi_set_irq(&cq->napi, irq);
+		napi_enable(&cq->napi);
+		netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi);
+		break;
+	case RX:
+		cq->mcq.comp = mlx4_en_rx_irq;
+		netif_napi_add_config(cq->dev, &cq->napi, mlx4_en_poll_rx_cq,
+				      cq_idx);
+		netif_napi_set_irq(&cq->napi, irq);
 		napi_enable(&cq->napi);
+		netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
+		break;
+	case TX_XDP:
+		/* nothing regarding napi, it's shared with rx ring */
+		cq->xdp_busy = false;
+		break;
 	}
 
 	return 0;
+
+free_eq:
+	if (assigned_eq)
+		mlx4_release_eq(mdev->dev, cq->vector);
+	cq->vector = mdev->dev->caps.num_comp_vectors;
+	return err;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq = *pcq;
 
-	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	if (priv->mdev->dev->caps.comp_pool && cq->vector)
+	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+	    cq->type == RX)
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
+	kfree(cq);
+	*pcq = NULL;
 }
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
-	if (!cq->is_tx) {
+	if (cq->type != TX_XDP) {
+		enum netdev_queue_type qtype;
+
+		if (cq->type == RX)
+			qtype = NETDEV_QUEUE_TYPE_RX;
+		else
+			qtype = NETDEV_QUEUE_TYPE_TX;
+
+		netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL);
 		napi_disable(&cq->napi);
-		napi_hash_del(&cq->napi);
-		synchronize_rcu();
 		netif_napi_del(&cq->napi);
 	}
 
@@ -178,12 +218,10 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 			      cq->moder_cnt, cq->moder_time);
 }
 
-int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
 	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
 		    &priv->mdev->uar_lock);
-
-	return 0;
 }
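The hunks above change the CQ allocation API: mlx4_en_create_cq() now takes a struct mlx4_en_cq ** plus a NUMA node and allocates the CQ structure itself with kzalloc_node(), while mlx4_en_destroy_cq() frees it and clears the caller's pointer. A minimal caller sketch, assuming the existing mlx4_en driver context (priv, prof and the ring index come from an mlx4_en_alloc_resources()-style flow; the helper name below is hypothetical and not part of this diff):

/* Hypothetical helper, not part of the diff: create and later destroy
 * one RX CQ using the reworked double-pointer + NUMA-node API.
 */
static int example_setup_rx_cq(struct mlx4_en_priv *priv,
			       struct mlx4_en_port_profile *prof,
			       int ring_idx)
{
	/* Spread CQ structures and HW buffers across NUMA nodes per ring. */
	int node = cpu_to_node(ring_idx % num_online_cpus());
	int err;

	/* The CQ struct is now allocated inside mlx4_en_create_cq() on
	 * 'node'; the caller only supplies a pointer slot to fill in.
	 */
	err = mlx4_en_create_cq(priv, &priv->rx_cq[ring_idx],
				prof->rx_ring_size, ring_idx, RX, node);
	if (err)
		return err;

	/* ... mlx4_en_activate_cq()/mlx4_en_deactivate_cq() happen here ... */

	/* Destroy frees the structure and NULLs priv->rx_cq[ring_idx]. */
	mlx4_en_destroy_cq(priv, &priv->rx_cq[ring_idx]);
	return 0;
}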
