author     Jakub Kicinski <kuba@kernel.org>  2023-08-08 16:28:28 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2023-08-08 16:28:29 -0700
commit     f5f502a3ea349bc549dd42fb2aca7daaccc9bd03 (patch)
tree       bf02cdf1ec64128303d922b66e877ec602cc6a52 /drivers/net
parent     2c2b88748fd5028a7771a864d46b1b78fb436a07 (diff)
parent     b56fb19c337951fc4daba646fab2c50bb4c41c58 (diff)
Merge tag 'mlx5-updates-2023-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:

====================
mlx5-updates-2023-08-07

1) A few cleanups

2) Dynamic completion EQs

The driver creates completion EQs for all vectors directly on driver load,
even if those EQs will not be utilized later on. To allow more flexibility
in managing completion EQs and to reduce the memory overhead of driver
load, this series makes completion EQ creation dynamic: completion EQs are
created only when needed.

Patch #1 introduces a counter for tracking the current number of completion EQs.
Patches #2-6 refactor the existing infrastructure for managing completion EQs
and completion IRQs to be compatible with per-vector allocation/release requests.
Patches #7-8 make the CPU-to-IRQ affinity calculation resilient to the case
where affinity is requested but the completion IRQ is not allocated yet.
Patch #9 is a function rename.
Patch #10 handles the corner case of an SF performing an IRQ request when no
SF IRQ pool is found and no PF IRQ exists for the same vector.
Patch #11 switches the driver to dynamically allocated completion EQs.

* tag 'mlx5-updates-2023-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Bridge, Only handle registered netdev bridge events
  net/mlx5: E-Switch, Remove redundant arg ignore_flow_lvl
  net/mlx5: Fix typo reminder -> remainder
  net/mlx5: remove many unnecessary NULL values
  net/mlx5: Allocate completion EQs dynamically
  net/mlx5: Handle SF IRQ request in the absence of SF IRQ pool
  net/mlx5: Rename mlx5_comp_vectors_count() to mlx5_comp_vectors_max()
  net/mlx5: Add IRQ vector to CPU lookup function
  net/mlx5: Introduce mlx5_cpumask_default_spread
  net/mlx5: Implement single completion EQ create/destroy methods
  net/mlx5: Use xarray to store and manage completion EQs
  net/mlx5: Refactor completion IRQ request/release handlers in EQ layer
  net/mlx5: Use xarray to store and manage completion IRQs
  net/mlx5: Refactor completion IRQ request/release API
  net/mlx5: Track the current number of completion EQs
====================

Link: https://lore.kernel.org/r/20230807175642.20834-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
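
A quick illustration before the diffstat: with this series, consumers call mlx5_comp_eqn_get()/mlx5_comp_irqn_get() (replacing mlx5_vector2eqn()/mlx5_vector2irqn() in the diff below), and the completion EQ for a vector is created on first use under the EQ table's comp_lock. The following standalone userspace C sketch is not the driver code; it uses a plain array and a pthread mutex in place of the kernel xarray and comp_lock, purely to show the get-or-create shape of that path.

/*
 * Standalone sketch (not the mlx5 driver code) of the "create on first
 * use" pattern this series introduces for completion EQs: a table is
 * filled lazily under a lock, so EQs are allocated only for vectors
 * that are actually used, and repeated lookups return the same entry.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_COMP_EQS 8			/* stand-in for table->max_comp_eqs */

struct comp_eq {
	int eqn;			/* EQ number assigned at creation */
};

static struct comp_eq *comp_eqs[MAX_COMP_EQS];	/* stand-in for the xarray */
static pthread_mutex_t comp_lock = PTHREAD_MUTEX_INITIALIZER;
static int curr_comp_eqs;

/* Shaped like mlx5_comp_eqn_get(): return the EQN for @vecidx, creating
 * the EQ on demand if it does not exist yet.
 */
static int comp_eqn_get(int vecidx, int *eqn)
{
	struct comp_eq *eq;
	int ret = 0;

	if (vecidx < 0 || vecidx >= MAX_COMP_EQS)
		return -EINVAL;

	pthread_mutex_lock(&comp_lock);
	eq = comp_eqs[vecidx];
	if (!eq) {
		/* the "create_comp_eq" step, reached only on first use */
		eq = calloc(1, sizeof(*eq));
		if (!eq) {
			ret = -ENOMEM;
			goto out;
		}
		eq->eqn = 0x100 + vecidx;	/* fake EQN for the demo */
		comp_eqs[vecidx] = eq;
		curr_comp_eqs++;
	}
	*eqn = eq->eqn;
out:
	pthread_mutex_unlock(&comp_lock);
	return ret;
}

int main(void)
{
	int eqn;

	comp_eqn_get(3, &eqn);	/* first use of vector 3 creates its EQ */
	printf("vector 3 -> EQN 0x%x, EQs allocated: %d\n", eqn, curr_comp_eqs);
	comp_eqn_get(3, &eqn);	/* second lookup reuses the existing EQ */
	printf("vector 3 -> EQN 0x%x, EQs allocated: %d\n", eqn, curr_comp_eqs);
	return 0;
}
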
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h                  |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c       |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c             |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c             |  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c                  | 360
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c             |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c    |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c           |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c           |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c        |  78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c             |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h              |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c         |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h            |  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c             |  60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c    |   4
16 files changed, 300 insertions, 270 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0f8f70b91485..c1deb04ba7e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -193,7 +193,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
+ min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS);
}
/* The maximum WQE size can be retrieved by max_wqe_sz_sq in
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 560800246573..0fef853eab62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -77,6 +77,10 @@ mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_es
return NULL;
priv = netdev_priv(dev);
+
+ if (!priv->mdev->priv.eswitch->br_offloads)
+ return NULL;
+
rpriv = priv->ppriv;
*vport_num = rpriv->rep->vport;
*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 698647cc8c0f..5620d9f97518 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -127,7 +127,7 @@ static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
{
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+ int cpu = mlx5_comp_vector_get_cpu(priv->mdev, 0);
struct net_device *netdev = priv->netdev;
struct mlx5e_trap *t;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ca80c220e4b0..61a5ddd6e585 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1989,7 +1989,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;
- err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
+ err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
if (err)
return err;
@@ -2445,14 +2445,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
+ int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
struct net_device *netdev = priv->netdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
unsigned int irq;
int err;
- err = mlx5_vector2irqn(priv->mdev, ix, &irq);
+ err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
if (err)
return err;
@@ -2856,13 +2856,13 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev = priv->mdev;
int num_comp_vectors, ix, irq;
- num_comp_vectors = mlx5_comp_vectors_count(mdev);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
for (ix = 0; ix < params->num_channels; ix++) {
cpumask_clear(priv->scratchpad.cpumask);
for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+ int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 3db4866d7880..ea0405e0a43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -47,7 +47,7 @@ enum {
static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
struct mlx5_eq_table {
- struct list_head comp_eqs_list;
+ struct xarray comp_eqs;
struct mlx5_eq_async pages_eq;
struct mlx5_eq_async cmd_eq;
struct mlx5_eq_async async_eq;
@@ -58,11 +58,14 @@ struct mlx5_eq_table {
struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */
- int num_comp_eqs;
+ struct mutex comp_lock; /* sync comp eqs creations */
+ int curr_comp_eqs;
+ int max_comp_eqs;
struct mlx5_irq_table *irq_table;
- struct mlx5_irq **comp_irqs;
+ struct xarray comp_irqs;
struct mlx5_irq *ctrl_irq;
struct cpu_rmap *rmap;
+ struct cpumask used_cpus;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -452,13 +455,22 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
eq_table->irq_table = mlx5_irq_table_get(dev);
+ cpumask_clear(&eq_table->used_cpus);
+ xa_init(&eq_table->comp_eqs);
+ xa_init(&eq_table->comp_irqs);
+ mutex_init(&eq_table->comp_lock);
+ eq_table->curr_comp_eqs = 0;
return 0;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+
mlx5_eq_debugfs_cleanup(dev);
- kvfree(dev->priv.eq_table);
+ xa_destroy(&table->comp_irqs);
+ xa_destroy(&table->comp_eqs);
+ kvfree(table);
}
/* Async EQs */
@@ -803,88 +815,112 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
-static void comp_irqs_release_pci(struct mlx5_core_dev *dev)
+static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
+
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
- mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_release_vector(irq);
}
-static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
+static int mlx5_cpumask_default_spread(int numa_node, int index)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
const struct cpumask *prev = cpu_none_mask;
const struct cpumask *mask;
- int ncomp_eqs;
- u16 *cpus;
- int ret;
+ int found_cpu = 0;
+ int i = 0;
int cpu;
- int i;
-
- ncomp_eqs = table->num_comp_eqs;
- cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
- if (!cpus)
- return -ENOMEM;
- i = 0;
rcu_read_lock();
- for_each_numa_hop_mask(mask, dev->priv.numa_node) {
+ for_each_numa_hop_mask(mask, numa_node) {
for_each_cpu_andnot(cpu, mask, prev) {
- cpus[i] = cpu;
- if (++i == ncomp_eqs)
+ if (i++ == index) {
+ found_cpu = cpu;
goto spread_done;
+ }
}
prev = mask;
}
+
spread_done:
rcu_read_unlock();
- ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
- kfree(cpus);
- return ret;
+ return found_cpu;
}
-static void comp_irqs_release_sf(struct mlx5_core_dev *dev)
+static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
-
- mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
+#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->priv.eq_table->rmap;
+#endif
+ return dev->priv.eq_table->rmap;
+#else
+ return NULL;
+#endif
}
-static int comp_irqs_request_sf(struct mlx5_core_dev *dev)
+static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs = table->num_comp_eqs;
+ struct cpu_rmap *rmap;
+ struct mlx5_irq *irq;
+ int cpu;
+
+ rmap = mlx5_eq_table_get_pci_rmap(dev);
+ cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+ irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
- return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
-static void comp_irqs_release(struct mlx5_core_dev *dev)
+static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
- mlx5_core_is_sf(dev) ? comp_irqs_release_sf(dev) :
- comp_irqs_release_pci(dev);
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
- kfree(table->comp_irqs);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_affinity_irq_release(dev, irq);
}
-static int comp_irqs_request(struct mlx5_core_dev *dev)
+static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs;
- int ret;
+ struct mlx5_irq *irq;
- ncomp_eqs = table->num_comp_eqs;
- table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
- if (!table->comp_irqs)
- return -ENOMEM;
+ irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
+ if (IS_ERR(irq)) {
+ /* In case SF irq pool does not exist, fallback to the PF irqs*/
+ if (PTR_ERR(irq) == -ENOENT)
+ return comp_irq_request_pci(dev, vecidx);
+
+ return PTR_ERR(irq);
+ }
+
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
+}
- ret = mlx5_core_is_sf(dev) ? comp_irqs_request_sf(dev) :
- comp_irqs_request_pci(dev);
- if (ret < 0)
- kfree(table->comp_irqs);
+static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
+{
+ mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
+ comp_irq_release_pci(dev, vecidx);
+}
- return ret;
+static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
+{
+ return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
+ comp_irq_request_pci(dev, vecidx);
}
#ifdef CONFIG_RFS_ACCEL
@@ -901,7 +937,7 @@ static int alloc_rmap(struct mlx5_core_dev *mdev)
if (mlx5_core_is_sf(mdev))
return 0;
- eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+ eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
if (!eq_table->rmap)
return -ENOMEM;
return 0;
@@ -921,22 +957,19 @@ static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif
-static void destroy_comp_eqs(struct mlx5_core_dev *dev)
+static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_eq_comp *eq, *n;
-
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
- list_del(&eq->list);
- mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
- if (destroy_unmap_eq(dev, &eq->core))
- mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
- eq->core.eqn);
- tasklet_disable(&eq->tasklet_ctx.task);
- kfree(eq);
- }
- comp_irqs_release(dev);
- free_rmap(dev);
+
+ xa_erase(&table->comp_eqs, vecidx);
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
+ if (destroy_unmap_eq(dev, &eq->core))
+ mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
+ eq->core.eqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
+ kfree(eq);
+ comp_irq_release(dev, vecidx);
+ table->curr_comp_eqs--;
}
static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
@@ -954,129 +987,149 @@ static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
return MLX5_COMP_EQ_SIZE;
}
-static int create_comp_eqs(struct mlx5_core_dev *dev)
+/* Must be called with EQ table comp_lock held */
+static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_param param = {};
struct mlx5_eq_comp *eq;
- int ncomp_eqs;
+ struct mlx5_irq *irq;
int nent;
int err;
- int i;
- err = alloc_rmap(dev);
+ lockdep_assert_held(&table->comp_lock);
+ if (table->curr_comp_eqs == table->max_comp_eqs) {
+ mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
+ table->max_comp_eqs);
+ return -ENOMEM;
+ }
+
+ err = comp_irq_request(dev, vecidx);
if (err)
return err;
- ncomp_eqs = comp_irqs_request(dev);
- if (ncomp_eqs < 0) {
- err = ncomp_eqs;
- goto err_irqs_req;
- }
-
- INIT_LIST_HEAD(&table->comp_eqs_list);
nent = comp_eq_depth_devlink_param_get(dev);
- for (i = 0; i < ncomp_eqs; i++) {
- struct mlx5_eq_param param = {};
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
+ if (!eq) {
+ err = -ENOMEM;
+ goto clean_irq;
+ }
- eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
- if (!eq) {
- err = -ENOMEM;
- goto clean;
- }
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
- INIT_LIST_HEAD(&eq->tasklet_ctx.list);
- INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
- spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
-
- eq->irq_nb.notifier_call = mlx5_eq_comp_int;
- param = (struct mlx5_eq_param) {
- .irq = table->comp_irqs[i],
- .nent = nent,
- };
-
- err = create_map_eq(dev, &eq->core, &param);
- if (err)
- goto clean_eq;
- err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
- if (err) {
- destroy_unmap_eq(dev, &eq->core);
- goto clean_eq;
- }
+ irq = xa_load(&table->comp_irqs, vecidx);
+ eq->irq_nb.notifier_call = mlx5_eq_comp_int;
+ param = (struct mlx5_eq_param) {
+ .irq = irq,
+ .nent = nent,
+ };
- mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
- /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
- list_add_tail(&eq->list, &table->comp_eqs_list);
+ err = create_map_eq(dev, &eq->core, &param);
+ if (err)
+ goto clean_eq;
+ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+ if (err) {
+ destroy_unmap_eq(dev, &eq->core);
+ goto clean_eq;
}
- table->num_comp_eqs = ncomp_eqs;
- return 0;
+ mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
+ err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
+ if (err)
+ goto disable_eq;
+
+ table->curr_comp_eqs++;
+ return eq->core.eqn;
+disable_eq:
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
clean_eq:
kfree(eq);
-clean:
- destroy_comp_eqs(dev);
-err_irqs_req:
- free_rmap(dev);
+clean_irq:
+ comp_irq_release(dev, vecidx);
return err;
}
-static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn)
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
- int err = -ENOENT;
- int i = 0;
+ int ret = 0;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
- if (i++ == vector) {
- if (irqn)
- *irqn = eq->core.irqn;
- if (eqn)
- *eqn = eq->core.eqn;
- err = 0;
- break;
- }
+ mutex_lock(&table->comp_lock);
+ eq = xa_load(&table->comp_eqs, vecidx);
+ if (eq) {
+ *eqn = eq->core.eqn;
+ goto out;
}
- return err;
-}
+ ret = create_comp_eq(dev, vecidx);
+ if (ret < 0) {
+ mutex_unlock(&table->comp_lock);
+ return ret;
+ }
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
-{
- return vector2eqnirqn(dev, vector, eqn, NULL);
+ *eqn = ret;
+out:
+ mutex_unlock(&table->comp_lock);
+ return 0;
}
-EXPORT_SYMBOL(mlx5_vector2eqn);
+EXPORT_SYMBOL(mlx5_comp_eqn_get);
-int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
- return vector2eqnirqn(dev, vector, NULL, irqn);
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+ int eqn;
+ int err;
+
+ /* Allocate the EQ if not allocated yet */
+ err = mlx5_comp_eqn_get(dev, vector, &eqn);
+ if (err)
+ return err;
+
+ eq = xa_load(&table->comp_eqs, vector);
+ *irqn = eq->core.irqn;
+ return 0;
}
-unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
{
- return dev->priv.eq_table->num_comp_eqs;
+ return dev->priv.eq_table->max_comp_eqs;
}
-EXPORT_SYMBOL(mlx5_comp_vectors_count);
+EXPORT_SYMBOL(mlx5_comp_vectors_max);
-struct cpumask *
+static struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
- int i = 0;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
- if (i++ == vector)
- return mlx5_irq_get_affinity_mask(eq->core.irq);
- }
+ eq = xa_load(&table->comp_eqs, vector);
+ if (eq)
+ return mlx5_irq_get_affinity_mask(eq->core.irq);
- WARN_ON_ONCE(1);
return NULL;
}
-EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
+{
+ struct cpumask *mask;
+ int cpu;
+
+ mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
+ if (mask)
+ cpu = cpumask_first(mask);
+ else
+ cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+
+ return cpu;
+}
+EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
@@ -1089,11 +1142,11 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
+ unsigned long index;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
+ xa_for_each(&table->comp_eqs, index, eq)
if (eq->core.eqn == eqn)
return eq;
- }
return ERR_PTR(-ENOENT);
}
@@ -1101,11 +1154,7 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
-
- mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
mlx5_irq_table_free_irqs(dev);
- mutex_unlock(&table->lock);
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -1148,22 +1197,22 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
- eq_table->num_comp_eqs = get_num_eqs(dev);
+ eq_table->max_comp_eqs = get_num_eqs(dev);
err = create_async_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create async EQs\n");
goto err_async_eqs;
}
- err = create_comp_eqs(dev);
+ err = alloc_rmap(dev);
if (err) {
- mlx5_core_err(dev, "Failed to create completion EQs\n");
- goto err_comp_eqs;
+ mlx5_core_err(dev, "Failed to allocate rmap\n");
+ goto err_rmap;
}
return 0;
-err_comp_eqs:
+err_rmap:
destroy_async_eqs(dev);
err_async_eqs:
return err;
@@ -1171,7 +1220,14 @@ err_async_eqs:
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
- destroy_comp_eqs(dev);
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+ unsigned long index;
+
+ xa_for_each(&table->comp_eqs, index, eq)
+ destroy_comp_eq(dev, eq, index);
+
+ free_rmap(dev);
destroy_async_eqs(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 7c79476cc5f9..1887a24ee414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -740,7 +740,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name,
u64 *rate, struct netlink_ext_ack *extack)
{
- u32 link_speed_max, reminder;
+ u32 link_speed_max, remainder;
u64 value;
int err;
@@ -750,8 +750,8 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return err;
}
- value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &reminder);
- if (reminder) {
+ value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
+ if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
name, *rate);
NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 723dff87e6d5..e391535e1ab1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -375,7 +375,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- bool ignore_flow_lvl,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -385,8 +384,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
- if (ignore_flow_lvl)
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
@@ -569,7 +567,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
- err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 12abe991583a..c4de6bf8d1b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -445,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto err_cqwq;
}
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
+ err = mlx5_comp_eqn_get(mdev, smp_processor_id(), &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 39c03dcbd196..e5c1012921d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -57,7 +57,7 @@ static const char * const mlx5_fpga_qp_error_strings[] = {
};
static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
{
- struct mlx5_fpga_device *fdev = NULL;
+ struct mlx5_fpga_device *fdev;
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev)
@@ -252,7 +252,7 @@ out:
int mlx5_fpga_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_fpga_device *fdev = NULL;
+ struct mlx5_fpga_device *fdev;
if (!MLX5_CAP_GEN(mdev, fpga)) {
mlx5_core_dbg(mdev, "FPGA capability not present\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index fa467335526e..047d5fed5f89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -156,67 +156,57 @@ unlock:
return least_loaded_irq;
}
-void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
- int num_irqs)
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
- int i;
-
- for (i = 0; i < num_irqs; i++) {
- int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i]));
+ int cpu;
- synchronize_irq(pci_irq_vector(pool->dev->pdev,
- mlx5_irq_get_index(irqs[i])));
- if (mlx5_irq_put(irqs[i]))
- if (pool->irqs_per_cpu)
- cpu_put(pool, cpu);
- }
+ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
+ synchronize_irq(pci_irq_vector(pool->dev->pdev,
+ mlx5_irq_get_index(irq)));
+ if (mlx5_irq_put(irq))
+ if (pool->irqs_per_cpu)
+ cpu_put(pool, cpu);
}
/**
- * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQs.
- * @nirqs: number of IRQs to request.
- * @irqs: an output array of IRQs pointers.
+ * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @used_cpus: cpumask of bounded cpus by the device
+ * @vecidx: vector index to request an IRQ for.
*
* Each IRQ is bounded to at most 1 CPU.
- * This function is requesting IRQs according to the default assignment.
+ * This function is requesting an IRQ according to the default assignment.
* The default assignment policy is:
- * - in each iteration, request the least loaded IRQ which is not bound to any
+ * - request the least loaded IRQ which is not bound to any
* CPU of the previous IRQs requested.
*
- * This function returns the number of IRQs requested, (which might be smaller than
- * @nirqs), if successful, or a negative error code in case of an error.
+ * On success, this function updates used_cpus mask and returns an irq pointer.
+ * In case of an error, an appropriate error pointer is returned.
*/
-int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs)
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx)
{
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
- int i = 0;
+
+ if (!mlx5_irq_pool_is_sf_pool(pool))
+ return ERR_PTR(-ENOENT);
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
- for (i = 0; i < nirqs; i++) {
- if (mlx5_irq_pool_is_sf_pool(pool))
- irq = mlx5_irq_affinity_request(pool, &af_desc);
- else
- /* In case SF pool doesn't exists, fallback to the PF IRQs.
- * The PF IRQs are already allocated and binded to CPU
- * at this point. Hence, only an index is needed.
- */
- irq = mlx5_irq_request(dev, i, NULL, NULL);
- if (IS_ERR(irq))
- break;
- irqs[i] = irq;
- cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), &af_desc.mask);
- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
- }
- if (!i)
- return PTR_ERR(irq);
- return i;
+ cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
+ irq = mlx5_irq_affinity_request(pool, &af_desc);
+
+ if (IS_ERR(irq))
+ return irq;
+
+ cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+
+ return irq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 5a80fb7dbbca..40c7be124041 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -81,7 +81,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
int inlen, eqn;
int err;
- err = mlx5_vector2eqn(mdev, 0, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index d3d628b862f3..69a75459775d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -104,6 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
-int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
index 4047629a876b..30564d9b00e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
@@ -40,7 +40,7 @@ struct mlx5_hv_vhca_agent {
struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
{
- struct mlx5_hv_vhca *hv_vhca = NULL;
+ struct mlx5_hv_vhca *hv_vhca;
hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
if (!hv_vhca)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index aa403a5ea34e..1088114e905d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -29,9 +29,9 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct irq_affinity_desc *af_desc,
struct cpu_rmap **rmap);
-int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
- struct mlx5_irq **irqs, struct cpu_rmap **rmap);
-void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
+struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ u16 vecidx, struct cpu_rmap **rmap);
+void mlx5_irq_release_vector(struct mlx5_irq *irq);
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
@@ -39,17 +39,17 @@ int mlx5_irq_get_index(struct mlx5_irq *irq);
struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
-int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs);
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx);
struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
struct irq_affinity_desc *af_desc);
-void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
- int num_irqs);
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
#else
-static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs)
+static inline
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx)
{
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct mlx5_irq *
@@ -58,7 +58,9 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev,
- struct mlx5_irq **irqs, int num_irqs) {}
+static inline
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
+{
+}
#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index cba2a4afb5fd..33a133c9918c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -432,19 +432,10 @@ static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
return pool ? pool : irq_table->pcif_pool;
}
-/**
- * mlx5_irqs_release - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
- */
-static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
+static void _mlx5_irq_release(struct mlx5_irq *irq)
{
- int i;
-
- for (i = 0; i < nirqs; i++) {
- synchronize_irq(irqs[i]->map.virq);
- mlx5_irq_put(irqs[i]);
- }
+ synchronize_irq(irq->map.virq);
+ mlx5_irq_put(irq);
}
/**
@@ -453,7 +444,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
*/
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
- mlx5_irqs_release(&ctrl_irq, 1);
+ _mlx5_irq_release(ctrl_irq);
}
/**
@@ -569,53 +560,42 @@ void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
EXPORT_SYMBOL(mlx5_msix_free);
/**
- * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
+ * mlx5_irq_release_vector - release one IRQ back to the system.
+ * @irq: the irq to release.
*/
-void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
+void mlx5_irq_release_vector(struct mlx5_irq *irq)
{
- mlx5_irqs_release(irqs, nirqs);
+ _mlx5_irq_release(irq);
}
/**
- * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQs.
- * @cpus: CPUs array for binding the IRQs
- * @nirqs: number of IRQs to request.
- * @irqs: an output array of IRQs pointers.
+ * mlx5_irq_request_vector - request one IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @cpu: CPU to bind the IRQ to.
+ * @vecidx: vector index to request an IRQ for.
* @rmap: pointer to reverse map pointer for completion interrupts
*
* Each IRQ is bound to at most 1 CPU.
- * This function is requests nirqs IRQs, starting from @vecidx.
+ * This function is requests one IRQ, for the given @vecidx.
*
- * This function returns the number of IRQs requested, (which might be smaller than
- * @nirqs), if successful, or a negative error code in case of an error.
+ * This function returns a pointer to the irq on success, or an error pointer
+ * in case of an error.
*/
-int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
- struct mlx5_irq **irqs, struct cpu_rmap **rmap)
+struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ u16 vecidx, struct cpu_rmap **rmap)
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- struct mlx5_irq *irq;
int offset = 1;
- int i;
if (!pool->xa_num_irqs.max)
offset = 0;
af_desc.is_managed = false;
- for (i = 0; i < nirqs; i++) {
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpus[i], &af_desc.mask);
- irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap);
- if (IS_ERR(irq))
- break;
- irqs[i] = irq;
- }
-
- return i ? i : PTR_ERR(irq);
+ cpumask_clear(&af_desc.mask);
+ cpumask_set_cpu(cpu, &af_desc.mask);
+ return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
}
static struct mlx5_irq_pool *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 4a5ae86e2b62..6fa06ba2d346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -1096,8 +1096,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
- err = mlx5_vector2eqn(mdev, vector, &eqn);
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+ err = mlx5_comp_eqn_get(mdev, vector, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;