Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c  74
1 file changed, 26 insertions(+), 48 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index cba2a4afb5fd..653648216730 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -259,8 +259,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int err;
irq = kzalloc(sizeof(*irq), GFP_KERNEL);
- if (!irq)
+ if (!irq || !zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
+ kfree(irq);
return ERR_PTR(-ENOMEM);
+ }
+
if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
/* The vector at index 0 is always statically allocated. If
* dynamic irq is not supported all vectors are statically
@@ -297,11 +300,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
goto err_req_irq;
}
- if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
- mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
- err = -ENOMEM;
- goto err_cpumask;
- }
+
if (af_desc) {
cpumask_copy(irq->mask, &af_desc->mask);
irq_set_affinity_and_hint(irq->map.virq, irq->mask);
@@ -319,8 +318,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
err_xa:
if (af_desc)
irq_update_affinity_hint(irq->map.virq, NULL);
- free_cpumask_var(irq->mask);
-err_cpumask:
free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
@@ -333,6 +330,7 @@ err_irq_rmap:
if (i && pci_msix_can_alloc_dyn(dev->pdev))
pci_msix_free_irq(dev->pdev, irq->map);
err_alloc_irq:
+ free_cpumask_var(irq->mask);
kfree(irq);
return ERR_PTR(err);
}
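
The hunks above fold the cpumask allocation into the initial out-of-memory check and move free_cpumask_var() to the common err_alloc_irq unwind. A minimal standalone sketch of that idiom, under the assumption of a hypothetical demo_irq type and demo_irq_alloc()/demo_irq_free() helpers that are not part of the driver:

    /* Hypothetical demo type; illustrates only the allocation pattern
     * adopted above, not the real mlx5_irq layout.
     */
    #include <linux/cpumask.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_irq {
            cpumask_var_t mask;
            /* ... remaining per-IRQ state ... */
    };

    static struct demo_irq *demo_irq_alloc(void)
    {
            struct demo_irq *irq = kzalloc(sizeof(*irq), GFP_KERNEL);

            /* || short-circuits, so the mask is only touched when irq is
             * valid, and kfree(NULL) is a no-op: one branch covers both a
             * failed kzalloc() and a failed zalloc_cpumask_var().
             */
            if (!irq || !zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
                    kfree(irq);
                    return ERR_PTR(-ENOMEM);
            }
            return irq;
    }

    static void demo_irq_free(struct demo_irq *irq)
    {
            /* Mirrors the reordered unwind path: free the mask, then the
             * enclosing object; both are valid once allocation succeeded.
             */
            free_cpumask_var(irq->mask);
            kfree(irq);
    }

Because the mask now exists for the whole lifetime of the object, the separate err_cpumask label and the mid-function -ENOMEM path can be dropped, which is what the error-path hunks above do.
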
@@ -432,19 +430,10 @@ static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
return pool ? pool : irq_table->pcif_pool;
}
-/**
- * mlx5_irqs_release - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
- */
-static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
+static void _mlx5_irq_release(struct mlx5_irq *irq)
{
- int i;
-
- for (i = 0; i < nirqs; i++) {
- synchronize_irq(irqs[i]->map.virq);
- mlx5_irq_put(irqs[i]);
- }
+ synchronize_irq(irq->map.virq);
+ mlx5_irq_put(irq);
}
/**
@@ -453,7 +442,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
*/
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
- mlx5_irqs_release(&ctrl_irq, 1);
+ _mlx5_irq_release(ctrl_irq);
}
/**
@@ -569,53 +558,42 @@ void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
EXPORT_SYMBOL(mlx5_msix_free);
/**
- * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
+ * mlx5_irq_release_vector - release one IRQ back to the system.
+ * @irq: the irq to release.
*/
-void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
+void mlx5_irq_release_vector(struct mlx5_irq *irq)
{
- mlx5_irqs_release(irqs, nirqs);
+ _mlx5_irq_release(irq);
}
/**
- * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQs.
- * @cpus: CPUs array for binding the IRQs
- * @nirqs: number of IRQs to request.
- * @irqs: an output array of IRQs pointers.
+ * mlx5_irq_request_vector - request one IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @cpu: CPU to bind the IRQ to.
+ * @vecidx: vector index to request an IRQ for.
* @rmap: pointer to reverse map pointer for completion interrupts
*
* Each IRQ is bound to at most 1 CPU.
- * This function is requests nirqs IRQs, starting from @vecidx.
+ * This function requests one IRQ for the given @vecidx.
*
- * This function returns the number of IRQs requested, (which might be smaller than
- * @nirqs), if successful, or a negative error code in case of an error.
+ * This function returns a pointer to the irq on success, or an error pointer
+ * in case of an error.
*/
-int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
- struct mlx5_irq **irqs, struct cpu_rmap **rmap)
+struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ u16 vecidx, struct cpu_rmap **rmap)
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- struct mlx5_irq *irq;
int offset = 1;
- int i;
if (!pool->xa_num_irqs.max)
offset = 0;
af_desc.is_managed = false;
- for (i = 0; i < nirqs; i++) {
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpus[i], &af_desc.mask);
- irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap);
- if (IS_ERR(irq))
- break;
- irqs[i] = irq;
- }
-
- return i ? i : PTR_ERR(irq);
+ cpumask_clear(&af_desc.mask);
+ cpumask_set_cpu(cpu, &af_desc.mask);
+ return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
}
static struct mlx5_irq_pool *
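
Taken together, the last hunks replace the array-based request/release helpers with per-vector calls, so a caller that used to hand over a CPU array now loops itself. A hedged caller-side sketch follows; demo_request_comp_irqs(), its parameters, and the included header are illustrative assumptions, not taken from this patch:

    #include <linux/err.h>
    #include "mlx5_irq.h"   /* assumed driver-local header declaring the helpers */

    /* Hypothetical caller: request one completion IRQ per CPU in @cpus,
     * one vector index at a time, unwinding with mlx5_irq_release_vector()
     * if a later request fails.
     */
    static int demo_request_comp_irqs(struct mlx5_core_dev *dev, const u16 *cpus,
                                      int ncomp, struct mlx5_irq **irqs,
                                      struct cpu_rmap **rmap)
    {
            struct mlx5_irq *irq;
            int i;

            for (i = 0; i < ncomp; i++) {
                    irq = mlx5_irq_request_vector(dev, cpus[i], i, rmap);
                    if (IS_ERR(irq))
                            goto err_unwind;
                    irqs[i] = irq;
            }
            return 0;

    err_unwind:
            while (--i >= 0)
                    mlx5_irq_release_vector(irqs[i]);
            return PTR_ERR(irq);
    }

Compared with the removed mlx5_irqs_request_vectors(), the partial-success bookkeeping ("return i ? i : PTR_ERR(irq)") moves into the caller, which can now decide whether to unwind, as above, or keep whatever subset it obtained.
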