-rw-r--r--   drivers/nvme/host/pci.c     |  7
-rw-r--r--   include/linux/interrupt.h   |  9
-rw-r--r--   kernel/irq/affinity.c       | 16
3 files changed, 21 insertions, 11 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9bc585415d9b..21ffd671b6ed 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2081,12 +2081,11 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int irq_sets[2];
struct irq_affinity affd = {
- .pre_vectors = 1,
- .nr_sets = ARRAY_SIZE(irq_sets),
- .sets = irq_sets,
+ .pre_vectors = 1,
+ .nr_sets = 2,
};
+ unsigned int *irq_sets = affd.set_size;
int result = 0;
unsigned int irq_queues, this_p_queues;
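
The subtle bit in this hunk is the new irq_sets pointer: it aliases the set_size[] array now embedded in affd, so code later in nvme_setup_irqs() that fills irq_sets[0]/irq_sets[1] keeps working unchanged while writing straight into the descriptor handed to the IRQ core. A minimal standalone sketch of that aliasing (the struct below is a cut-down stand-in, not the kernel definition, and the sizes are made up):

#include <stdio.h>

/* Cut-down stand-in for the fields this hunk touches. */
struct irq_affinity {
	unsigned int nr_sets;
	unsigned int set_size[4];
};

int main(void)
{
	struct irq_affinity affd = { .nr_sets = 2 };
	unsigned int *irq_sets = affd.set_size;	/* alias, as in the hunk above */

	/* Later code writes through the alias ... */
	irq_sets[0] = 6;
	irq_sets[1] = 2;

	/* ... and the descriptor sees the values directly. */
	printf("set_size = { %u, %u }\n", affd.set_size[0], affd.set_size[1]);
	return 0;
}
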
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 35e7389c2011..5afdfd5dc39b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -241,20 +241,23 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+#define IRQ_AFFINITY_MAX_SETS 4
+
/**
* struct irq_affinity - Description for automatic irq affinity assignements
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
- * @nr_sets: Length of passed in *sets array
- * @sets: Number of affinitized sets
+ * @nr_sets: The number of interrupt sets for which affinity
+ * spreading is required
+ * @set_size: Array holding the size of each interrupt set
*/
struct irq_affinity {
unsigned int pre_vectors;
unsigned int post_vectors;
unsigned int nr_sets;
- unsigned int *sets;
+ unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
};
/**
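
With the pointer member replaced by an embedded array, a caller no longer has to keep a separate sets[] array alive alongside the descriptor: the per-set sizes travel inside struct irq_affinity itself, capped at IRQ_AFFINITY_MAX_SETS entries. A hedged standalone illustration of the new caller-side shape (the struct is copied locally for the sketch; total_set_vectors() is a made-up helper that mirrors the summation irq_calc_affinity_vectors() does in the last hunk below, and the sizes are invented):

#include <stdio.h>

#define IRQ_AFFINITY_MAX_SETS 4

/* Local copy of the layout introduced above, for illustration only. */
struct irq_affinity {
	unsigned int pre_vectors;
	unsigned int post_vectors;
	unsigned int nr_sets;
	unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
};

/* Made-up helper: total affinitized vectors a descriptor asks for. */
static unsigned int total_set_vectors(const struct irq_affinity *affd)
{
	unsigned int i, vecs = 0;

	if (affd->nr_sets > IRQ_AFFINITY_MAX_SETS)
		return 0;	/* reject oversized descriptors, cf. WARN_ON_ONCE below */

	for (i = 0; i < affd->nr_sets; i++)
		vecs += affd->set_size[i];
	return vecs;
}

int main(void)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* e.g. one admin/config vector */
		.nr_sets	= 2,
		.set_size	= { 24, 8 },	/* invented per-set sizes */
	};

	printf("affinitized vectors: %u\n", total_set_vectors(&affd));
	return 0;
}
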
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 82e8799374e9..278289c091bb 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -238,9 +238,10 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
* Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
struct irq_affinity_desc *
-irq_create_affinity_masks(unsigned int nvecs, const struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
unsigned int affvecs, curvec, usedvecs, nr_sets, i;
+ unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
struct irq_affinity_desc *masks = NULL;
/*
@@ -250,6 +251,9 @@ irq_create_affinity_masks(unsigned int nvecs, const struct irq_affinity *affd)
if (nvecs == affd->pre_vectors + affd->post_vectors)
return NULL;
+ if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
+ return NULL;
+
masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
if (!masks)
return NULL;
@@ -263,11 +267,15 @@ irq_create_affinity_masks(unsigned int nvecs, const struct irq_affinity *affd)
*/
affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
nr_sets = affd->nr_sets;
- if (!nr_sets)
+ if (!nr_sets) {
nr_sets = 1;
+ set_size[0] = affvecs;
+ } else {
+ memcpy(set_size, affd->set_size, nr_sets * sizeof(unsigned int));
+ }
for (i = 0, usedvecs = 0; i < nr_sets; i++) {
- unsigned int this_vecs = affd->sets ? affd->sets[i] : affvecs;
+ unsigned int this_vecs = set_size[i];
int ret;
ret = irq_build_affinity_masks(affd, curvec, this_vecs,
@@ -314,7 +322,7 @@ unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
unsigned int i;
for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
- set_vecs += affd->sets[i];
+ set_vecs += affd->set_size[i];
} else {
get_online_cpus();
set_vecs = cpumask_weight(cpu_possible_mask);
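
To summarise the core-side change, the spreading entry point now works from a local copy of the set sizes: a missing set description collapses to a single set covering all affinitized vectors, and an oversized nr_sets is rejected up front. The following is a toy standalone model of that control flow, not kernel code; model_spread(), the vector counts and the printf output are all illustrative:

#include <stdio.h>
#include <string.h>

#define IRQ_AFFINITY_MAX_SETS 4

struct irq_affinity {
	unsigned int pre_vectors;
	unsigned int post_vectors;
	unsigned int nr_sets;
	unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
};

/*
 * Toy model of the set-size handling added above: bounds-check the caller's
 * description, fall back to one set covering all affinitized vectors when no
 * sets were given, otherwise copy the caller's sizes into a local array and
 * iterate over them.
 */
static int model_spread(unsigned int nvecs, const struct irq_affinity *affd)
{
	unsigned int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
	unsigned int nr_sets = affd->nr_sets;
	unsigned int i;

	if (nr_sets > IRQ_AFFINITY_MAX_SETS)
		return -1;			/* cf. the WARN_ON_ONCE above */

	if (!nr_sets) {
		nr_sets = 1;
		set_size[0] = affvecs;		/* single set covers everything */
	} else {
		memcpy(set_size, affd->set_size, nr_sets * sizeof(unsigned int));
	}

	for (i = 0; i < nr_sets; i++)
		printf("spread set %u over %u vectors\n", i, set_size[i]);
	return 0;
}

int main(void)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,
		.nr_sets	= 2,
		.set_size	= { 6, 2 },	/* invented sizes */
	};

	return model_spread(32, &affd);
}
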