Diffstat (limited to 'lib/group_cpus.c')
-rw-r--r--  lib/group_cpus.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index ee272c4cefcc..6d08ac05f371 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -332,9 +332,11 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
 /**
  * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
  * @numgrps: number of groups
+ * @nummasks: number of initialized cpumasks
  *
  * Return: cpumask array if successful, NULL otherwise. And each element
- * includes CPUs assigned to this group
+ * includes CPUs assigned to this group. nummasks contains the number
+ * of initialized masks which can be less than numgrps.
  *
  * Try to put close CPUs from viewpoint of CPU and NUMA locality into
  * same group, and run two-stage grouping:
@@ -344,7 +346,7 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
  * We guarantee in the resulting grouping that all CPUs are covered, and
  * no CPU is assigned to multiple groups
  */
-struct cpumask *group_cpus_evenly(unsigned int numgrps)
+struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
 {
         unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
         cpumask_var_t *node_to_cpumask;
@@ -352,6 +354,9 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
         int ret = -ENOMEM;
         struct cpumask *masks = NULL;
 
+        if (numgrps == 0)
+                return NULL;
+
         if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
                 return NULL;
 
@@ -386,7 +391,7 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
         ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
                                   npresmsk, nmsk, masks);
         if (ret < 0)
-                goto fail_build_affinity;
+                goto fail_node_to_cpumask;
         nr_present = ret;
 
         /*
@@ -405,10 +410,6 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
         if (ret >= 0)
                 nr_others = ret;
 
- fail_build_affinity:
-        if (ret >= 0)
-                WARN_ON(nr_present + nr_others < numgrps);
-
  fail_node_to_cpumask:
         free_node_to_cpumask(node_to_cpumask);
 
@@ -421,18 +422,24 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
                 kfree(masks);
                 return NULL;
         }
+        *nummasks = min(nr_present + nr_others, numgrps);
         return masks;
 }
 #else /* CONFIG_SMP */
-struct cpumask *group_cpus_evenly(unsigned int numgrps)
+struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
 {
-        struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+        struct cpumask *masks;
+
+        if (numgrps == 0)
+                return NULL;
+        masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
 
         if (!masks)
                 return NULL;
 
         /* assign all CPUs(cpu 0) to the 1st group only */
         cpumask_copy(&masks[0], cpu_possible_mask);
+        *nummasks = 1;
         return masks;
 }
 #endif /* CONFIG_SMP */
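
After this change, callers may no longer assume that all numgrps entries of the returned array are initialized; only the first *nummasks entries are. A minimal caller sketch against the new signature follows. The function name example_spread_queues, the pr_info reporting, and the include set are illustrative assumptions, not part of this patch:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/group_cpus.h>
#include <linux/printk.h>
#include <linux/slab.h>

/*
 * Hypothetical caller (not in this patch): report how CPUs were grouped.
 * Only the first *nummasks entries of the returned array are initialized,
 * so the loop is bounded by nummasks rather than numgrps.
 */
static int example_spread_queues(unsigned int numgrps)
{
        struct cpumask *masks;
        unsigned int nummasks, i;

        masks = group_cpus_evenly(numgrps, &nummasks);
        if (!masks)
                return -ENOMEM;

        for (i = 0; i < nummasks; i++)
                pr_info("group %u: %*pbl\n", i, cpumask_pr_args(&masks[i]));

        /* the caller owns the array and frees it as a whole */
        kfree(masks);
        return 0;
}

This is also why the WARN_ON(nr_present + nr_others < numgrps) is dropped above: producing fewer masks than requested is now a supported outcome reported through *nummasks, not a condition to warn about.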