| author | Maxime Ripard <mripard@kernel.org> | 2024-01-22 09:44:15 +0100 |
|---|---|---|
| committer | Maxime Ripard <mripard@kernel.org> | 2024-01-22 09:44:15 +0100 |
| commit | cf79f291f985662150363b4a93d16f88f12643bc (patch) | |
| tree | a803f6e9b1e34f100783538955b9ce34628cd5dd /lib/group_cpus.c | |
| parent | a20f1b02bafcbf5a32d96a1d4185d6981cf7d016 (diff) | |
| parent | 6613476e225e090cc9aad49be7fa504e290dd33d (diff) | |
Merge v6.8-rc1 into drm-misc-fixes
Let's kickstart the 6.8 fix cycle.
Signed-off-by: Maxime Ripard <mripard@kernel.org>
Diffstat (limited to 'lib/group_cpus.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | lib/group_cpus.c | 22 |
1 files changed, 16 insertions, 6 deletions
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index aa3f6815bb12..ee272c4cefcc 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 	if (!masks)
 		goto fail_node_to_cpumask;
 
-	/* Stabilize the cpumasks */
-	cpus_read_lock();
 	build_node_to_cpumask(node_to_cpumask);
 
+	/*
+	 * Make a local cache of 'cpu_present_mask', so the two stages
+	 * spread can observe consistent 'cpu_present_mask' without holding
+	 * cpu hotplug lock, then we can reduce deadlock risk with cpu
+	 * hotplug code.
+	 *
+	 * Here CPU hotplug may happen when reading `cpu_present_mask`, and
+	 * we can live with the case because it only affects that hotplug
+	 * CPU is handled in the 1st or 2nd stage, and either way is correct
+	 * from API user viewpoint since 2-stage spread is sort of
+	 * optimization.
+	 */
+	cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
 	/* grouping present CPUs first */
 	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
-				  cpu_present_mask, nmsk, masks);
+				  npresmsk, nmsk, masks);
 	if (ret < 0)
 		goto fail_build_affinity;
 	nr_present = ret;
@@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 		curgrp = 0;
 	else
 		curgrp = nr_present;
-	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+	cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
 	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
 				  npresmsk, nmsk, masks);
 	if (ret >= 0)
 		nr_others = ret;
 
 fail_build_affinity:
-	cpus_read_unlock();
-
 	if (ret >= 0)
 		WARN_ON(nr_present + nr_others < numgrps);
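To make the reasoning in the new comment concrete, here is a hedged userspace sketch of the same pattern: read the shared "present" mask exactly once into a local snapshot, then run both spreading stages against that snapshot so a concurrent hotplug-style update cannot make the stages disagree. This is not the kernel implementation; `live_present_mask`, `present_snapshot`, and `two_stage_spread` are illustrative names only.

```c
/*
 * Userspace sketch of the snapshot pattern used by group_cpus_evenly()
 * above: take one (tolerated-racy) copy of a concurrently-updated
 * "present" mask and derive both stages from that copy, instead of
 * re-reading the live mask under a lock. All names are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 8

/* Live mask; a "hotplug" thread could flip bits here concurrently. */
static _Atomic uint64_t live_present_mask = 0x0f; /* CPUs 0-3 present */

static int popcount64(uint64_t m)
{
	return __builtin_popcountll(m);
}

static void two_stage_spread(unsigned int numgrps)
{
	/*
	 * Single relaxed read, analogous to
	 * cpumask_copy(npresmsk, data_race(cpu_present_mask)):
	 * whichever value is observed, both stages below see the same one.
	 */
	uint64_t present_snapshot =
		atomic_load_explicit(&live_present_mask, memory_order_relaxed);

	/* Stage 1: handle CPUs that were present in the snapshot. */
	int nr_present = popcount64(present_snapshot);

	/*
	 * Stage 2: handle possible-but-not-present CPUs, derived from the
	 * same snapshot (the cpumask_andnot(..., npresmsk) step above).
	 */
	uint64_t others_mask = ((1ULL << NCPUS) - 1) & ~present_snapshot;
	int nr_others = popcount64(others_mask);

	/* Each CPU lands in exactly one stage, regardless of concurrent updates. */
	printf("groups=%u present=%d others=%d total_cpus=%d\n",
	       numgrps, nr_present, nr_others, nr_present + nr_others);
}

int main(void)
{
	two_stage_spread(4);
	return 0;
}
```

As the patch's comment argues, the benefit is that group_cpus_evenly() no longer needs cpus_read_lock()/cpus_read_unlock(), removing a deadlock risk with CPU hotplug code; the cost is that a CPU hotplugged mid-call may be handled in either the first or the second stage, which is acceptable because the two-stage spread is only an optimization.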
