Diffstat (limited to 'include/linux/cpumask.h')
 include/linux/cpumask.h | 137 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 100 insertions(+), 37 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bd047864c7ac..2f065ad97541 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -35,19 +35,23 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
-#if NR_CPUS == 1
-#define nr_cpu_ids 1U
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+static inline void set_nr_cpu_ids(unsigned int nr)
+{
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ WARN_ON(nr != nr_cpu_ids);
#else
-#define nr_cpumask_bits ((unsigned int)NR_CPUS)
+ nr_cpu_ids = nr;
#endif
+}
+
+/* Deprecated. Always use nr_cpu_ids. */
+#define nr_cpumask_bits nr_cpu_ids
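With CONFIG_FORCE_NR_CPUS (or NR_CPUS == 1) the value is a compile-time constant, so any writer has to go through set_nr_cpu_ids(). A minimal sketch of a boot-time caller, modeled on setup_nr_cpu_ids() in kernel/smp.c and assuming cpu_possible_mask is already populated:

    /* Derive the runtime CPU count from the highest possible CPU.
     * When nr_cpu_ids is a compile-time constant, set_nr_cpu_ids()
     * only WARNs on a mismatch instead of storing the value. */
    static void __init setup_nr_cpu_ids(void)
    {
    	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask),
    				     NR_CPUS) + 1);
    }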
/*
* The following particular system cpumasks and operations manage
@@ -67,10 +71,6 @@ extern unsigned int nr_cpu_ids;
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
- * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
- * ACPI reports present at boot.
- *
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
@@ -174,9 +174,8 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
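The same "n is a prior cpu" convention applies to cpumask_next_zero() and cpumask_next_and() below: passing -1 requests the first matching bit, and cpumask_check(n + 1) then validates 0, so the old special case is gone. A usage sketch (count_online_cpus() is a hypothetical helper):

    static unsigned int count_online_cpus(void)
    {
    	unsigned int n = 0;
    	int cpu;

    	for (cpu = cpumask_next(-1, cpu_online_mask);
    	     cpu < nr_cpu_ids;
    	     cpu = cpumask_next(cpu, cpu_online_mask))
    		n++;
    	return n;
    }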
@@ -189,9 +188,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
*/
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
@@ -231,9 +229,8 @@ static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits, n + 1);
}
@@ -246,9 +243,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_set_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
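Behaviour is unchanged; the macro now just reuses the generic bit iterator. A usage sketch (print_online_cpus() is a hypothetical example; pr_info() from <linux/printk.h>):

    static void print_online_cpus(void)
    {
    	unsigned int cpu;

    	for_each_cpu(cpu, cpu_online_mask)
    		pr_info("cpu%u is online\n", cpu);
    }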
/**
* for_each_cpu_not - iterate over every cpu in a complemented mask
@@ -258,17 +253,15 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_zero((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
#if NR_CPUS == 1
static inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
/*
* Return the first available CPU when wrapping, or when starting before cpu0,
@@ -293,10 +286,8 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
- (cpu) < nr_cpumask_bits; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for_each_set_bit_wrap(cpu, cpumask_bits(mask), nr_cpumask_bits, start)
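A sketch of the typical pattern, scanning upward from a starting CPU and wrapping around, e.g. to spread work away from the caller (pick_other_cpu() is a hypothetical helper):

    /* Return the first CPU in 'mask' other than 'start', searching
     * upward from 'start' and wrapping around; nr_cpu_ids if none. */
    static unsigned int pick_other_cpu(const struct cpumask *mask,
    				   unsigned int start)
    {
    	unsigned int cpu;

    	for_each_cpu_wrap(cpu, mask, start)
    		if (cpu != start)
    			return cpu;
    	return nr_cpu_ids;
    }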
/**
* for_each_cpu_and - iterate over every cpu in both masks
@@ -313,9 +304,25 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
- (cpu) < nr_cpu_ids;)
+ for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
+
+/**
+ * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
+ * those present in another.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_andnot(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_andnot(cpu, mask1, mask2) \
+ for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
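A usage sketch matching the equivalence above, walking CPUs that are possible but not online without the temporary mask (report_offline_cpus() is a hypothetical example):

    static void report_offline_cpus(void)
    {
    	unsigned int cpu;

    	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
    		pr_info("cpu%u is possible but offline\n", cpu);
    }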
/**
* cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
@@ -337,6 +344,50 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
return i;
}
+/**
+ * cpumask_nth - get the N'th cpu in a cpumask
+ * @cpu: the N'th cpu to find, starting from 0
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no such cpu exists.
+ */
+static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+{
+ return find_nth_bit(cpumask_bits(srcp), nr_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and - get the N'th cpu set in both cpumasks
+ * @cpu: the N'th cpu to find, starting from 0
+ * @srcp1: the first cpumask pointer
+ * @srcp2: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no such cpu exists.
+ */
+static inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_andnot - get the N'th cpu set in 1st cpumask, and clear in 2nd.
+ * @cpu: the N'th cpu to find, starting from 0
+ * @srcp1: the first cpumask pointer
+ * @srcp2: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no such cpu exists.
+ */
+static inline
+unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
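A sketch of the intended use, e.g. spreading queue indices over the online CPUs (queue_index_to_cpu() is a hypothetical helper; it assumes at least one CPU is online so the modulo is safe):

    static unsigned int queue_index_to_cpu(unsigned int idx)
    {
    	unsigned int nr = cpumask_weight(cpu_online_mask);

    	return cpumask_nth(idx % nr, cpu_online_mask);
    }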
+
#define CPU_BITS_NONE \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
@@ -587,6 +638,17 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
}
/**
+ * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
+ * @srcp1: the first cpumask to count bits (< nr_cpu_ids) in
+ * @srcp2: the second cpumask to count bits (< nr_cpu_ids) in
+ */
+static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
+}
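A usage sketch: counting CPUs that are both online and in a task's affinity mask, without building a temporary AND mask (effective_affinity_weight() is a hypothetical helper):

    static unsigned int effective_affinity_weight(const struct cpumask *affinity)
    {
    	return cpumask_weight_and(affinity, cpu_online_mask);
    }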
+
+/**
* cpumask_shift_right - *dstp = *srcp >> n
* @dstp: the cpumask result
* @srcp: the input to shift
@@ -1127,9 +1189,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
* cover a worst-case of every other cpu being on one of two nodes for a
* very large NR_CPUS.
*
- * Use PAGE_SIZE as a minimum for smaller configurations.
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
*/
-#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
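A worked check of the small-configuration case: with NR_CPUS == 1, (NR_CPUS * 9)/32 is 0, so the old form compared 0 - 1 against PAGE_SIZE; PAGE_SIZE is unsigned, so -1 promoted to a huge value and the wrong branch won. Comparing first and subtracting after keeps small configs on PAGE_SIZE. A hypothetical compile-time check (not part of the header), assuming static_assert from <linux/build_bug.h>:

    static_assert(CPUMAP_FILE_MAX_BYTES >= PAGE_SIZE,
    	      "small configurations must fall back to PAGE_SIZE");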