author     Ingo Molnar <mingo@kernel.org>    2017-02-03 22:07:57 +0100
committer  Ingo Molnar <mingo@kernel.org>    2017-03-03 01:43:47 +0100
commit     d04b0ad37e4b6ac39a56c823ae76ab37cd044dc7 (patch)
tree       dbc70de4f29558e567a2efe1affa2c131cf1fb32
parent     c7af7877eeacfeaaf6a1b6f54c481292ef116837 (diff)
sched/headers: Move the PREEMPT_COUNT defines from <linux/sched.h> to <linux/preempt.h>
These defines are not really part of the scheduler's driver API, but are
related to the preempt count - so move them to <linux/preempt.h>.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/linux/preempt.h  21
-rw-r--r--  include/linux/sched.h    21
2 files changed, 21 insertions, 21 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7eeceac52dea..cae461224948 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -55,6 +55,27 @@
/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
+#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
+/*
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
+ *
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
+ */
+#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
+
+/*
+ * Initial preempt_count value; reflects the preempt_count schedule invariant
+ * which states that during context switches:
+ *
+ * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
+ *
+ * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
+ * Note: See finish_task_switch().
+ */
+#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
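For reference, here is a minimal userspace sketch (not kernel code) that mirrors the macro arithmetic moved by this patch. It assumes PREEMPT_OFFSET == 1 and the generic <asm-generic/preempt.h> value PREEMPT_ENABLED == 0 (architectures such as x86 fold PREEMPT_NEED_RESCHED into it), and lets CONFIG_PREEMPT_COUNT be toggled on the compiler command line:

/* preempt_count_values.c - hypothetical standalone mirror of the defines above.
 * Real kernels get these from <linux/preempt.h> and the arch <asm/preempt.h>;
 * the numeric assumptions here are PREEMPT_OFFSET == 1 and PREEMPT_ENABLED == 0.
 * Build: cc -DCONFIG_PREEMPT_COUNT preempt_count_values.c   (or omit the -D)
 */
#include <stdio.h>

#define PREEMPT_OFFSET		1	/* one preempt_disable() level (assumed) */
#define PREEMPT_ENABLED		0	/* asm-generic value (assumed) */

#ifdef CONFIG_PREEMPT_COUNT
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0	/* 0 on !PREEMPT_COUNT kernels */
#endif

/* The three defines moved by this patch, copied from the hunk above. */
#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

int main(void)
{
	/* INIT_PREEMPT_COUNT stays nonzero even when PREEMPT_DISABLE_OFFSET
	 * is 0, which is why its comment calls the value "unconditional". */
	printf("PREEMPT_DISABLED   = %d\n", PREEMPT_DISABLED);
	printf("INIT_PREEMPT_COUNT = %d\n", INIT_PREEMPT_COUNT);
	printf("FORK_PREEMPT_COUNT = %d\n", FORK_PREEMPT_COUNT);
	return 0;
}

Built with -DCONFIG_PREEMPT_COUNT this prints 1, 1 and 2, the last matching the context-switch invariant preempt_count() == 2*PREEMPT_DISABLE_OFFSET; without the define the output is 0, 1 and 0, reflecting the note that PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.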
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5398356e33c7..3b3e31da416e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -265,27 +265,6 @@ struct task_cputime_atomic {
.sum_exec_runtime = ATOMIC64_INIT(0), \
}
-#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
-
-/*
- * Disable preemption until the scheduler is running -- use an unconditional
- * value so that it also works on !PREEMPT_COUNT kernels.
- *
- * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
- */
-#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
-
-/*
- * Initial preempt_count value; reflects the preempt_count schedule invariant
- * which states that during context switches:
- *
- * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
- *
- * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
- * Note: See finish_task_switch().
- */
-#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
-
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime_atomic: atomic thread group interval timers.