author    Peter Zijlstra <peterz@infradead.org>	2017-05-08 16:51:41 +0200
committer Ingo Molnar <mingo@kernel.org>	2017-09-29 19:35:14 +0200
commit    2a2f5d4e44ed160a5ed822c94e04f918f9fbb487 (patch)
tree      044c01816758a1501c3565f6ebb53ef2c34c3ea9 /kernel/sched/sched.h
parent    9059393e4ec1c8c6623a120b405ef2c90b968d80 (diff)
sched/fair: Rewrite cfs_rq->removed_*avg
Since on wakeup migration we don't hold the rq->lock for the old CPU, we cannot update its state. Instead we add the removed 'load' to an atomic variable and have the next update on that CPU collect and process it.

Currently we have 2 atomic variables, which already have the issue that they can be read out-of-sync. Also, two atomic ops on a single cacheline are already more expensive than an uncontended lock. Since we want to add more, convert the thing over to an explicit cacheline with a lock in it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
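To illustrate the scheme the changelog describes, here is a minimal sketch of the producer side, loosely modeled on kernel/sched/fair.c's remove_entity_load_avg(); the exact body is an assumption based on the description above, not the literal patch:

/*
 * Producer (sketch): runs on wakeup migration, where the old CPU's
 * rq->lock is not held. All removed state is published under one
 * per-cfs_rq spinlock, so load_avg and util_avg can no longer be
 * read out-of-sync, and one lock round-trip replaces two atomic
 * ops on the same cacheline.
 */
static void remove_entity_load_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	unsigned long flags;

	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
	++cfs_rq->removed.nr;
	cfs_rq->removed.load_avg += se->avg.load_avg;
	cfs_rq->removed.util_avg += se->avg.util_avg;
	raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
}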
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a5d97460ee4e..2fd350a12bb7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -445,14 +445,19 @@ struct cfs_rq {
 	struct sched_avg avg;
 	u64 runnable_load_sum;
 	unsigned long runnable_load_avg;
+#ifndef CONFIG_64BIT
+	u64 load_last_update_time_copy;
+#endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	unsigned long tg_load_avg_contrib;
 	unsigned long propagate_avg;
 #endif
-	atomic_long_t removed_load_avg, removed_util_avg;
-#ifndef CONFIG_64BIT
-	u64 load_last_update_time_copy;
-#endif
+	struct {
+		raw_spinlock_t	lock ____cacheline_aligned;
+		int		nr;
+		unsigned long	load_avg;
+		unsigned long	util_avg;
+	} removed;
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
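For completeness, a sketch of the consumer side the changelog implies: the next PELT update on the owning CPU drains the struct in a single critical section. The placement inside update_cfs_rq_load_avg() and the use of fair.c's sub_positive() clamped-subtraction helper are assumptions; the literal body is not part of this diff excerpt:

/*
 * Consumer (sketch): runs under rq->lock on the owning CPU, e.g.
 * from update_cfs_rq_load_avg(). swap() drains the accumulated
 * totals in one go and leaves the struct zeroed for the next
 * producers, so load_avg and util_avg are always read together.
 */
if (cfs_rq->removed.nr) {
	unsigned long removed_load = 0, removed_util = 0;

	raw_spin_lock(&cfs_rq->removed.lock);
	swap(cfs_rq->removed.load_avg, removed_load);
	swap(cfs_rq->removed.util_avg, removed_util);
	cfs_rq->removed.nr = 0;
	raw_spin_unlock(&cfs_rq->removed.lock);

	sub_positive(&cfs_rq->avg.load_avg, removed_load);
	sub_positive(&cfs_rq->avg.util_avg, removed_util);
}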