From 8d495477d62e4397207f22a432fcaa86d9f2bc2d Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 3 Oct 2019 18:17:45 +0200
Subject: sched/cputime: Spare a seqcount lock/unlock cycle on context switch

On context switch we are locking the vtime seqcount of the scheduling-out
task twice:

* On vtime_task_switch_common(), when we flush the pending vtime through
  vtime_account_system()

* On arch_vtime_task_switch() to reset the vtime state.

This is pointless as these actions can be performed without the need
to unlock/lock in the middle. The reason these steps are separated is
to consolidate a very small amount of common code between
CONFIG_VIRT_CPU_ACCOUNTING_GEN and CONFIG_VIRT_CPU_ACCOUNTING_NATIVE.

Performance in this fast path is definitely a priority over artificial
code factorization, so split the task switch code between GEN and
NATIVE and mutualize the parts that can run under a single seqcount
locked block.

As a side effect, vtime_account_idle() becomes included in the seqcount
protection. This happens to be a welcome preparation in order to
properly support kcpustat under vtime in the future and fetch
CPUTIME_IDLE without race.

Signed-off-by: Frederic Weisbecker
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Wanpeng Li
Cc: Yauheni Kaliuta
Link: https://lkml.kernel.org/r/20191003161745.28464-3-frederic@kernel.org
Signed-off-by: Ingo Molnar
---
 include/linux/vtime.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

(limited to 'include/linux/vtime.h')

diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 2fd247f90408..d9160ab3667a 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -14,8 +14,12 @@ struct task_struct;
  * vtime_accounting_cpu_enabled() definitions/declarations
  */
 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
+
 static inline bool vtime_accounting_cpu_enabled(void) { return true; }
+extern void vtime_task_switch(struct task_struct *prev);
+
 #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
+
 /*
  * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
  * in that case and compute the tickless cputime.
@@ -36,33 +40,29 @@ static inline bool vtime_accounting_cpu_enabled(void)
 
 	return false;
 }
+
+extern void vtime_task_switch_generic(struct task_struct *prev);
+
+static inline void vtime_task_switch(struct task_struct *prev)
+{
+	if (vtime_accounting_cpu_enabled())
+		vtime_task_switch_generic(prev);
+}
+
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
 static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif
+static inline void vtime_task_switch(struct task_struct *prev) { }
 
+#endif
 
 /*
  * Common vtime APIs
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-
-#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
-extern void vtime_task_switch(struct task_struct *prev);
-#else
-extern void vtime_common_task_switch(struct task_struct *prev);
-static inline void vtime_task_switch(struct task_struct *prev)
-{
-	if (vtime_accounting_cpu_enabled())
-		vtime_common_task_switch(prev);
-}
-#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
-
 extern void vtime_account_kernel(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
-
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-
-static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_kernel(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
--
cgit
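
Note: the hunks above only show the header-side plumbing, since this view is
limited to include/linux/vtime.h; the body of vtime_task_switch_generic()
lives in kernel/sched/cputime.c and is not visible here. As a rough
illustrative sketch of the idea described in the changelog (flush the pending
vtime and reset the vtime state of the scheduling-out task under a single
seqcount write section), it could look roughly like the code below. The
VTIME_INACTIVE state name and the __vtime_account_kernel() helper are
assumptions made for this example, not quoted from the commit.

/*
 * Illustrative sketch only, not taken from this commit. The point is that
 * flushing the pending vtime and resetting the vtime state of the
 * scheduling-out task now happen inside one
 * write_seqcount_begin()/write_seqcount_end() section instead of two
 * back-to-back locked sections.
 */
void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(prev))
		vtime_account_idle(prev);		/* idle time now accounted under the seqcount */
	else
		__vtime_account_kernel(prev, vtime);	/* assumed helper: flush pending time */
	vtime->state = VTIME_INACTIVE;			/* assumed state name: mark the task switched out */
	write_seqcount_end(&vtime->seqcount);
}

The header changes above then simply route the CONFIG_VIRT_CPU_ACCOUNTING_GEN
task-switch path to that single function, while NATIVE keeps its own extern
vtime_task_switch() declaration.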