From 1051408f7ecdcd1baf86f5dd5fdc44740be3b23d Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 1 Feb 2017 18:42:41 +0100
Subject: sched/autogroup: Rename auto_group.[ch] to autogroup.[ch]

The names are all 'autogroup', not 'auto_group' - so rename the
kernel/sched/auto_group.[ch] to match the existing nomenclature.

Cc: Peter Zijlstra
Cc: Mike Galbraith
Cc: Thomas Gleixner
Cc: Linus Torvalds
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/Makefile     |   2 +-
 kernel/sched/auto_group.c | 271 ----------------------------------------------
 kernel/sched/auto_group.h |  64 -----------
 kernel/sched/autogroup.c  | 271 ++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/autogroup.h  |  64 +++++++++++
 kernel/sched/sched.h      |   2 +-
 6 files changed, 337 insertions(+), 337 deletions(-)
 delete mode 100644 kernel/sched/auto_group.c
 delete mode 100644 kernel/sched/auto_group.h
 create mode 100644 kernel/sched/autogroup.c
 create mode 100644 kernel/sched/autogroup.h

diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 130ce8ac725b..89ab6758667b 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -19,7 +19,7 @@ obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o swait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
-obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
deleted file mode 100644
index da39489d2d80..000000000000
--- a/kernel/sched/auto_group.c
+++ /dev/null
@@ -1,271 +0,0 @@
-#include "sched.h"
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/kallsyms.h>
-#include <linux/utsname.h>
-#include <linux/security.h>
-#include <linux/export.h>
-
-unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
-static struct autogroup autogroup_default;
-static atomic_t autogroup_seq_nr;
-
-void __init autogroup_init(struct task_struct *init_task)
-{
-	autogroup_default.tg = &root_task_group;
-	kref_init(&autogroup_default.kref);
-	init_rwsem(&autogroup_default.lock);
-	init_task->signal->autogroup = &autogroup_default;
-}
-
-void autogroup_free(struct task_group *tg)
-{
-	kfree(tg->autogroup);
-}
-
-static inline void autogroup_destroy(struct kref *kref)
-{
-	struct autogroup *ag = container_of(kref, struct autogroup, kref);
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	/* We've redirected RT tasks to the root task group... */
-	ag->tg->rt_se = NULL;
-	ag->tg->rt_rq = NULL;
-#endif
-	sched_offline_group(ag->tg);
-	sched_destroy_group(ag->tg);
-}
-
-static inline void autogroup_kref_put(struct autogroup *ag)
-{
-	kref_put(&ag->kref, autogroup_destroy);
-}
-
-static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
-{
-	kref_get(&ag->kref);
-	return ag;
-}
-
-static inline struct autogroup *autogroup_task_get(struct task_struct *p)
-{
-	struct autogroup *ag;
-	unsigned long flags;
-
-	if (!lock_task_sighand(p, &flags))
-		return autogroup_kref_get(&autogroup_default);
-
-	ag = autogroup_kref_get(p->signal->autogroup);
-	unlock_task_sighand(p, &flags);
-
-	return ag;
-}
-
-static inline struct autogroup *autogroup_create(void)
-{
-	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
-	struct task_group *tg;
-
-	if (!ag)
-		goto out_fail;
-
-	tg = sched_create_group(&root_task_group);
-
-	if (IS_ERR(tg))
-		goto out_free;
-
-	kref_init(&ag->kref);
-	init_rwsem(&ag->lock);
-	ag->id = atomic_inc_return(&autogroup_seq_nr);
-	ag->tg = tg;
-#ifdef CONFIG_RT_GROUP_SCHED
-	/*
-	 * Autogroup RT tasks are redirected to the root task group
-	 * so we don't have to move tasks around upon policy change,
-	 * or flail around trying to allocate bandwidth on the fly.
-	 * A bandwidth exception in __sched_setscheduler() allows
-	 * the policy change to proceed.
-	 */
-	free_rt_sched_group(tg);
-	tg->rt_se = root_task_group.rt_se;
-	tg->rt_rq = root_task_group.rt_rq;
-#endif
-	tg->autogroup = ag;
-
-	sched_online_group(tg, &root_task_group);
-	return ag;
-
-out_free:
-	kfree(ag);
-out_fail:
-	if (printk_ratelimit()) {
-		printk(KERN_WARNING "autogroup_create: %s failure.\n",
-			ag ? "sched_create_group()" : "kmalloc()");
-	}
-
-	return autogroup_kref_get(&autogroup_default);
-}
-
-bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
-{
-	if (tg != &root_task_group)
-		return false;
-	/*
-	 * If we race with autogroup_move_group() the caller can use the old
-	 * value of signal->autogroup but in this case sched_move_task() will
-	 * be called again before autogroup_kref_put().
-	 *
-	 * However, there is no way sched_autogroup_exit_task() could tell us
-	 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
-	 */
-	if (p->flags & PF_EXITING)
-		return false;
-
-	return true;
-}
-
-void sched_autogroup_exit_task(struct task_struct *p)
-{
-	/*
-	 * We are going to call exit_notify() and autogroup_move_group() can't
-	 * see this thread after that: we can no longer use signal->autogroup.
-	 * See the PF_EXITING check in task_wants_autogroup().
-	 */
-	sched_move_task(p);
-}
-
-static void
-autogroup_move_group(struct task_struct *p, struct autogroup *ag)
-{
-	struct autogroup *prev;
-	struct task_struct *t;
-	unsigned long flags;
-
-	BUG_ON(!lock_task_sighand(p, &flags));
-
-	prev = p->signal->autogroup;
-	if (prev == ag) {
-		unlock_task_sighand(p, &flags);
-		return;
-	}
-
-	p->signal->autogroup = autogroup_kref_get(ag);
-	/*
-	 * We can't avoid sched_move_task() after we changed signal->autogroup,
-	 * this process can already run with task_group() == prev->tg or we can
-	 * race with cgroup code which can read autogroup = prev under rq->lock.
-	 * In the latter case for_each_thread() can not miss a migrating thread,
-	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
-	 * can't be removed from thread list, we hold ->siglock.
-	 *
-	 * If an exiting thread was already removed from thread list we rely on
-	 * sched_autogroup_exit_task().
-	 */
-	for_each_thread(p, t)
-		sched_move_task(t);
-
-	unlock_task_sighand(p, &flags);
-	autogroup_kref_put(prev);
-}
-
-/* Allocates GFP_KERNEL, cannot be called under any spinlock */
-void sched_autogroup_create_attach(struct task_struct *p)
-{
-	struct autogroup *ag = autogroup_create();
-
-	autogroup_move_group(p, ag);
-	/* drop extra reference added by autogroup_create() */
-	autogroup_kref_put(ag);
-}
-EXPORT_SYMBOL(sched_autogroup_create_attach);
-
-/* Cannot be called under siglock. Currently has no users */
-void sched_autogroup_detach(struct task_struct *p)
-{
-	autogroup_move_group(p, &autogroup_default);
-}
-EXPORT_SYMBOL(sched_autogroup_detach);
-
-void sched_autogroup_fork(struct signal_struct *sig)
-{
-	sig->autogroup = autogroup_task_get(current);
-}
-
-void sched_autogroup_exit(struct signal_struct *sig)
-{
-	autogroup_kref_put(sig->autogroup);
-}
-
-static int __init setup_autogroup(char *str)
-{
-	sysctl_sched_autogroup_enabled = 0;
-
-	return 1;
-}
-
-__setup("noautogroup", setup_autogroup);
-
-#ifdef CONFIG_PROC_FS
-
-int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
-{
-	static unsigned long next = INITIAL_JIFFIES;
-	struct autogroup *ag;
-	unsigned long shares;
-	int err;
-
-	if (nice < MIN_NICE || nice > MAX_NICE)
-		return -EINVAL;
-
-	err = security_task_setnice(current, nice);
-	if (err)
-		return err;
-
-	if (nice < 0 && !can_nice(current, nice))
-		return -EPERM;
-
-	/* this is a heavy operation taking global locks.. */
-	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
-		return -EAGAIN;
-
-	next = HZ / 10 + jiffies;
-	ag = autogroup_task_get(p);
-	shares = scale_load(sched_prio_to_weight[nice + 20]);
-
-	down_write(&ag->lock);
-	err = sched_group_set_shares(ag->tg, shares);
-	if (!err)
-		ag->nice = nice;
-	up_write(&ag->lock);
-
-	autogroup_kref_put(ag);
-
-	return err;
-}
-
-void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
-{
-	struct autogroup *ag = autogroup_task_get(p);
-
-	if (!task_group_is_autogroup(ag->tg))
-		goto out;
-
-	down_read(&ag->lock);
-	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
-	up_read(&ag->lock);
-
-out:
-	autogroup_kref_put(ag);
-}
-#endif /* CONFIG_PROC_FS */
-
-#ifdef CONFIG_SCHED_DEBUG
-int autogroup_path(struct task_group *tg, char *buf, int buflen)
-{
-	if (!task_group_is_autogroup(tg))
-		return 0;
-
-	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
-}
-#endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
deleted file mode 100644
index 890c95f2587a..000000000000
--- a/kernel/sched/auto_group.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifdef CONFIG_SCHED_AUTOGROUP
-
-#include <linux/kref.h>
-#include <linux/rwsem.h>
-
-struct autogroup {
-	/*
-	 * reference doesn't mean how many thread attach to this
-	 * autogroup now. It just stands for the number of task
-	 * could use this autogroup.
-	 */
-	struct kref		kref;
-	struct task_group	*tg;
-	struct rw_semaphore	lock;
-	unsigned long		id;
-	int			nice;
-};
-
-extern void autogroup_init(struct task_struct *init_task);
-extern void autogroup_free(struct task_group *tg);
-
-static inline bool task_group_is_autogroup(struct task_group *tg)
-{
-	return !!tg->autogroup;
-}
-
-extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
-
-static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg)
-{
-	int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);
-
-	if (enabled && task_wants_autogroup(p, tg))
-		return p->signal->autogroup->tg;
-
-	return tg;
-}
-
-extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
-
-#else /* !CONFIG_SCHED_AUTOGROUP */
-
-static inline void autogroup_init(struct task_struct *init_task) { }
-static inline void autogroup_free(struct task_group *tg) { }
-static inline bool task_group_is_autogroup(struct task_group *tg)
-{
-	return 0;
-}
-
-static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg)
-{
-	return tg;
-}
-
-#ifdef CONFIG_SCHED_DEBUG
-static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
-{
-	return 0;
-}
-#endif
-
-#endif /* CONFIG_SCHED_AUTOGROUP */
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
new file mode 100644
index 000000000000..da39489d2d80
--- /dev/null
+++ b/kernel/sched/autogroup.c
@@ -0,0 +1,271 @@
+#include "sched.h"
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/utsname.h>
+#include <linux/security.h>
+#include <linux/export.h>
+
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+static struct autogroup autogroup_default;
+static atomic_t autogroup_seq_nr;
+
+void __init autogroup_init(struct task_struct *init_task)
+{
+	autogroup_default.tg = &root_task_group;
+	kref_init(&autogroup_default.kref);
+	init_rwsem(&autogroup_default.lock);
+	init_task->signal->autogroup = &autogroup_default;
+}
+
+void autogroup_free(struct task_group *tg)
+{
+	kfree(tg->autogroup);
+}
+
+static inline void autogroup_destroy(struct kref *kref)
+{
+	struct autogroup *ag = container_of(kref, struct autogroup, kref);
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	/* We've redirected RT tasks to the root task group... */
+	ag->tg->rt_se = NULL;
+	ag->tg->rt_rq = NULL;
+#endif
+	sched_offline_group(ag->tg);
+	sched_destroy_group(ag->tg);
+}
+
+static inline void autogroup_kref_put(struct autogroup *ag)
+{
+	kref_put(&ag->kref, autogroup_destroy);
+}
+
+static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
+{
+	kref_get(&ag->kref);
+	return ag;
+}
+
+static inline struct autogroup *autogroup_task_get(struct task_struct *p)
+{
+	struct autogroup *ag;
+	unsigned long flags;
+
+	if (!lock_task_sighand(p, &flags))
+		return autogroup_kref_get(&autogroup_default);
+
+	ag = autogroup_kref_get(p->signal->autogroup);
+	unlock_task_sighand(p, &flags);
+
+	return ag;
+}
+
+static inline struct autogroup *autogroup_create(void)
+{
+	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+	struct task_group *tg;
+
+	if (!ag)
+		goto out_fail;
+
+	tg = sched_create_group(&root_task_group);
+
+	if (IS_ERR(tg))
+		goto out_free;
+
+	kref_init(&ag->kref);
+	init_rwsem(&ag->lock);
+	ag->id = atomic_inc_return(&autogroup_seq_nr);
+	ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Autogroup RT tasks are redirected to the root task group
+	 * so we don't have to move tasks around upon policy change,
+	 * or flail around trying to allocate bandwidth on the fly.
+	 * A bandwidth exception in __sched_setscheduler() allows
+	 * the policy change to proceed.
+	 */
+	free_rt_sched_group(tg);
+	tg->rt_se = root_task_group.rt_se;
+	tg->rt_rq = root_task_group.rt_rq;
+#endif
+	tg->autogroup = ag;
+
+	sched_online_group(tg, &root_task_group);
+	return ag;
+
+out_free:
+	kfree(ag);
+out_fail:
+	if (printk_ratelimit()) {
+		printk(KERN_WARNING "autogroup_create: %s failure.\n",
+			ag ? "sched_create_group()" : "kmalloc()");
+	}
+
+	return autogroup_kref_get(&autogroup_default);
+}
+
+bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
+{
+	if (tg != &root_task_group)
+		return false;
+	/*
+	 * If we race with autogroup_move_group() the caller can use the old
+	 * value of signal->autogroup but in this case sched_move_task() will
+	 * be called again before autogroup_kref_put().
+	 *
+	 * However, there is no way sched_autogroup_exit_task() could tell us
+	 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
+	 */
+	if (p->flags & PF_EXITING)
+		return false;
+
+	return true;
+}
+
+void sched_autogroup_exit_task(struct task_struct *p)
+{
+	/*
+	 * We are going to call exit_notify() and autogroup_move_group() can't
+	 * see this thread after that: we can no longer use signal->autogroup.
+	 * See the PF_EXITING check in task_wants_autogroup().
+	 */
+	sched_move_task(p);
+}
+
+static void
+autogroup_move_group(struct task_struct *p, struct autogroup *ag)
+{
+	struct autogroup *prev;
+	struct task_struct *t;
+	unsigned long flags;
+
+	BUG_ON(!lock_task_sighand(p, &flags));
+
+	prev = p->signal->autogroup;
+	if (prev == ag) {
+		unlock_task_sighand(p, &flags);
+		return;
+	}
+
+	p->signal->autogroup = autogroup_kref_get(ag);
+	/*
+	 * We can't avoid sched_move_task() after we changed signal->autogroup,
+	 * this process can already run with task_group() == prev->tg or we can
+	 * race with cgroup code which can read autogroup = prev under rq->lock.
+	 * In the latter case for_each_thread() can not miss a migrating thread,
+	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+	 * can't be removed from thread list, we hold ->siglock.
+	 *
+	 * If an exiting thread was already removed from thread list we rely on
+	 * sched_autogroup_exit_task().
+	 */
+	for_each_thread(p, t)
+		sched_move_task(t);
+
+	unlock_task_sighand(p, &flags);
+	autogroup_kref_put(prev);
+}
+
+/* Allocates GFP_KERNEL, cannot be called under any spinlock */
+void sched_autogroup_create_attach(struct task_struct *p)
+{
+	struct autogroup *ag = autogroup_create();
+
+	autogroup_move_group(p, ag);
+	/* drop extra reference added by autogroup_create() */
+	autogroup_kref_put(ag);
+}
+EXPORT_SYMBOL(sched_autogroup_create_attach);
+
+/* Cannot be called under siglock. Currently has no users */
+void sched_autogroup_detach(struct task_struct *p)
+{
+	autogroup_move_group(p, &autogroup_default);
+}
+EXPORT_SYMBOL(sched_autogroup_detach);
+
+void sched_autogroup_fork(struct signal_struct *sig)
+{
+	sig->autogroup = autogroup_task_get(current);
+}
+
+void sched_autogroup_exit(struct signal_struct *sig)
+{
+	autogroup_kref_put(sig->autogroup);
+}
+
+static int __init setup_autogroup(char *str)
+{
+	sysctl_sched_autogroup_enabled = 0;
+
+	return 1;
+}
+
+__setup("noautogroup", setup_autogroup);
+
+#ifdef CONFIG_PROC_FS
+
+int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
+{
+	static unsigned long next = INITIAL_JIFFIES;
+	struct autogroup *ag;
+	unsigned long shares;
+	int err;
+
+	if (nice < MIN_NICE || nice > MAX_NICE)
+		return -EINVAL;
+
+	err = security_task_setnice(current, nice);
+	if (err)
+		return err;
+
+	if (nice < 0 && !can_nice(current, nice))
+		return -EPERM;
+
+	/* this is a heavy operation taking global locks.. */
+	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
+		return -EAGAIN;
+
+	next = HZ / 10 + jiffies;
+	ag = autogroup_task_get(p);
+	shares = scale_load(sched_prio_to_weight[nice + 20]);
+
+	down_write(&ag->lock);
+	err = sched_group_set_shares(ag->tg, shares);
+	if (!err)
+		ag->nice = nice;
+	up_write(&ag->lock);
+
+	autogroup_kref_put(ag);
+
+	return err;
+}
+
+void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
+{
+	struct autogroup *ag = autogroup_task_get(p);
+
+	if (!task_group_is_autogroup(ag->tg))
+		goto out;
+
+	down_read(&ag->lock);
+	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
+	up_read(&ag->lock);
+
+out:
+	autogroup_kref_put(ag);
+}
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_SCHED_DEBUG
+int autogroup_path(struct task_group *tg, char *buf, int buflen)
+{
+	if (!task_group_is_autogroup(tg))
+		return 0;
+
+	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
+}
+#endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h
new file mode 100644
index 000000000000..890c95f2587a
--- /dev/null
+++ b/kernel/sched/autogroup.h
@@ -0,0 +1,64 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+
+#include <linux/kref.h>
+#include <linux/rwsem.h>
+
+struct autogroup {
+	/*
+	 * reference doesn't mean how many thread attach to this
+	 * autogroup now. It just stands for the number of task
+	 * could use this autogroup.
+	 */
+	struct kref		kref;
+	struct task_group	*tg;
+	struct rw_semaphore	lock;
+	unsigned long		id;
+	int			nice;
+};
+
+extern void autogroup_init(struct task_struct *init_task);
+extern void autogroup_free(struct task_group *tg);
+
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return !!tg->autogroup;
+}
+
+extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+	int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);
+
+	if (enabled && task_wants_autogroup(p, tg))
+		return p->signal->autogroup->tg;
+
+	return tg;
+}
+
+extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
+
+#else /* !CONFIG_SCHED_AUTOGROUP */
+
+static inline void autogroup_init(struct task_struct *init_task) { }
+static inline void autogroup_free(struct task_group *tg) { }
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return 0;
+}
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+	return tg;
+}
+
+#ifdef CONFIG_SCHED_DEBUG
+static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
+{
+	return 0;
+}
+#endif
+
+#endif /* CONFIG_SCHED_AUTOGROUP */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 17ed94b9b413..71b10a9b73cf 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1069,7 +1069,7 @@ static inline void sched_ttwu_pending(void) { }
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
-#include "auto_group.h"
+#include "autogroup.h"
 
 #ifdef CONFIG_CGROUP_SCHED
 
-- 
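Usage note (not part of the patch): the CONFIG_PROC_FS block in autogroup.c above
backs the /proc/<pid>/autogroup file, which is the usual way to observe and tune
autogroups from userspace. Below is a minimal, illustrative userspace sketch of
that round trip, assuming CONFIG_SCHED_AUTOGROUP=y; the file name and the
"/autogroup-%ld nice %d" format come from proc_sched_autogroup_show_task() above,
and writes land in proc_sched_autogroup_set_nice(), which may return EPERM
(negative nice without privilege), EAGAIN (non-CAP_SYS_ADMIN writers are
rate-limited to one update per 100ms), or EINVAL (nice out of range).

/*
 * Illustrative sketch only. Read /proc/<pid>/autogroup and, if a second
 * argument is given, write a nice value for that task's autogroup.
 *
 * Usage: ./autogroup-nice [pid|self] [nice]
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *pid = (argc > 1) ? argv[1] : "self";
	char path[64], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/autogroup", pid);

	f = fopen(path, "r");
	if (!f || !fgets(line, sizeof(line), f)) {
		perror(path);
		return 1;
	}
	fclose(f);
	/* Prints e.g. "/autogroup-42 nice 0" -- the seq_printf() format above */
	printf("%s: %s", path, line);

	if (argc > 2) {
		f = fopen(path, "w");
		if (!f) {
			perror(path);
			return 1;
		}
		fprintf(f, "%d\n", atoi(argv[2]));
		if (fclose(f) != 0) {	/* EPERM/EAGAIN/EINVAL surface here */
			perror(path);
			return 1;
		}
	}
	return 0;
}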