author		Ingo Molnar <mingo@kernel.org>		2018-12-03 11:42:17 +0100
committer	Ingo Molnar <mingo@kernel.org>		2018-12-03 11:42:17 +0100
commit		5f675231e456cb599b283f8361f01cf34b0617df (patch)
tree		afb2bdfd6fdbb18336146f41ba7659120a5ff9d2 /kernel/sched
parent		3e184501083c38fa091f640acb13af17a21fd228 (diff)
parent		2595646791c319cadfdbf271563aac97d0843dc7 (diff)
Merge tag 'v4.20-rc5' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	53
-rw-r--r--	kernel/sched/psi.c	71
-rw-r--r--	kernel/sched/sched.h	4
-rw-r--r--	kernel/sched/stats.h	8
4 files changed, 71 insertions, 65 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5afb868f7339..8050f266751a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2881,6 +2881,18 @@ unsigned long long nr_context_switches(void)
}
/*
+ * Consumers of these two interfaces, like for example the cpuidle menu
+ * governor, are using nonsensical data. Preferring shallow idle state selection
+ * for a CPU that has IO-wait which might not even end up running the task when
+ * it does become runnable.
+ */
+
+unsigned long nr_iowait_cpu(int cpu)
+{
+ return atomic_read(&cpu_rq(cpu)->nr_iowait);
+}
+
+/*
* IO-wait accounting, and how it's mostly bollocks (on SMP).
*
* The idea behind IO-wait accounting is to account the idle time that we could
@@ -2915,31 +2927,11 @@ unsigned long nr_iowait(void)
unsigned long i, sum = 0;
for_each_possible_cpu(i)
- sum += atomic_read(&cpu_rq(i)->nr_iowait);
+ sum += nr_iowait_cpu(i);
return sum;
}
-/*
- * Consumers of these two interfaces, like for example the cpuidle menu
- * governor, are using nonsensical data. Preferring shallow idle state selection
- * for a CPU that has IO-wait which might not even end up running the task when
- * it does become runnable.
- */
-
-unsigned long nr_iowait_cpu(int cpu)
-{
- struct rq *this = cpu_rq(cpu);
- return atomic_read(&this->nr_iowait);
-}
-
-void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
-{
- struct rq *rq = this_rq();
- *nr_waiters = atomic_read(&rq->nr_iowait);
- *load = rq->load.weight;
-}
-
#ifdef CONFIG_SMP
/*
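The hunk above moves nr_iowait_cpu() ahead of nr_iowait() and drops get_iowait_load(), so the global count becomes a plain sum of the per-CPU helper. A minimal userspace model of that relationship, using C11 atomics; NR_CPUS, the counter array and main() are illustrative stand-ins, not kernel code:

/* Userspace model of the per-CPU nr_iowait counters: nr_iowait_cpu()
 * reads one CPU's counter and nr_iowait() sums them all, mirroring the
 * refactor above. Not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_ulong nr_iowait_counter[NR_CPUS];

static unsigned long nr_iowait_cpu(int cpu)
{
	return atomic_load(&nr_iowait_counter[cpu]);
}

static unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for (i = 0; i < NR_CPUS; i++)
		sum += nr_iowait_cpu(i);
	return sum;
}

int main(void)
{
	atomic_fetch_add(&nr_iowait_counter[1], 2); /* two iowaiters on CPU 1 */
	atomic_fetch_add(&nr_iowait_counter[3], 1); /* one on CPU 3 */
	printf("cpu1=%lu total=%lu\n", nr_iowait_cpu(1), nr_iowait());
	return 0;
}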
@@ -5746,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
#ifdef CONFIG_SCHED_SMT
/*
- * The sched_smt_present static key needs to be evaluated on every
- * hotplug event because at boot time SMT might be disabled when
- * the number of booted CPUs is limited.
- *
- * If then later a sibling gets hotplugged, then the key would stay
- * off and SMT scheduling would never be functional.
+ * When going up, increment the number of cores with SMT present.
*/
- if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
- static_branch_enable_cpuslocked(&sched_smt_present);
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_inc_cpuslocked(&sched_smt_present);
#endif
set_cpu_active(cpu, true);
@@ -5798,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
*/
synchronize_rcu_mult(call_rcu, call_rcu_sched);
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going down, decrement the number of cores with SMT present.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
if (!sched_smp_initialized)
return 0;
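Taken together, the two core.c hunks above turn sched_smt_present from a one-way switch into a refcount: the second online sibling of each core increments it on the way up, and the symmetric condition decrements it on the way down, so the key can go off again once no core has two siblings online. A rough userspace sketch of that invariant; the topology table, counter and helpers are assumptions for illustration, with a plain int standing in for the static key:

/* Model of the refcounted sched_smt_present key. Not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool cpu_online[NR_CPUS];
/* Assumed topology: CPUs 0/2 are siblings of core 0, CPUs 1/3 of core 1. */
static const int smt_sibling[NR_CPUS] = { 2, 3, 0, 1 };
static int sched_smt_present; /* stands in for the static key's refcount */

static int smt_mask_weight(int cpu)
{
	return (cpu_online[cpu] ? 1 : 0) +
	       (cpu_online[smt_sibling[cpu]] ? 1 : 0);
}

static void cpu_up(int cpu)
{
	cpu_online[cpu] = true;
	if (smt_mask_weight(cpu) == 2)	/* second sibling of this core */
		sched_smt_present++;	/* static_branch_inc_cpuslocked() */
}

static void cpu_down(int cpu)
{
	if (smt_mask_weight(cpu) == 2)	/* last full pair about to break */
		sched_smt_present--;	/* static_branch_dec_cpuslocked() */
	cpu_online[cpu] = false;
}

int main(void)
{
	cpu_up(0); cpu_up(1); cpu_up(2); cpu_up(3);
	printf("refcount after boot: %d\n", sched_smt_present);      /* 2 */
	cpu_down(3); cpu_down(2);
	printf("refcount after offlining: %d\n", sched_smt_present); /* 0 */
	return 0;
}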
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7cdecfc010af..fe24de3fbc93 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -136,8 +136,18 @@
static int psi_bug __read_mostly;
-bool psi_disabled __read_mostly;
-core_param(psi_disabled, psi_disabled, bool, 0644);
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+bool psi_enable;
+#else
+bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+ return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
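psi_disabled stops being a writable core_param and becomes a static key plus an early "psi=" __setup handler, with CONFIG_PSI_DEFAULT_DISABLED choosing the default. The handler leans on kstrtobool(), which accepts forms like "1"/"0", "y"/"n" and "on"/"off". A simplified userspace model of that parsing; my_kstrtobool() is a stand-in, not the kernel function:

/* Simplified model of the boolean parsing behind the psi= handler. */
#include <stdbool.h>
#include <stdio.h>

static int my_kstrtobool(const char *s, bool *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case '1': *res = true;  return 0;
	case 'n': case 'N': case '0': *res = false; return 0;
	case 'o': case 'O':
		switch (s[1]) {
		case 'n': case 'N': *res = true;  return 0;  /* "on" */
		case 'f': case 'F': *res = false; return 0;  /* "off" */
		}
		/* fall through */
	default:
		return -1;	/* unrecognized: setup_psi() reports failure */
	}
}

int main(void)
{
	bool psi_enable;
	const char *cmdline_args[] = { "1", "off", "bogus" };

	for (int i = 0; i < 3; i++) {
		if (my_kstrtobool(cmdline_args[i], &psi_enable) == 0)
			printf("psi=%s -> %d\n", cmdline_args[i], psi_enable);
		else
			printf("psi=%s -> rejected\n", cmdline_args[i]);
	}
	return 0;
}

Booting with psi=0, or building with CONFIG_PSI_DEFAULT_DISABLED=y, leaves psi_enable false, which is what makes psi_init() below flip the static key.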
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
void __init psi_init(void)
{
- if (psi_disabled)
+ if (!psi_enable) {
+ static_branch_enable(&psi_disabled);
return;
+ }
psi_period = jiffies_to_nsecs(PSI_FREQ);
group_init(&psi_system);
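With the key defaulting to false, psi_init() only has to flip it when PSI was switched off at boot; every fast-path hook then bails out behind static_branch_likely(), which the kernel patches into a jump/NOP instead of loading a bool on each call. A compact userspace model of the gate, where an ordinary bool stands in for the key and the hook body is illustrative:

/* Model of the psi_disabled static-key pattern. Not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool psi_disabled_key;	/* DEFINE_STATIC_KEY_FALSE(psi_disabled) */
static bool psi_enable = true;	/* set from the psi= boot parameter */

static void psi_init(void)
{
	if (!psi_enable)
		psi_disabled_key = true;	/* static_branch_enable() */
}

static void psi_enqueue_hook(void)
{
	if (psi_disabled_key)	/* static_branch_likely() in the kernel */
		return;
	printf("PSI accounting runs\n");
}

int main(void)
{
	psi_init();
	psi_enqueue_hook();
	return 0;
}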
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
*flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return 0;
cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
void psi_cgroup_free(struct cgroup *cgroup)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -633,38 +645,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
*/
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
- bool move_psi = !psi_disabled;
unsigned int task_flags = 0;
struct rq_flags rf;
struct rq *rq;
- if (move_psi) {
- rq = task_rq_lock(task, &rf);
+ if (static_branch_likely(&psi_disabled)) {
+ /*
+ * Lame to do this here, but the scheduler cannot be locked
+ * from the outside, so we move cgroups from inside sched/.
+ */
+ rcu_assign_pointer(task->cgroups, to);
+ return;
+ }
- if (task_on_rq_queued(task))
- task_flags = TSK_RUNNING;
- else if (task->in_iowait)
- task_flags = TSK_IOWAIT;
+ rq = task_rq_lock(task, &rf);
- if (task->flags & PF_MEMSTALL)
- task_flags |= TSK_MEMSTALL;
+ if (task_on_rq_queued(task))
+ task_flags = TSK_RUNNING;
+ else if (task->in_iowait)
+ task_flags = TSK_IOWAIT;
- if (task_flags)
- psi_task_change(task, task_flags, 0);
- }
+ if (task->flags & PF_MEMSTALL)
+ task_flags |= TSK_MEMSTALL;
- /*
- * Lame to do this here, but the scheduler cannot be locked
- * from the outside, so we move cgroups from inside sched/.
- */
+ if (task_flags)
+ psi_task_change(task, task_flags, 0);
+
+ /* See comment above */
rcu_assign_pointer(task->cgroups, to);
- if (move_psi) {
- if (task_flags)
- psi_task_change(task, 0, task_flags);
+ if (task_flags)
+ psi_task_change(task, 0, task_flags);
- task_rq_unlock(rq, task, &rf);
- }
+ task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */
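The cgroup_move_task() rewrite above inverts the old move_psi bookkeeping: the disabled case now returns early after nothing more than the cgroup pointer switch, while the enabled case holds the rq lock, clears the task's PSI flags in the old group, moves the pointer, then re-applies the same flags so they land in the new group's accounting. A toy userspace model of that clear/move/set ordering; struct group, struct task and move_task() are invented for illustration:

/* Model of the clear -> move -> set ordering in cgroup_move_task(). */
#include <stdio.h>

struct group { int nr_running; const char *name; };
struct task  { int flags; struct group *grp; };	/* flags: 1 == TSK_RUNNING */

static void psi_task_change(struct task *t, int clear, int set)
{
	if (clear & 1) t->grp->nr_running--;
	if (set & 1)   t->grp->nr_running++;
}

static void move_task(struct task *t, struct group *to)
{
	int flags = t->flags;

	if (flags) psi_task_change(t, flags, 0);	/* drop from old group */
	t->grp = to;					/* rcu_assign_pointer() */
	if (flags) psi_task_change(t, 0, flags);	/* add to new group */
}

int main(void)
{
	struct group a = { 1, "a" }, b = { 0, "b" };
	struct task t = { 1, &a };

	move_task(&t, &b);
	printf("a=%d b=%d\n", a.nr_running, b.nr_running);	/* a=0 b=1 */
	return 0;
}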
@@ -672,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
int full;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
update_stats(group);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0e052a50fcd..71cd8b710599 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -23,6 +23,7 @@
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
+#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
@@ -941,9 +942,6 @@ static inline int cpu_of(struct rq *rq)
#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
extern void __update_idle_core(struct rq *rq);
static inline void update_idle_core(struct rq *rq)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 4904c4677000..aa0de240fb41 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
int clear = 0, set = TSK_RUNNING;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
int clear = TSK_RUNNING, set = 0;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
static inline void psi_ttwu_dequeue(struct task_struct *p)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
/*
* Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
static inline void psi_task_tick(struct rq *rq)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (unlikely(rq->curr->flags & PF_MEMSTALL))