path: root/kernel/sched/fair.c
author     Mauro Carvalho Chehab <mchehab@s-opensource.com>  2016-11-16 16:42:27 -0200
committer  Mauro Carvalho Chehab <mchehab@s-opensource.com>  2016-11-16 16:42:27 -0200
commit     36f94a5cf0f9afb527f18166ae56bd3cc7204f63 (patch)
tree       88d5d356343c2ddbdce083597dc0a63203171964 /kernel/sched/fair.c
parent     c0026c7bfb95c250c3e34fde59f96ad72fd730d6 (diff)
parent     a25f0944ba9b1d8a6813fd6f1a86f1bd59ac25a6 (diff)
Merge tag 'v4.9-rc5' into patchwork
Linux 4.9-rc5

* tag 'v4.9-rc5': (1102 commits)
  Linux 4.9-rc5
  gp8psk: Fix DVB frontend attach
  gp8psk: fix gp8psk_usb_in_op() logic
  dvb-usb: move data_mutex to struct dvb_usb_device
  iio: maxim_thermocouple: detect invalid storage size in read()
  aoe: fix crash in page count manipulation
  lightnvm: invalid offset calculation for lba_shift
  Kbuild: enable -Wmaybe-uninitialized warnings by default
  pcmcia: fix return value of soc_pcmcia_regulator_set
  infiniband: shut up a maybe-uninitialized warning
  crypto: aesni: shut up -Wmaybe-uninitialized warning
  rc: print correct variable for z8f0811
  dib0700: fix nec repeat handling
  s390: pci: don't print uninitialized data for debugging
  nios2: fix timer initcall return value
  x86: apm: avoid uninitialized data
  NFSv4.1: work around -Wmaybe-uninitialized warning
  Kbuild: enable -Wmaybe-uninitialized warning for "make W=1"
  lib/stackdepot: export save/fetch stack for drivers
  mm: kmemleak: scan .data.ro_after_init
  ...
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d4ad72f8f3c..c242944f5cbd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
* will definitely be updated (after enqueue).
*/
*/
sa->period_contrib = 1023;
- sa->load_avg = scale_load_down(se->load.weight);
+ /*
+ * Tasks are initialized with full load to be seen as heavy tasks until
+ * they get a chance to stabilize to their real load level.
+ * Group entities are initialized with zero load to reflect the fact that
+ * nothing has been attached to the task group yet.
+ */
+ if (entity_is_task(se))
+ sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
/*
* At this point, util_avg won't be used in select_task_rq_fair anyway
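
[Editor's note] The first hunk changes only how sa->load_avg is seeded: a new task still starts at its full (scaled-down) weight, while a group entity now starts at zero. Below is a minimal userspace sketch of that behaviour with simplified types; the weight field, is_task flag and the omitted scale_load_down() are stand-ins for the kernel's se->load.weight, entity_is_task() and load-scaling helpers:

    #include <stdio.h>
    #include <stdbool.h>

    /* Rough stand-in for the kernel's PELT maximum (v4.9-era value). */
    #define LOAD_AVG_MAX 47742

    struct sched_avg {
            unsigned long load_avg;
            unsigned long long load_sum;
            unsigned long period_contrib;
    };

    struct sched_entity {
            unsigned long weight;   /* stand-in for se->load.weight */
            bool is_task;           /* stand-in for entity_is_task(se) */
            struct sched_avg avg;
    };

    /* Models the patched init_entity_runnable_average(): only tasks start
     * "heavy"; group entities start at zero load because nothing has been
     * attached to the task group yet. scale_load_down() is omitted. */
    static void init_entity_runnable_average(struct sched_entity *se)
    {
            struct sched_avg *sa = &se->avg;

            sa->period_contrib = 1023;
            if (se->is_task)
                    sa->load_avg = se->weight;
            sa->load_sum = (unsigned long long)sa->load_avg * LOAD_AVG_MAX;
    }

    int main(void)
    {
            struct sched_entity task  = { .weight = 1024, .is_task = true  };
            struct sched_entity group = { .weight = 1024, .is_task = false };

            init_entity_runnable_average(&task);
            init_entity_runnable_average(&group);
            /* Prints: task load_avg=1024, group load_avg=0 */
            printf("task load_avg=%lu, group load_avg=%lu\n",
                   task.avg.load_avg, group.avg.load_avg);
            return 0;
    }

Run it and the task reports load_avg=1024 while the group reports 0: a fresh task is treated as heavy until PELT observes its real load, and an empty task group contributes nothing until something is attached to it.
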
@@ -5471,13 +5478,18 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
*/
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
{
- struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
- u64 avg_idle = this_rq()->avg_idle;
- u64 avg_cost = this_sd->avg_scan_cost;
+ struct sched_domain *this_sd;
+ u64 avg_cost, avg_idle = this_rq()->avg_idle;
u64 time, cost;
s64 delta;
int cpu, wrap;

+ this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+ if (!this_sd)
+ return -1;
+
+ avg_cost = this_sd->avg_scan_cost;
+
/*
* Due to large variance we need a large fuzz factor; hackbench in
* particular is sensitive here.
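
[Editor's note] The second hunk is a NULL-pointer fix: rcu_dereference(*this_cpu_ptr(&sd_llc)) can return NULL (for instance while the sched-domain hierarchy is being rebuilt), yet the old code read this_sd->avg_scan_cost from it unconditionally in the initializer. The patch snapshots the pointer first and bails out before touching any field. Here is a standalone sketch of that guard pattern, using a plain pointer in place of RCU and a hypothetical select_idle_cpu_sketch() wrapper:

    #include <stdio.h>

    struct sched_domain {
            unsigned long long avg_scan_cost;
    };

    /* Stand-in for the per-CPU sd_llc pointer; it may legitimately be
     * NULL, e.g. while the sched-domain hierarchy is being rebuilt. */
    static struct sched_domain *sd_llc;

    /* Models the patched entry of select_idle_cpu(): snapshot the shared
     * pointer once, and bail out before reading any of its fields. */
    static int select_idle_cpu_sketch(void)
    {
            struct sched_domain *this_sd = sd_llc; /* rcu_dereference() in the kernel */
            unsigned long long avg_cost;

            if (!this_sd)
                    return -1;      /* no LLC domain: report "no idle CPU found" */

            avg_cost = this_sd->avg_scan_cost;      /* now provably safe */
            printf("scanning, avg_cost=%llu\n", avg_cost);
            return 0;
    }

    int main(void)
    {
            static struct sched_domain llc = { .avg_scan_cost = 42 };

            printf("result: %d\n", select_idle_cpu_sketch()); /* -1 while NULL */
            sd_llc = &llc;
            printf("result: %d\n", select_idle_cpu_sketch()); /* 0 once attached */
            return 0;
    }

Returning -1 mirrors select_idle_cpu()'s "no idle CPU found" result, so the caller falls back to the target CPU instead of dereferencing a NULL domain.
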
@@ -8827,7 +8839,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct sched_entity *se;
struct cfs_rq *cfs_rq;
- struct rq *rq;
int i;

tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8842,8 +8853,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
init_cfs_bandwidth(tg_cfs_bandwidth(tg));

for_each_possible_cpu(i) {
- rq = cpu_rq(i);
-
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)