Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 54dce019c0ce..50a5352f6205 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1816,6 +1816,10 @@ void __dl_clear_params(struct task_struct *p)
 	dl_se->dl_period = 0;
 	dl_se->flags = 0;
 	dl_se->dl_bw = 0;
+
+	dl_se->dl_throttled = 0;
+	dl_se->dl_new = 1;
+	dl_se->dl_yielded = 0;
 }
 
 /*
@@ -1844,7 +1848,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
 	RB_CLEAR_NODE(&p->dl.rb_node);
-	hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
 
 	INIT_LIST_HEAD(&p->rt.run_list);
@@ -2054,6 +2058,9 @@ static inline int dl_bw_cpus(int i)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
 		       const struct sched_attr *attr)
@@ -3263,15 +3270,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 
-	init_dl_task_timer(dl_se);
 	dl_se->dl_runtime = attr->sched_runtime;
 	dl_se->dl_deadline = attr->sched_deadline;
 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
 	dl_se->flags = attr->sched_flags;
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-	dl_se->dl_throttled = 0;
-	dl_se->dl_new = 1;
-	dl_se->dl_yielded = 0;
+
+	/*
+	 * Changing the parameters of a task is 'tricky' and we're not doing
+	 * the correct thing -- also see task_dead_dl() and switched_from_dl().
+	 *
+	 * What we SHOULD do is delay the bandwidth release until the 0-lag
+	 * point. This would include retaining the task_struct until that time
+	 * and change dl_overflow() to not immediately decrement the current
+	 * amount.
+	 *
+	 * Instead we retain the current runtime/deadline and let the new
+	 * parameters take effect after the current reservation period lapses.
+	 * This is safe (albeit pessimistic) because the 0-lag point is always
+	 * before the current scheduling deadline.
+	 *
+	 * We can still have temporary overloads because we do not delay the
+	 * change in bandwidth until that time; so admission control is
+	 * not on the safe side. It does however guarantee tasks will never
+	 * consume more than promised.
+	 */
 }
 
 /*
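
For reference, the "0-lag point" mentioned in the new comments is the instant at which a SCHED_DEADLINE task's lag (service it is entitled to minus service it has received) drops to zero. Below is a minimal, standalone C sketch of how that point relates to the current scheduling deadline; the helper name, parameter list, and userspace types are purely illustrative and are not part of this patch or of the kernel's deadline code.

#include <stdint.h>

/*
 * Illustrative only, not kernel code: with reservation bandwidth
 * U = dl_runtime / dl_period and `runtime` nanoseconds of budget left
 * before the absolute `deadline`, the task's lag reaches zero at
 *
 *     t_0lag = deadline - runtime / U
 *            = deadline - runtime * dl_period / dl_runtime
 *
 * Since runtime / U is non-negative, t_0lag never lies after the current
 * deadline, which is why letting new parameters take effect only after the
 * current reservation period lapses (as the __setparam_dl() comment
 * describes) is pessimistic but safe.
 */
static uint64_t dl_zero_lag_time(uint64_t deadline, uint64_t runtime,
				 uint64_t dl_runtime, uint64_t dl_period)
{
	return deadline - (runtime * dl_period) / dl_runtime;
}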
