| author | Arnd Bergmann <arnd@arndb.de> | 2011-10-20 15:14:25 +0200 | 
|---|---|---|
| committer | Arnd Bergmann <arnd@arndb.de> | 2011-10-20 15:14:25 +0200 | 
| commit | b4cbb8a4e602ea77b0525d06eff89c6a6070dab3 (patch) | |
| tree | a5dd723679582505ef3905c90f0c2c032d191b94 /kernel/sched.c | |
| parent | 526b264163068f77c5f2409031f5e25caf3900a9 (diff) | |
| parent | c5d7a9230e5e277f262b6806b7f4d6b35de5a3fb (diff) | |
Merge branch 'imx-features-for-arnd' of git://git.pengutronix.de/git/imx/linux-2.6 into imx/devel
Conflicts:
	arch/arm/mach-mx5/clock-mx51-mx53.c
	arch/arm/mach-mx5/devices-imx53.h
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 67 | 
1 file changed, 27 insertions(+), 40 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..b50b0f0c9aa9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 }
 
 /*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-	struct task_cputime totals;
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns;
-
-	rq = task_rq_lock(p, &flags);
-	thread_group_cputime(p, &totals);
-	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
@@ -4279,9 +4255,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4322,16 +4298,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4369,6 +4335,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void __sched schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4421,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4463,7 +4449,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5574,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7429,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 			if (sd && (sd->flags & SD_OVERLAP))
 				free_sched_groups(sd->groups, 0);
+			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
 			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
```
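The core of this merge for kernel/sched.c is the split of schedule() into an internal __schedule() plus a thin entry point that first submits any plugged block I/O, so a task never goes to sleep while holding back requests that another task may be waiting on. Below is a minimal userspace sketch of that wrapper pattern; struct task, submit_work(), flush_plugged_io(), and do_schedule() are hypothetical stand-ins for illustration, not kernel API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's task state and per-task
 * block plug; none of these names are real kernel API. */
struct task {
	int state;       /* 0 == runnable, nonzero == about to block */
	int plugged_io;  /* requests queued on the plug but not submitted */
};

static bool needs_flush(const struct task *tsk)
{
	/* Stand-in for blk_needs_flush_plug(). */
	return tsk->plugged_io > 0;
}

static void flush_plugged_io(struct task *tsk)
{
	/* Stand-in for blk_schedule_flush_plug(): submit held-back I/O. */
	printf("submitting %d plugged requests\n", tsk->plugged_io);
	tsk->plugged_io = 0;
}

/* Mirrors sched_submit_work(): flush only when the task is really
 * going to sleep; a task that stays runnable will get back to its
 * own I/O soon, so the plug can stay intact. */
static void submit_work(struct task *tsk)
{
	if (!tsk->state)
		return;
	if (needs_flush(tsk))
		flush_plugged_io(tsk);
}

static void do_schedule(void)
{
	/* Stand-in for __schedule(): pick and switch to the next task. */
}

/* Mirrors the new schedule() wrapper: the flush runs before any
 * scheduler locking, so no unlock/relock dance is needed around it. */
static void schedule_wrapper(struct task *tsk)
{
	submit_work(tsk);
	do_schedule();
}

int main(void)
{
	struct task t = { .state = 1, .plugged_io = 3 };
	schedule_wrapper(&t);
	return 0;
}
```

The design point the diff itself makes: because the flush now happens in the wrapper before rq->lock is taken, the raw_spin_unlock()/raw_spin_lock() pair around blk_schedule_flush_plug() in the old schedule() body could be deleted, and the preemption paths (preempt_schedule(), preempt_schedule_irq(), __cond_resched()) call __schedule() directly so they skip the work-submission step entirely.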
