diff options
Diffstat (limited to 'include/linux/sched.h')
| -rw-r--r-- | include/linux/sched.h | 64 | 
1 file changed, 28 insertions, 36 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 18d63cea2848..12211e1666e2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);  extern signed long schedule_timeout_killable(signed long timeout);  extern signed long schedule_timeout_uninterruptible(signed long timeout);  asmlinkage void schedule(void); -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);  struct nsproxy;  struct user_namespace; @@ -731,10 +731,6 @@ struct sched_info {  	/* timestamps */  	unsigned long long last_arrival,/* when we last ran on a cpu */  			   last_queued;	/* when we were last queued to run */ -#ifdef CONFIG_SCHEDSTATS -	/* BKL stats */ -	unsigned int bkl_count; -#endif  };  #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ @@ -868,6 +864,7 @@ static inline int sd_power_saving_flags(void)  struct sched_group {  	struct sched_group *next;	/* Must be a circular list */ +	atomic_t ref;  	/*  	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a @@ -882,9 +879,6 @@ struct sched_group {  	 * NOTE: this field is variable length. (Allocated dynamically  	 * by attaching extra space to the end of the structure,  	 * depending on how many CPUs the kernel has booted up with) -	 * -	 * It is also be embedded into static data structures at build -	 * time. 
(See 'struct static_sched_group' in kernel/sched.c)  	 */  	unsigned long cpumask[0];  }; @@ -894,17 +888,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)  	return to_cpumask(sg->cpumask);  } -enum sched_domain_level { -	SD_LV_NONE = 0, -	SD_LV_SIBLING, -	SD_LV_MC, -	SD_LV_BOOK, -	SD_LV_CPU, -	SD_LV_NODE, -	SD_LV_ALLNODES, -	SD_LV_MAX -}; -  struct sched_domain_attr {  	int relax_domain_level;  }; @@ -913,6 +896,8 @@ struct sched_domain_attr {  	.relax_domain_level = -1,			\  } +extern int sched_domain_level_max; +  struct sched_domain {  	/* These fields must be setup */  	struct sched_domain *parent;	/* top domain must be null terminated */ @@ -930,7 +915,7 @@ struct sched_domain {  	unsigned int forkexec_idx;  	unsigned int smt_gain;  	int flags;			/* See SD_* */ -	enum sched_domain_level level; +	int level;  	/* Runtime fields. */  	unsigned long last_balance;	/* init to jiffies. units in jiffies */ @@ -973,6 +958,10 @@ struct sched_domain {  #ifdef CONFIG_SCHED_DEBUG  	char *name;  #endif +	union { +		void *private;		/* used during construction */ +		struct rcu_head rcu;	/* used during destruction */ +	};  	unsigned int span_weight;  	/* @@ -981,9 +970,6 @@ struct sched_domain {  	 * NOTE: this field is variable length. (Allocated dynamically  	 * by attaching extra space to the end of the structure,  	 * depending on how many CPUs the kernel has booted up with) -	 * -	 * It is also be embedded into static data structures at build -	 * time. 
(See 'struct static_sched_domain' in kernel/sched.c)  	 */  	unsigned long span[0];  }; @@ -1048,8 +1034,12 @@ struct sched_domain;  #define WF_FORK		0x02		/* child wakeup after fork */  #define ENQUEUE_WAKEUP		1 -#define ENQUEUE_WAKING		2 -#define ENQUEUE_HEAD		4 +#define ENQUEUE_HEAD		2 +#ifdef CONFIG_SMP +#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */ +#else +#define ENQUEUE_WAKING		0 +#endif  #define DEQUEUE_SLEEP		1 @@ -1067,12 +1057,11 @@ struct sched_class {  	void (*put_prev_task) (struct rq *rq, struct task_struct *p);  #ifdef CONFIG_SMP -	int  (*select_task_rq)(struct rq *rq, struct task_struct *p, -			       int sd_flag, int flags); +	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);  	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);  	void (*post_schedule) (struct rq *this_rq); -	void (*task_waking) (struct rq *this_rq, struct task_struct *task); +	void (*task_waking) (struct task_struct *task);  	void (*task_woken) (struct rq *this_rq, struct task_struct *task);  	void (*set_cpus_allowed)(struct task_struct *p, @@ -1197,13 +1186,11 @@ struct task_struct {  	unsigned int flags;	/* per process flags, defined below */  	unsigned int ptrace; -	int lock_depth;		/* BKL lock depth */ -  #ifdef CONFIG_SMP -#ifdef __ARCH_WANT_UNLOCKED_CTXSW -	int oncpu; -#endif +	struct task_struct *wake_entry; +	int on_cpu;  #endif +	int on_rq;  	int prio, static_prio, normal_prio;  	unsigned int rt_priority; @@ -1274,6 +1261,7 @@ struct task_struct {  	/* Revert to default priority/policy when forking */  	unsigned sched_reset_on_fork:1; +	unsigned sched_contributes_to_load:1;  	pid_t pid;  	pid_t tgid; @@ -1537,6 +1525,9 @@ struct task_struct {  		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */  	} memcg_batch;  #endif +#ifdef CONFIG_HAVE_HW_BREAKPOINT +	atomic_t ptrace_bp_refcnt; +#endif  };  /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ @@ -2060,14 +2051,13 @@ extern void xtime_update(unsigned long ticks);  extern int wake_up_state(struct task_struct *tsk, unsigned int state);  extern int wake_up_process(struct task_struct *tsk); -extern void wake_up_new_task(struct task_struct *tsk, -				unsigned long clone_flags); +extern void wake_up_new_task(struct task_struct *tsk);  #ifdef CONFIG_SMP   extern void kick_process(struct task_struct *tsk);  #else   static inline void kick_process(struct task_struct *tsk) { }  #endif -extern void sched_fork(struct task_struct *p, int clone_flags); +extern void sched_fork(struct task_struct *p);  extern void sched_dead(struct task_struct *p);  extern void proc_caches_init(void); @@ -2192,8 +2182,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);  extern char *get_task_comm(char *to, struct task_struct *tsk);  #ifdef CONFIG_SMP +void scheduler_ipi(void);  extern unsigned long wait_task_inactive(struct task_struct *, long match_state);  #else +static inline void scheduler_ipi(void) { }  static inline unsigned long wait_task_inactive(struct task_struct *p,  					       long match_state)  {  | 
