Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  45
1 file changed, 28 insertions, 17 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 06b4e3bda93a..00de3e950dd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,7 +118,7 @@ struct task_group;
* the comment with set_special_state().
*/
#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
#define __set_current_state(state_value) \
do { \
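
[Not part of the patch] With TASK_PARKED now classed as a special state, a parking path has to set it through set_special_state() (which takes ->pi_lock around the store) rather than a plain set_current_state(). A minimal sketch of such a path; the kthread field and flag names here are assumed for illustration, not taken from this diff:

/* Illustrative sketch, not from this patch. */
static void example_parkme(struct kthread *self)
{
        for (;;) {
                /*
                 * TASK_PARKED is a special state: a plain store could be
                 * clobbered by a concurrent wakeup, so go through
                 * set_special_state(), which serializes on ->pi_lock.
                 */
                set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}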
@@ -167,8 +167,8 @@ struct task_group;
* need_sleep = false;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state() executes a full memory barrier before accessing the
+ * task state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
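
The ordering the reworded comment describes, condensed into one sketch (need_sleep and p are the same example names the surrounding comment uses):

        /* Sleeper (task p) -- sketch of the comment's example: */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);  /* store p->state, implies smp_mb() */
                if (!need_sleep)                          /* then load the condition */
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        /* Waker: */
        need_sleep = false;                       /* store the condition */
        wake_up_state(p, TASK_UNINTERRUPTIBLE);   /* full barrier, then check p->state */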
@@ -722,8 +722,8 @@ struct task_struct {
unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
- unsigned memcg_may_oom:1;
-#ifndef CONFIG_SLOB
+ unsigned in_user_fault:1;
+#ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1;
#endif
#endif
@@ -734,6 +734,10 @@ struct task_struct {
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ /* to be used once the psi infrastructure lands upstream. */
+ unsigned use_memdelay:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -1018,7 +1022,6 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct list_head numa_entry;
struct numa_group *numa_group;
/*
@@ -1150,6 +1153,13 @@ struct task_struct {
/* Number of pages to reclaim on returning to userland: */
unsigned int memcg_nr_pages_over_high;
+
+ /* Used by memcontrol for targeted memcg charge: */
+ struct mem_cgroup *active_memcg;
+#endif
+
+#ifdef CONFIG_BLK_CGROUP
+ struct request_queue *throttle_queue;
#endif
#ifdef CONFIG_UPROBES
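
[Not part of the patch] The new ->active_memcg pointer is meant to be set around an allocation so the charge is attributed to a specific memcg instead of current's. A sketch of the intended usage, assuming the memalloc_use_memcg()/memalloc_unuse_memcg() helpers introduced alongside this field; the helper names are from memory, not from this diff:

        /* Illustrative sketch; @memcg obtained and pinned elsewhere. */
        memalloc_use_memcg(memcg);              /* current->active_memcg = memcg */
        page = alloc_page(GFP_KERNEL | __GFP_ACCOUNT);
        memalloc_unuse_memcg();                 /* current->active_memcg = NULL */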
@@ -1780,20 +1790,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
if (current->rseq)
- __rseq_handle_notify_resume(regs);
+ __rseq_handle_notify_resume(ksig, regs);
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(ksig, regs);
}
/* rseq_preempt() requires preemption to be disabled. */
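
[Not part of the patch] The extra struct ksignal argument lets the rseq code see which signal is being delivered. An architecture's signal-delivery path would call it roughly as below; handle_signal() and setup_rt_frame() stand in for the real arch code:

/* Illustrative sketch of an arch-side caller, not from this patch. */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        /* ... */
        /*
         * If the interrupted context sits inside an rseq critical section,
         * fix up @regs to the abort handler before the signal frame is
         * copied out, and record the signal event for the task.
         */
        rseq_signal_deliver(ksig, regs);
        /* ... setup_rt_frame(ksig, regs); signal_setup_done(); ... */
}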
@@ -1812,9 +1824,7 @@ static inline void rseq_migrate(struct task_struct *t)
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
@@ -1828,7 +1838,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
- rseq_preempt(t);
}
}
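
[Not part of the patch] The two hunks above only show the inherit-on-fork half of the helper. A reconstruction of what the full function plausibly looks like after this change, including the CLONE_THREAD branch the diff does not show:

/* Reconstructed sketch, not taken verbatim from the kernel tree. */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
        if (clone_flags & CLONE_THREAD) {
                /* Threads do not inherit the parent's rseq registration. */
                t->rseq = NULL;
                t->rseq_len = 0;
                t->rseq_sig = 0;
                t->rseq_event_mask = 0;
        } else {
                t->rseq = current->rseq;
                t->rseq_len = current->rseq_len;
                t->rseq_sig = current->rseq_sig;
                t->rseq_event_mask = current->rseq_event_mask;
        }
}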
@@ -1845,10 +1854,12 @@ static inline void rseq_execve(struct task_struct *t)
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)