author     Ingo Molnar <mingo@kernel.org>  2020-11-07 12:50:48 +0100
committer  Ingo Molnar <mingo@kernel.org>  2020-11-07 13:20:17 +0100
commit     666fab4a3ea143315a9c059fad9f3a0f1365d54b
tree       e9e4be3b0eeac79346d52a86183326617d0c9999  /include/linux/sched.h
parent     0a986ea81e1aa8ac17e82cda53cc95158217956e
parent     659caaf65dc9c7150aa3e80225ec6e66b25ab3ce
Merge branch 'linus' into perf/kprobes
Conflicts:
	include/asm-generic/atomic-instrumented.h
	kernel/kprobes.c

Use the upstream atomic-instrumented.h checksum, and pick the kprobes
version of kernel/kprobes.c, which effectively reverts this upstream
workaround:

  645f224e7ba2: ("kprobes: Tell lockdep about kprobe nesting")

Since the new code *should* be fine without nesting.

Knock on wood ...

Signed-off-by: Ingo Molnar <mingo@kernel.org>
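For context on the reverted workaround: "telling lockdep about kprobe nesting" refers to the standard lockdep nesting annotation, in which a lock that is legitimately re-acquired one level deep is taken with an explicit nesting subclass. Below is a minimal sketch of that general idiom only; it is assumed to mirror the shape of the reverted change, and the demo_* names are illustrative, not from kernel/kprobes.c:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo_lock_nested(unsigned long *flags)
	{
		/*
		 * SINGLE_DEPTH_NESTING tells lockdep that taking a lock of
		 * this class one level deep is intentional, so it does not
		 * flag the nested acquire as a potential deadlock.
		 */
		raw_spin_lock_irqsave_nested(&demo_lock, *flags, SINGLE_DEPTH_NESTING);
	}

	static void demo_unlock(unsigned long *flags)
	{
		raw_spin_unlock_irqrestore(&demo_lock, *flags);
	}

Picking the perf/kprobes version of kernel/kprobes.c drops that kind of annotation, on the assumption stated above that the new code no longer nests the lock.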
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5911805cafde..393db0690101 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -63,6 +63,7 @@ struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct io_uring_task;
/*
* Task state bitmask. NOTE! These bits are also
@@ -935,6 +936,10 @@ struct task_struct {
/* Open file information: */
struct files_struct *files;
+#ifdef CONFIG_IO_URING
+ struct io_uring_task *io_uring;
+#endif
+
/* Namespaces: */
struct nsproxy *nsproxy;
@@ -1008,7 +1013,7 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
-#ifdef CONFIG_UBSAN
+#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
unsigned int in_ubsan;
#endif
@@ -1203,6 +1208,10 @@ struct task_struct {
#endif
#endif
+#if IS_ENABLED(CONFIG_KUNIT)
+ struct kunit *kunit_test;
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
@@ -1308,6 +1317,8 @@ struct task_struct {
#endif
#ifdef CONFIG_X86_MCE
+ void __user *mce_vaddr;
+ __u64 mce_kflags;
u64 mce_addr;
__u64 mce_ripv : 1,
mce_whole_page : 1,
@@ -1493,9 +1504,10 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
+#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
@@ -1519,7 +1531,6 @@ extern struct pid *cad_pid;
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
-#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
@@ -2048,6 +2059,7 @@ const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
int sched_trace_rq_cpu(struct rq *rq);
+int sched_trace_rq_cpu_capacity(struct rq *rq);
int sched_trace_rq_nr_running(struct rq *rq);
const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
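A note on the PF_* renumbering above: callers only ever test these flags by name against task_struct::flags, so moving PF_VCPU to 0x00000001 and PF_IO_WORKER to 0x00000010 is transparent to flag users once everything is rebuilt against the new header. A minimal sketch of that usage pattern follows; the task_is_io_worker/task_is_vcpu helpers are illustrative and not part of this patch:

	#include <linux/sched.h>

	/* Per-process flags live in task_struct::flags and are tested by mask. */
	static inline bool task_is_io_worker(const struct task_struct *tsk)
	{
		return tsk->flags & PF_IO_WORKER;	/* now 0x00000010 */
	}

	static inline bool task_is_vcpu(const struct task_struct *tsk)
	{
		return tsk->flags & PF_VCPU;		/* now 0x00000001 */
	}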