Diffstat (limited to 'include/linux/sched/ext.h')
-rw-r--r--  include/linux/sched/ext.h  57
1 file changed, 49 insertions(+), 8 deletions(-)
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 1d70a9867fb1..bcb962d5ee7d 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -17,7 +17,18 @@
enum scx_public_consts {
SCX_OPS_NAME_LEN = 128,
+ /*
+ * %SCX_SLICE_DFL is used to refill slices when the BPF scheduler fails
+ * to set the slice for a task that is selected for execution.
+ * %SCX_EV_REFILL_SLICE_DFL counts the number of times the default slice
+ * refill has been triggered.
+ *
+ * %SCX_SLICE_BYPASS is used as the slice for all tasks in bypass mode.
+ * As the main goal of bypass mode is making forward progress for all
+ * tasks, a shorter slice is used.
+ */
SCX_SLICE_DFL = 20 * 1000000, /* 20ms */
+ SCX_SLICE_BYPASS = 5 * 1000000, /* 5ms */
SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */
};
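
As context for the constants above: if the BPF scheduler leaves p->scx.slice at 0 for a task picked to run, the core refills it with SCX_SLICE_DFL and counts the event as SCX_EV_REFILL_SLICE_DFL. A minimal sketch of an ops.enqueue() that sets the slice explicitly, assuming the scx tooling's BPF_STRUCT_OPS helper (the callback name is illustrative):

/* Sketch: enqueue into the global DSQ with the default 20ms slice. */
void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	/*
	 * Passing SCX_SLICE_DFL explicitly; inserting with a zero slice
	 * would make the core refill it when the task is selected and
	 * bump the SCX_EV_REFILL_SLICE_DFL counter.
	 */
	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
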
@@ -46,6 +57,7 @@ enum scx_dsq_id_flags {
SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0,
SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1,
SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2,
+ SCX_DSQ_BYPASS = SCX_DSQ_FLAG_BUILTIN | 3,
SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON,
SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
};
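
The builtin DSQ ids are plain u64 values; SCX_DSQ_LOCAL_ON encodes a target CPU in the bits covered by SCX_DSQ_LOCAL_CPU_MASK. The new SCX_DSQ_BYPASS presumably backs the core's bypass mode rather than serving as a BPF-side insertion target. A hedged sketch of targeting a specific CPU's local DSQ:

/* Sketch: insert @p into the local DSQ of @cpu, e.g. from ops.enqueue(). */
static void insert_on_cpu(struct task_struct *p, s32 cpu, u64 enq_flags)
{
	/* The CPU number occupies the SCX_DSQ_LOCAL_CPU_MASK bits. */
	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, enq_flags);
}
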
@@ -58,6 +70,7 @@ enum scx_dsq_id_flags {
*/
struct scx_dispatch_q {
raw_spinlock_t lock;
+ struct task_struct __rcu *first_task; /* lockless peek at head */
struct list_head list; /* tasks in dispatch order */
struct rb_root priq; /* used to order by p->scx.dsq_vtime */
u32 nr;
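
The new first_task field lets readers peek at the DSQ head without taking dsq->lock. A sketch of the pattern such a field enables; the helper name is hypothetical and writers are assumed to publish the head with rcu_assign_pointer() under dsq->lock:

/* Hypothetical sketch: lockless peek at the first task on a DSQ. */
static struct task_struct *dsq_peek_first(struct scx_dispatch_q *dsq)
{
	/* Pairs with rcu_assign_pointer() performed by writers under dsq->lock. */
	return rcu_dereference(dsq->first_task);
}
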
@@ -108,7 +121,11 @@ enum scx_kf_mask {
SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
/* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
- /* ops.dequeue (in REST) may be nested inside DISPATCH */
+ /*
+ * ops.dispatch() may temporarily release the rq lock and thus ENQUEUE
+ * and SELECT_CPU may be nested inside it. ops.dequeue() (in REST) may
+ * also be nested inside DISPATCH.
+ */
SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
@@ -132,6 +149,13 @@ struct scx_dsq_list_node {
u32 priv; /* can be used by iter cursor */
};
+#define INIT_DSQ_LIST_CURSOR(__node, __flags, __priv) \
+ (struct scx_dsq_list_node) { \
+ .node = LIST_HEAD_INIT((__node).node), \
+ .flags = SCX_DSQ_LNODE_ITER_CURSOR | (__flags), \
+ .priv = (__priv), \
+ }
+
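
A sketch of how the initializer might be used to set up an iteration cursor; the zero flags and priv values are illustrative:

struct scx_dsq_list_node cursor;

/* Marks the node as a cursor via SCX_DSQ_LNODE_ITER_CURSOR. */
cursor = INIT_DSQ_LIST_CURSOR(cursor, 0, 0);
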
/*
* The following is embedded in task_struct and contains all fields necessary
* for a task to be scheduled by SCX.
@@ -146,6 +170,7 @@ struct sched_ext_entity {
u32 weight;
s32 sticky_cpu;
s32 holding_cpu;
+ s32 selected_cpu;
u32 kf_mask; /* see scx_kf_mask above */
struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
atomic_long_t ops_state;
@@ -163,7 +188,7 @@ struct sched_ext_entity {
/*
* Runtime budget in nsecs. This is usually set through
- * scx_bpf_dispatch() but can also be modified directly by the BPF
+ * scx_bpf_dsq_insert() but can also be modified directly by the BPF
* scheduler. Automatically decreased by SCX as the task executes. On
* depletion, a scheduling event is triggered.
*
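
Since slice can be written directly, a scheduler may also top up a running task's budget on the fly. A hedged sketch from an ops.tick() callback (the 1ms threshold is illustrative):

/* Sketch: keep a task running by refilling its slice from ops.tick(). */
void BPF_STRUCT_OPS(sketch_tick, struct task_struct *p)
{
	if (p->scx.slice < 1000000)	/* illustrative 1ms threshold */
		p->scx.slice = SCX_SLICE_DFL;
}
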
@@ -175,10 +200,10 @@ struct sched_ext_entity {
/*
* Used to order tasks when dispatching to the vtime-ordered priority
- * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime()
- * but can also be modified directly by the BPF scheduler. Modifying it
- * while a task is queued on a dsq may mangle the ordering and is not
- * recommended.
+ * queue of a dsq. This is usually set through
+ * scx_bpf_dsq_insert_vtime() but can also be modified directly by the
+ * BPF scheduler. Modifying it while a task is queued on a dsq may
+ * mangle the ordering and is not recommended.
*/
u64 dsq_vtime;
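
A sketch of the usual vtime pattern, mirroring the scx example schedulers: insert with scx_bpf_dsq_insert_vtime() and advance dsq_vtime as the task runs, scaled inversely by weight. SHARED_DSQ and the callback names are illustrative:

void BPF_STRUCT_OPS(vtime_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
				 p->scx.dsq_vtime, enq_flags);
}

void BPF_STRUCT_OPS(vtime_stopping, struct task_struct *p, bool runnable)
{
	/* Charge the consumed slice inversely to weight (100 is the default). */
	p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
}
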
@@ -202,15 +227,31 @@ struct sched_ext_entity {
struct list_head tasks_node;
};
-void sched_ext_free(struct task_struct *p);
+void sched_ext_dead(struct task_struct *p);
void print_scx_info(const char *log_lvl, struct task_struct *p);
void scx_softlockup(u32 dur_s);
+bool scx_hardlockup(int cpu);
+bool scx_rcu_cpu_stall(void);
#else /* !CONFIG_SCHED_CLASS_EXT */
-static inline void sched_ext_free(struct task_struct *p) {}
+static inline void sched_ext_dead(struct task_struct *p) {}
static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
static inline void scx_softlockup(u32 dur_s) {}
+static inline bool scx_hardlockup(int cpu) { return false; }
+static inline bool scx_rcu_cpu_stall(void) { return false; }
#endif /* CONFIG_SCHED_CLASS_EXT */
+
+struct scx_task_group {
+#ifdef CONFIG_EXT_GROUP_SCHED
+ u32 flags; /* SCX_TG_* */
+ u32 weight;
+ u64 bw_period_us;
+ u64 bw_quota_us;
+ u64 bw_burst_us;
+ bool idle;
+#endif
+};
+
#endif /* _LINUX_SCHED_EXT_H */