Diffstat (limited to 'include/linux/swait.h')
-rw-r--r--  include/linux/swait.h  169
1 file changed, 142 insertions(+), 27 deletions(-)
diff --git a/include/linux/swait.h b/include/linux/swait.h
index c1f9c62a8a50..d324419482a0 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -1,35 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <asm/current.h>
/*
- * Simple wait queues
- *
- * While these are very similar to the other/complex wait queues (wait.h) the
- * most important difference is that the simple waitqueue allows for
- * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
+ * Simple waitqueues are semantically very different from regular wait queues
+ * (wait.h). The most important difference is that the simple waitqueue allows
+ * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
* times.
*
- * In order to make this so, we had to drop a fair number of features of the
- * other waitqueue code; notably:
+ * Mainly, this is accomplished by two things. Firstly, swake_up_all() may not
+ * be called with IRQs disabled, and it drops the lock upon every wakeup,
+ * giving a higher-priority task a chance to run.
+ *
+ * Secondly, we had to drop a fair number of features of the other waitqueue
+ * code; notably:
*
* - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
* sleeper state.
*
- * - the exclusive mode; because this requires preserving the list order
- * and this is hard.
- *
- * - custom wake functions; because you cannot give any guarantees about
- * random code.
+ * - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ *   exclusive. As such, swake_up_one() will only ever wake _one_ waiter.
*
- * As a side effect of this; the data structures are slimmer.
+ * - custom wake callback functions; because you cannot give any guarantees
+ *   about random code. This also allows swait to be used in RT, such that a
+ *   raw spinlock can be used for the swait queue head.
*
- * One would recommend using this wait queue where possible.
+ * As a side effect of these, the data structures are slimmer, albeit more ad hoc.
+ * For all the above, note that simple wait queues should _only_ be used under
+ * very specific realtime constraints -- it is best to stick with the regular
+ * wait queues in most cases.
*/
struct task_struct;
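Before the API changes below, a minimal usage sketch of the exclusive waiter/waker pairing this header provides (not part of the patch; my_wq and my_cond are hypothetical names):

    static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
    static bool my_cond;

    /* waiter: sleeps (TASK_UNINTERRUPTIBLE) until my_cond is set */
    static void my_waiter(void)
    {
            swait_event_exclusive(my_wq, my_cond);
    }

    /* waker: publishes the condition, then wakes exactly one waiter */
    static void my_waker(void)
    {
            my_cond = true;         /* ordered by the queue lock in swake_up_one() */
            swake_up_one(&my_wq);
    }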
@@ -79,25 +85,79 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
DECLARE_SWAIT_QUEUE_HEAD(name)
#endif
-static inline int swait_active(struct swait_queue_head *q)
+/**
+ * swait_active -- locklessly test for waiters on the queue
+ * @wq: the waitqueue to test for waiters
+ *
+ * Returns true if the wait list is not empty.
+ *
+ * NOTE: this function is lockless and requires care, incorrect usage _will_
+ * lead to sporadic and non-obvious failure.
+ *
+ * NOTE2: the same caveats apply here as to waitqueue_active() on regular
+ * waitqueues.
+ *
+ * Use either while holding swait_queue_head::lock or when used for wakeups
+ * with an extra smp_mb() like:
+ *
+ *      CPU0 - waker                    CPU1 - waiter
+ *
+ *                                      for (;;) {
+ *      @cond = true;                     prepare_to_swait_exclusive(&wq_head, &wait, state);
+ *      smp_mb();                         // smp_mb() from set_current_state()
+ *      if (swait_active(wq_head))        if (@cond)
+ *        swake_up_one(wq_head);            break;
+ *                                        schedule();
+ *                                      }
+ *                                      finish_swait(&wq_head, &wait);
+ *
+ * Because without the explicit smp_mb() it's possible for the
+ * swait_active() load to get hoisted over the @cond store such that we'll
+ * observe an empty wait list while the waiter might not observe @cond.
+ * This, in turn, can trigger missing wakeups.
+ *
+ * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
+ * the two being of roughly equal cost when the lock is uncontended.
+ */
+static inline int swait_active(struct swait_queue_head *wq)
+{
+ return !list_empty(&wq->task_list);
+}
+
+/**
+ * swq_has_sleeper - check if there are any waiting processes
+ * @wq: the waitqueue to test for waiters
+ *
+ * Returns true if @wq has waiting processes
+ *
+ * Please refer to the comment for swait_active.
+ */
+static inline bool swq_has_sleeper(struct swait_queue_head *wq)
{
- return !list_empty(&q->task_list);
+ /*
+ * We need to be sure we are in sync with the list_add()
+ * modifications to the wait queue (task_list).
+ *
+ * This memory barrier should be paired with one on the
+ * waiting side.
+ */
+ smp_mb();
+ return swait_active(wq);
}
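The barrier makes the following waker-side pattern safe without taking the queue lock when nobody sleeps; a sketch under the same hypothetical my_wq/my_cond names as above:

    static void my_waker(void)
    {
            my_cond = true;
            /*
             * The smp_mb() in swq_has_sleeper() orders the store to my_cond
             * against the list_empty() check; it pairs with the barrier in
             * set_current_state() on the waiter side.
             */
            if (swq_has_sleeper(&my_wq))
                    swake_up_one(&my_wq);
    }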
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
-extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q, int wake_flags);
-extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
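For conditions that do not fit the swait_event_*() macros below, these primitives can be open-coded; a minimal sketch, again with hypothetical my_wq/my_cond, using DECLARE_SWAITQUEUE() defined earlier in this header:

    static void my_wait_for_cond(void)
    {
            DECLARE_SWAITQUEUE(wait);

            for (;;) {
                    /* adds us to the queue and sets the task state */
                    prepare_to_swait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (my_cond)
                            break;
                    schedule();
            }
            /* restores TASK_RUNNING and unlinks us from the queue */
            finish_swait(&my_wq, &wait);
    }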
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
#define ___swait_event(wq, condition, state, ret, cmd) \
({ \
+ __label__ __out; \
struct swait_queue __wait; \
long __ret = ret; \
\
@@ -110,20 +170,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
- break; \
+ goto __out; \
} \
\
cmd; \
} \
finish_swait(&wq, &__wait); \
- __ret; \
+__out: __ret; \
})
#define __swait_event(wq, condition) \
(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
schedule())
-#define swait_event(wq, condition) \
+#define swait_event_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -135,7 +195,7 @@ do { \
TASK_UNINTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_timeout(wq, condition, timeout) \
+#define swait_event_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -147,7 +207,7 @@ do { \
___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
schedule())
-#define swait_event_interruptible(wq, condition) \
+#define swait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
@@ -160,7 +220,7 @@ do { \
TASK_INTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -169,4 +229,59 @@ do { \
__ret; \
})
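Usage of the interruptible variant follows the usual convention: 0 on success, -ERESTARTSYS if a signal arrived first. A sketch with the same hypothetical names:

    static int my_wait_interruptible(void)
    {
            int ret = swait_event_interruptible_exclusive(my_wq, my_cond);

            if (ret)                /* -ERESTARTSYS: interrupted by a signal */
                    return ret;
            /* my_cond was observed true */
            return 0;
    }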
+#define __swait_event_idle(wq, condition) \
+ (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
+
+/**
+ * swait_event_idle_exclusive - wait without system load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ */
+#define swait_event_idle_exclusive(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event_idle(wq, condition); \
+} while (0)
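A typical consumer is a service kthread; a sketch, assuming a hypothetical my_work_pending flag set by a producer:

    static bool my_work_pending;

    static int my_kthread_fn(void *arg)
    {
            while (!kthread_should_stop()) {
                    /* TASK_IDLE: no loadavg contribution, signals ignored */
                    swait_event_idle_exclusive(my_wq,
                            my_work_pending || kthread_should_stop());
                    /* ... consume the work, clear my_work_pending ... */
            }
            return 0;
    }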
+
+#define __swait_event_idle_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_IDLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout at which we'll give up in jiffies
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_idle_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
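Handling the three-way return value, sketched with the hypothetical names above and a one-second (HZ jiffies) budget:

    static bool my_wait_with_timeout(void)
    {
            long ret = swait_event_idle_timeout_exclusive(my_wq, my_cond, HZ);

            if (ret == 0)
                    return false;   /* timed out; my_cond still false */
            /* ret >= 1: my_cond evaluated true */
            return true;
    }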
#endif /* _LINUX_SWAIT_H */