author		Ahmed S. Darwish <a.darwish@linutronix.de>	2020-09-04 17:32:29 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2020-09-10 11:19:31 +0200
commit		52ac39e5db5148f70392edb654ad882ac8da88a8 (patch)
tree		c22b4d728073cedc37e6629ddf62e68ecb03b879 /include/linux/seqlock.h
parent		5cdd25572a29e46f932d3e6eedbd07429de66431 (diff)
seqlock: seqcount_t: Implement all read APIs as statement expressions
The sequence counters read APIs are implemented as CPP macros, so they
can take either seqcount_t or any of the seqcount_LOCKNAME_t variants.
Such macros then get *directly* transformed to internal C functions that
only take plain seqcount_t.

Further commits need access to seqcount_LOCKNAME_t inside of the actual
read APIs code. Thus transform all of the seqcount read APIs to pure GCC
statement expressions instead.

This will not break type-safety: all of the transformed APIs resolve to
a _Generic() selection that does not have a "default" case.

This will also not affect the transformed APIs readability: previously
added kernel-doc above all of seqlock.h functions makes the expectations
quite clear for call-site developers.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200904153231.11994-4-a.darwish@linutronix.de
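[Editor's note: the two mechanisms the commit message leans on — a _Generic()
selection with no "default" case, and GCC statement expressions — can be
illustrated outside the kernel. Below is a minimal standalone sketch: the
my_seqcount / my_seqcount_spinlock types and all my_* helpers are invented
for illustration, and it deliberately omits the real APIs' READ_ONCE(),
smp_rmb() and cpu_relax(), so it shows only the macro plumbing, not correct
concurrent code. It requires GCC or Clang for the ({ ... }) extension.]

	#include <stdio.h>

	/* Stand-ins for seqcount_t and one seqcount_LOCKNAME_t variant. */
	struct my_seqcount          { unsigned sequence; };
	struct my_seqcount_spinlock { struct my_seqcount seqcount; };

	static inline unsigned
	my_seqprop_sequence(const struct my_seqcount *s)
	{
		return s->sequence;	/* the kernel uses READ_ONCE() here */
	}

	static inline unsigned
	my_seqprop_spinlock_sequence(const struct my_seqcount_spinlock *s)
	{
		return s->seqcount.sequence;
	}

	/*
	 * Type-based dispatch: no "default" association, so passing any
	 * other pointer type is a compile-time error, not a silent fallback.
	 */
	#define my_sequence(s)						\
		_Generic(*(s),						\
			struct my_seqcount:          my_seqprop_sequence, \
			struct my_seqcount_spinlock: my_seqprop_spinlock_sequence \
		)(s)

	/*
	 * Statement expression: the macro can hold a local variable and a
	 * retry loop (spin while the count is odd, i.e. a writer is active)
	 * and still evaluate to a value, like the reworked
	 * __read_seqcount_begin().
	 */
	#define my_read_begin(s)					\
	({								\
		unsigned _seq;						\
									\
		while ((_seq = my_sequence(s)) & 1)			\
			;	/* the kernel calls cpu_relax() here */	\
		_seq;							\
	})

	int main(void)
	{
		struct my_seqcount plain = { .sequence = 4 };
		struct my_seqcount_spinlock locked = { .seqcount = { .sequence = 8 } };

		/* One macro, two counter types, resolved at compile time. */
		printf("%u %u\n", my_read_begin(&plain), my_read_begin(&locked));
		return 0;
	}

[This prints "4 8". Because the _Generic() selection has no "default"
association, handing my_sequence() an unrelated pointer type fails to
compile — which is exactly the type-safety argument the commit message
makes for dropping the intermediate seqcount_t-only C functions.]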
Diffstat (limited to 'include/linux/seqlock.h')
-rw-r--r--	include/linux/seqlock.h | 94
1 file changed, 45 insertions(+), 49 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0b4a22f33ac3..f3b78277e26b 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -184,6 +184,12 @@ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
return &s->seqcount; \
} \
\
+static __always_inline unsigned \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+{ \
+ return READ_ONCE(s->seqcount.sequence); \
+} \
+ \
static __always_inline bool \
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
{ \
@@ -205,6 +211,11 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
return s;
}

+static inline unsigned __seqprop_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
static inline bool __seqprop_preemptible(const seqcount_t *s)
{
return false;
@@ -250,6 +261,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
__seqprop_case((s), ww_mutex, prop))

#define __seqcount_ptr(s) __seqprop(s, ptr)
+#define __seqcount_sequence(s) __seqprop(s, sequence)
#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
#define __seqcount_assert_lock_held(s) __seqprop(s, assert)

@@ -268,21 +280,15 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base)
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
- __read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret;
-
-repeat:
- ret = READ_ONCE(s->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq; \
+ \
+ while ((seq = __seqcount_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
@@ -291,14 +297,12 @@ repeat:
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount_begin(s) \
- raw_read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret = __read_seqcount_t_begin(s);
- smp_rmb();
- return ret;
-}
+({ \
+ unsigned seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+ seq; \
+})

/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -307,13 +311,10 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define read_seqcount_begin(s) \
- read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
-{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_t_begin(s);
-}
+({ \
+ seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+})

/**
* raw_read_seqcount() - read the raw seqcount_t counter value
@@ -327,15 +328,13 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount(s) \
- raw_read_seqcount_t(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq = __seqcount_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
@@ -355,16 +354,13 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_seqcount_begin(s) \
- raw_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
-{
- /*
- * If the counter is odd, let read_seqcount_retry() fail
- * by decrementing the counter.
- */
- return raw_read_seqcount_t(s) & ~1;
-}
+({ \
+ /* \
+ * If the counter is odd, let read_seqcount_retry() fail \
+ * by decrementing the counter. \
+ */ \
+ raw_read_seqcount(s) & ~1; \
+})

/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier