From 0c9794c8b6781eb7dad8e19b78c5d4557790597a Mon Sep 17 00:00:00 2001
From: "Ahmed S. Darwish"
Date: Thu, 27 Aug 2020 13:40:44 +0200
Subject: seqlock: seqcount latch APIs: Only allow seqcount_latch_t

All latch sequence counter call-sites have now been converted from plain
seqcount_t to the new seqcount_latch_t data type. Enforce type-safety by
modifying seqlock.h latch APIs to only accept seqcount_latch_t.

Signed-off-by: Ahmed S. Darwish
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20200827114044.11173-9-a.darwish@linutronix.de
---
 include/linux/seqlock.h | 36 +++++++++++++++---------------------
 1 file changed, 15 insertions(+), 21 deletions(-)

(limited to 'include/linux/seqlock.h')

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 88b917d4ebde..f2a7a467e998 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -620,7 +620,7 @@ static inline void seqcount_latch_init(seqcount_latch_t *s)
 
 /**
  * raw_read_seqcount_latch() - pick even/odd latch data copy
- * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
+ * @s: Pointer to seqcount_latch_t
  *
  * See raw_write_seqcount_latch() for details and a full reader/writer
  * usage example.
@@ -629,17 +629,14 @@ static inline void seqcount_latch_init(seqcount_latch_t *s)
  * picking which data copy to read. The full counter must then be checked
  * with read_seqcount_latch_retry().
  */
-#define raw_read_seqcount_latch(s) \
-({ \
-	/* \
-	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). \
-	 * Due to the dependent load, a full smp_rmb() is not needed. \
-	 */ \
-	_Generic(*(s), \
-		 seqcount_t: READ_ONCE(((seqcount_t *)s)->sequence), \
-		 seqcount_raw_spinlock_t: READ_ONCE(((seqcount_raw_spinlock_t *)s)->seqcount.sequence), \
-		 seqcount_latch_t: READ_ONCE(((seqcount_latch_t *)s)->seqcount.sequence)); \
-})
+static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+{
+	/*
+	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
+	 * Due to the dependent load, a full smp_rmb() is not needed.
+	 */
+	return READ_ONCE(s->seqcount.sequence);
+}
 
 /**
  * read_seqcount_latch_retry() - end a seqcount_latch_t read section
@@ -656,7 +653,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
 
 /**
  * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
- * @s: Pointer to seqcount_t, seqcount_raw_spinlock_t, or seqcount_latch_t
+ * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
@@ -735,14 +732,11 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
  * When data is a dynamic data structure; one should use regular RCU
  * patterns to manage the lifetimes of the objects within.
  */
-#define raw_write_seqcount_latch(s) \
-{ \
-	smp_wmb(); /* prior stores before incrementing "sequence" */ \
-	_Generic(*(s), \
-		 seqcount_t: ((seqcount_t *)s)->sequence++, \
-		 seqcount_raw_spinlock_t:((seqcount_raw_spinlock_t *)s)->seqcount.sequence++, \
-		 seqcount_latch_t: ((seqcount_latch_t *)s)->seqcount.sequence++); \
-	smp_wmb(); /* increment "sequence" before following stores */ \
+static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+{
+	smp_wmb(); /* prior stores before incrementing "sequence" */
+	s->seqcount.sequence++;
+	smp_wmb(); /* increment "sequence" before following stores */
 }
 
 /*
--
cgit
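
For context, the usage pattern these now latch-only APIs support is the
reader/writer example in the raw_write_seqcount_latch() kerneldoc. Below is
a minimal self-contained sketch modeled on that example; "latched_clock" and
the two functions are hypothetical names for illustration, not kernel
symbols. The writer keeps two copies of the data and flips the sequence
counter between updates so readers always have one stable copy; readers pick
a copy via the sequence LSB and retry if a writer raced with them.

	/* Hypothetical example type: two data copies guarded by one latch. */
	struct latched_clock {
		seqcount_latch_t	seq;
		u64			offset[2];
	};

	/* Writer: each flip redirects readers to the copy not being updated. */
	static void clock_set_offset(struct latched_clock *lc, u64 offset)
	{
		raw_write_seqcount_latch(&lc->seq);	/* readers now use offset[1] */
		lc->offset[0] = offset;
		raw_write_seqcount_latch(&lc->seq);	/* readers now use offset[0] */
		lc->offset[1] = offset;
	}

	/* Reader: LSB of the sequence picks the stable copy; retry on a race. */
	static u64 clock_get_offset(struct latched_clock *lc)
	{
		unsigned int seq;
		u64 ret;

		do {
			seq = raw_read_seqcount_latch(&lc->seq);
			ret = lc->offset[seq & 1];

			/* read_seqcount_latch_retry() provides the needed smp_rmb() */
		} while (read_seqcount_latch_retry(&lc->seq, seq));

		return ret;
	}

With this patch applied, passing a plain seqcount_t or a
seqcount_raw_spinlock_t to either latch API above no longer compiles, which
is exactly the type-safety the change enforces.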