author     Linus Torvalds <torvalds@linux-foundation.org>   2024-11-19 12:43:11 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-11-19 12:43:11 -0800
commit     364eeb79a213fcf9164208b53764223ad522d6b3 (patch)
tree       63ea70096151ca2a217edf29ba6fe65e6be4be1b /include/linux/cleanup.h
parent     d8d78a90e7fca1ce7c90fa791400b287bc5b42a1 (diff)
parent     3b49a347d751553b1d1be69c8619ae2e85fdc28d (diff)
Merge tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Lockdep:
- Enable PROVE_RAW_LOCK_NESTING with PROVE_LOCKING (Sebastian Andrzej
Siewior)
- Add lockdep_cleanup_dead_cpu() (David Woodhouse)
futexes:
- Use atomic64_inc_return() in get_inode_sequence_number() (Uros
Bizjak)
- Use atomic64_try_cmpxchg_relaxed() in get_inode_sequence_number()
(Uros Bizjak)
RT locking:
- Add sparse annotations for PREEMPT_RT's locking (Sebastian Andrzej
Siewior)
spinlocks:
- Use atomic_try_cmpxchg_release() in osq_unlock() (Uros Bizjak)
atomics:
- x86: Use ALT_OUTPUT_SP() for __alternative_atomic64() (Uros Bizjak)
- x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu() (Uros
Bizjak)
KCSAN, seqlocks:
- Support seqcount_latch_t (Marco Elver)
<linux/cleanup.h>:
- Add if_not_guard() conditional guard helper (David Lechner)
- Adjust scoped_guard() macros to avoid potential warning (Przemek
Kitszel)
- Remove address space of returned pointer (Uros Bizjak)
WW mutexes:
- locking/ww_mutex: Adjust to lockdep nest_lock requirements (Thomas
Hellström)
Rust integration:
- Fix raw_spin_lock initialization on PREEMPT_RT (Eder Zulian)
Misc cleanups & fixes:
- lockdep: Fix wait-type check related warnings (Ahmed Ehab)
- lockdep: Use info level for initial info messages (Jiri Slaby)
- spinlocks: Make __raw_* lock ops static (Geert Uytterhoeven)
- pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase
(Qiuxu Zhuo)
- iio: magnetometer: Fix if () scoped_guard() formatting (Stephen
Rothwell)
- rtmutex: Fix misleading comment (Peter Zijlstra)
- percpu-rw-semaphores: Fix grammar in percpu-rw-semaphore.rst (Xiu
Jianfeng)"
* tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
locking/Documentation: Fix grammar in percpu-rw-semaphore.rst
iio: magnetometer: fix if () scoped_guard() formatting
rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
kcsan, seqlock: Fix incorrect assumption in read_seqbegin()
seqlock, treewide: Switch to non-raw seqcount_latch interface
kcsan, seqlock: Support seqcount_latch_t
time/sched_clock: Broaden sched_clock()'s instrumentation coverage
time/sched_clock: Swap update_clock_read_data() latch writes
locking/atomic/x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu()
locking/atomic/x86: Use ALT_OUTPUT_SP() for __alternative_atomic64()
cleanup: Add conditional guard helper
cleanup: Adjust scoped_guard() macros to avoid potential warning
locking/osq_lock: Use atomic_try_cmpxchg_release() in osq_unlock()
cleanup: Remove address space of returned pointer
locking/rtmutex: Fix misleading comment
locking/rt: Annotate unlock followed by lock for sparse.
locking/rt: Add sparse annotation for RCU.
locking/rt: Remove one __cond_lock() in RT's spin_trylock_irqsave()
locking/rt: Add sparse annotation PREEMPT_RT's sleeping locks.
locking/pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase
...
Diffstat (limited to 'include/linux/cleanup.h')
-rw-r--r--  include/linux/cleanup.h  |  69
1 file changed, 58 insertions, 11 deletions
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 875c998275c0..966fcc5ff8ef 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -273,6 +273,12 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  * an anonymous instance of the (guard) class, not recommended for
  * conditional locks.
  *
+ * if_not_guard(name, args...) { <error handling> }:
+ *      convenience macro for conditional guards that calls the statement that
+ *      follows only if the lock was not acquired (typically an error return).
+ *
+ *      Only for conditional locks.
+ *
  * scoped_guard (name, args...) { }:
  *      similar to CLASS(name, scope)(args), except the variable (with the
  *      explicit name 'scope') is declard in a for-loop such that its scope is
@@ -285,14 +291,20 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  *      similar to scoped_guard(), except it does fail when the lock
  *      acquire fails.
  *
+ *      Only for conditional locks.
  */

+#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
+static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
+
 #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
 	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
 	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
-	{ return *_T; }
+	{ return (void *)(__force unsigned long)*_T; }

 #define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
 	EXTEND_CLASS(_name, _ext, \
 		     ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
 		     class_##_name##_t _T) \
@@ -303,16 +315,48 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
 	CLASS(_name, __UNIQUE_ID(guard))

 #define __guard_ptr(_name) class_##_name##_lock_ptr
+#define __is_cond_ptr(_name) class_##_name##_is_conditional

-#define scoped_guard(_name, args...) \
-	for (CLASS(_name, scope)(args), \
-	     *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
-
-#define scoped_cond_guard(_name, _fail, args...) \
-	for (CLASS(_name, scope)(args), \
-	     *done = NULL; !done; done = (void *)1) \
-		if (!__guard_ptr(_name)(&scope)) _fail; \
-		else
+/*
+ * Helper macro for scoped_guard().
+ *
+ * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
+ * compiler would be sure that for the unconditional locks the body of the
+ * loop (caller-provided code glued to the else clause) could not be skipped.
+ * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
+ * hard to deduce (even if could be proven true for unconditional locks).
+ */
+#define __scoped_guard(_name, _label, args...) \
+	for (CLASS(_name, scope)(args); \
+	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
+	     ({ goto _label; })) \
+		if (0) { \
+_label: \
+			break; \
+		} else
+
+#define scoped_guard(_name, args...) \
+	__scoped_guard(_name, __UNIQUE_ID(label), args)
+
+#define __scoped_cond_guard(_name, _fail, _label, args...) \
+	for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
+		if (!__guard_ptr(_name)(&scope)) { \
+			BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+			_fail; \
+_label: \
+			break; \
+		} else
+
+#define scoped_cond_guard(_name, _fail, args...) \
+	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
+
+#define __if_not_guard(_name, _id, args...) \
+	BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+	CLASS(_name, _id)(args); \
+	if (!__guard_ptr(_name)(&_id))
+
+#define if_not_guard(_name, args...) \
+	__if_not_guard(_name, __UNIQUE_ID(guard), args)

 /*
  * Additional helper macros for generating lock guards with types, either for
@@ -347,7 +391,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T) \
 	 \
 static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
 { \
-	return _T->lock; \
+	return (void *)(__force unsigned long)_T->lock; \
 }

@@ -369,14 +413,17 @@ static inline class_##_name##_t class_##_name##_constructor(void) \
 }

 #define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
 __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
 __DEFINE_LOCK_GUARD_1(_name, _type, _lock)

 #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
 __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
 __DEFINE_LOCK_GUARD_0(_name, _lock)

 #define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
 	EXTEND_CLASS(_name, _ext, \
 		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
 			if (_T->lock && !(_condlock)) _T->lock = NULL; \
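
To make the class_##_name##_is_conditional plumbing in the diff above concrete, here is a hedged sketch of a hand-rolled guard class. It is not taken from the patch: every demo_* identifier is hypothetical, and only the macros come from <linux/cleanup.h>.

/*
 * Sketch (not from the patch): a hand-rolled guard class showing where
 * __DEFINE_CLASS_IS_CONDITIONAL() ends up. All demo_* identifiers are
 * hypothetical; the macros are the ones defined in <linux/cleanup.h>.
 */
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_lock { bool taken; };

static void demo_acquire(struct demo_lock *l) { l->taken = true; }
static void demo_release(struct demo_lock *l) { l->taken = false; }
static bool demo_try_acquire(struct demo_lock *l)
{
	if (l->taken)
		return false;
	l->taken = true;
	return true;
}

/* Also emits "static __maybe_unused const bool class_demo_is_conditional = false;". */
DEFINE_GUARD(demo, struct demo_lock *, demo_acquire(_T), demo_release(_T))

/* Marks the "_try" extension as conditional (class_demo_try_is_conditional = true). */
DEFINE_GUARD_COND(demo, _try, demo_try_acquire(_T))

static int demo_user(struct demo_lock *l)
{
	/* Allowed: demo_try is conditional, so the _fail statement can trigger. */
	scoped_cond_guard(demo_try, return -EBUSY, l) {
		/* l is held here */
	}

	/*
	 * scoped_cond_guard(demo, ...) or if_not_guard(demo, l) would instead
	 * hit BUILD_BUG_ON(!__is_cond_ptr(demo)), since "demo" is unconditional.
	 */
	return 0;
}

The design point shown by the diff is that the conditional/unconditional property is recorded once per guard class, so misuse of scoped_cond_guard() or if_not_guard() with an unconditional class is rejected at compile time rather than silently accepted.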