author	Linus Torvalds <torvalds@linux-foundation.org>	2025-01-21 10:10:24 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-01-21 10:10:24 -0800
commit	8838a1a2d219a86ab05e679c73f68dd75a25aca5 (patch)
tree	ec0d60fb3d798111ff88feffb4a2c23c227988f1 /kernel/locking/test-ww_mutex.c
parent	b9d8a295ed6bc3cee846f8e3f27c9c6e9ebe43f2 (diff)
parent	cb4ccc70344c3dc29a5d0045361a4f0959bc5a6b (diff)
Merge tag 'locking-core-2025-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 "Lockdep:

   - Improve and fix lockdep bitsize limits, clarify the Kconfig
     documentation (Carlos Llamas)

   - Fix lockdep build warning on Clang related to
     chain_hlock_class_idx() inlining (Andy Shevchenko)

   - Relax the requirements of PROVE_RAW_LOCK_NESTING arch support by
     not tying it to ARCH_SUPPORTS_RT unnecessarily (Waiman Long)

  Rust integration:

   - Support lock pointers managed by the C side (Lyude Paul)

   - Support guard types (Lyude Paul)

   - Update MAINTAINERS file filters to include the Rust locking code
     (Boqun Feng)

  Wake-queues:

   - Add raw_spin_*wake() helpers to simplify locking code (John Stultz)

  SMP cross-calls:

   - Fix potential data update race by evaluating the local cond_func()
     before IPI side-effects (Mathieu Desnoyers)

  Guard primitives:

   - Ease [c]tags based searches by including the cleanup/guard type
     primitives (Peter Zijlstra)

  ww_mutexes:

   - Simplify the ww_mutex self-test code via swap() (Thorsten Blum)

  Static calls:

   - Update the static calls MAINTAINERS file-pattern (Jiri Slaby)"

* tag 'locking-core-2025-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  MAINTAINERS: Add static_call_inline.c to STATIC BRANCH/CALL
  cleanup, tags: Create tags for the cleanup primitives
  sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled
  rust: sync: Add lock::Backend::assert_is_held()
  rust: sync: Add SpinLockGuard type alias
  rust: sync: Add MutexGuard type alias
  rust: sync: Make Guard::new() public
  rust: sync: Add Lock::from_raw() for Lock<(), B>
  locking: MAINTAINERS: Start watching Rust locking primitives
  lockdep: Move lockdep_assert_locked() under #ifdef CONFIG_PROVE_LOCKING
  lockdep: Mark chain_hlock_class_idx() with __maybe_unused
  lockdep: Document MAX_LOCKDEP_CHAIN_HLOCKS calculation
  lockdep: Clarify size for LOCKDEP_*_BITS configs
  lockdep: Fix upper limit for LOCKDEP_*_BITS configs
  locking/ww_mutex/test: Use swap() macro
  smp/scf: Evaluate local cond_func() before IPI side-effects
  locking/lockdep: Enforce PROVE_RAW_LOCK_NESTING only if ARCH_SUPPORTS_RT
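The ww_mutexes item above is the change this page's diff shows. For context, the kernel's generic swap() macro (currently in include/linux/minmax.h) expands to a type-safe three-step exchange; a minimal sketch of its shape, not a verbatim quote of the kernel header:

/* Sketch of the kernel's swap() helper: exchange two lvalues of the same type. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

The diff below replaces an open-coded instance of exactly this pattern (a manual tmp variable) with the macro, dropping the tmp declaration and three lines of swap logic.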
Diffstat (limited to 'kernel/locking/test-ww_mutex.c')
-rw-r--r--	kernel/locking/test-ww_mutex.c	|  9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 5d58b2c0ef98..bcb1b9fea588 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -404,7 +404,7 @@ static inline u32 prandom_u32_below(u32 ceil)
 static int *get_random_order(int count)
 {
 	int *order;
-	int n, r, tmp;
+	int n, r;
 
 	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
 	if (!order)
@@ -415,11 +415,8 @@ static int *get_random_order(int count)
 
 	for (n = count - 1; n > 1; n--) {
 		r = prandom_u32_below(n + 1);
-		if (r != n) {
-			tmp = order[n];
-			order[n] = order[r];
-			order[r] = tmp;
-		}
+		if (r != n)
+			swap(order[n], order[r]);
 	}
 
 	return order;
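With the patch applied, get_random_order() performs a plain Fisher-Yates shuffle over an index array. Reconstructed below is roughly how the whole function reads after the change; the identity-initialization loop falls between the two hunks and is not visible in this diff, so it is filled in from context and should be treated as an assumption:

static int *get_random_order(int count)
{
	int *order;
	int n, r;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	/* Assumed from context: start from the identity permutation. */
	for (n = 0; n < count; n++)
		order[n] = n;

	/* Fisher-Yates pass: swap each slot with a randomly chosen slot at or below it. */
	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n)
			swap(order[n], order[r]);
	}

	return order;
}

prandom_u32_below() here is the file-local helper named in the hunk headers above.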