diff options
author | Kent Overstreet <kent.overstreet@linux.dev> | 2023-05-20 20:57:55 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:10:01 -0400 |
commit | 0d2234a79e877b1bfa71b2c8c712a155be419827 (patch) | |
tree | 3ddf278178dc2b05ce81e20b0ed79dde52b8890d /fs/bcachefs/six.c | |
parent | 01bf56a9771466147d94a013bc5678d0ed1b1382 (diff) |
six locks: Kill six_lock_pcpu_(alloc|free)
six_lock_pcpu_alloc() is an unsafe interface: it's not safe to allocate
or free the percpu reader count on an existing lock that's in use; the
only safe time to allocate percpu readers is when the lock is first
being initialized.
This patch adds a flags parameter to six_lock_init(), and instead of
six_lock_pcpu_free() we now expose six_lock_exit(), which does the same
thing but is less likely to be misused.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/six.c')
-rw-r--r-- | fs/bcachefs/six.c | 53 |
1 file changed, 34 insertions, 19 deletions
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 0f9e1bf31008..f75387b9da88 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -814,25 +814,6 @@ void six_lock_wakeup_all(struct six_lock *lock) } EXPORT_SYMBOL_GPL(six_lock_wakeup_all); -void six_lock_pcpu_free(struct six_lock *lock) -{ - BUG_ON(lock->readers && pcpu_read_count(lock)); - BUG_ON(lock->state.read_lock); - - free_percpu(lock->readers); - lock->readers = NULL; -} -EXPORT_SYMBOL_GPL(six_lock_pcpu_free); - -void six_lock_pcpu_alloc(struct six_lock *lock) -{ -#ifdef __KERNEL__ - if (!lock->readers) - lock->readers = alloc_percpu(unsigned); -#endif -} -EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc); - /* * Returns lock held counts, for both read and intent */ @@ -860,3 +841,37 @@ void six_lock_readers_add(struct six_lock *lock, int nr) atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter); } EXPORT_SYMBOL_GPL(six_lock_readers_add); + +void six_lock_exit(struct six_lock *lock) +{ + WARN_ON(lock->readers && pcpu_read_count(lock)); + WARN_ON(lock->state.read_lock); + + free_percpu(lock->readers); + lock->readers = NULL; +} +EXPORT_SYMBOL_GPL(six_lock_exit); + +void __six_lock_init(struct six_lock *lock, const char *name, + struct lock_class_key *key, enum six_lock_init_flags flags) +{ + atomic64_set(&lock->state.counter, 0); + raw_spin_lock_init(&lock->wait_lock); + INIT_LIST_HEAD(&lock->wait_list); +#ifdef CONFIG_DEBUG_LOCK_ALLOC + debug_check_no_locks_freed((void *) lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif + + if (flags & SIX_LOCK_INIT_PCPU) { + /* + * We don't return an error here on memory allocation failure + * since percpu is an optimization, and locks will work with the + * same semantics in non-percpu mode: callers can check for + * failure if they wish by checking lock->readers, but generally + * will not want to treat it as an error. + */ + lock->readers = alloc_percpu(unsigned); + } +} +EXPORT_SYMBOL_GPL(__six_lock_init); |