Diffstat (limited to 'fs/bcachefs/six.c')
-rw-r--r--  fs/bcachefs/six.c  |  51
1 file changed, 31 insertions(+), 20 deletions(-)
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 3a494c5d1247..538c324f4765 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -169,11 +169,17 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
ret = -1 - SIX_LOCK_write;
}
} else if (type == SIX_LOCK_write && lock->readers) {
- if (try) {
+ if (try)
atomic_add(SIX_LOCK_HELD_write, &lock->state);
- smp_mb__after_atomic();
- }

+ /*
+ * Make sure atomic_add happens before pcpu_read_count and
+ * six_set_bitmask in slow path happens before pcpu_read_count.
+ *
+ * Paired with the smp_mb() in read lock fast path (per-cpu mode)
+ * and the one before atomic_read in read unlock path.
+ */
+ smp_mb();
ret = !pcpu_read_count(lock);

if (try && !ret) {
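
The smp_mb() added above closes a classic store-buffering race between a would-be writer and the per-cpu readers. A minimal userspace sketch of the pattern (illustrative only, not bcachefs code; the names are invented stand-ins for lock->state and pcpu_read_count()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int held_write;	/* stands in for SIX_LOCK_HELD_write */
static atomic_int read_count;	/* stands in for the per-cpu reader count */

static bool try_write_lock(void)
{
	atomic_store_explicit(&held_write, 1, memory_order_relaxed);
	/* full barrier: the store above must be visible before the load below */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&read_count, memory_order_relaxed) == 0)
		return true;
	atomic_store_explicit(&held_write, 0, memory_order_relaxed);
	return false;
}

static bool try_read_lock(void)
{
	atomic_fetch_add_explicit(&read_count, 1, memory_order_relaxed);
	/* the paired barrier from the read-lock fast path */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&held_write, memory_order_relaxed) == 0)
		return true;
	atomic_fetch_sub_explicit(&read_count, 1, memory_order_relaxed);
	return false;
}

Without both fences, each thread's store can sit in its store buffer while it loads the other side's stale value, and try_write_lock() and try_read_lock() can both return true at once.
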
@@ -333,12 +339,9 @@ static inline bool six_owner_running(struct six_lock *lock)
* acquiring the lock and setting the owner field. If we're an RT task
* that will live-lock because we won't let the owner complete.
*/
- rcu_read_lock();
+ guard(rcu)();
struct task_struct *owner = READ_ONCE(lock->owner);
- bool ret = owner ? owner_on_cpu(owner) : !rt_task(current);
- rcu_read_unlock();
-
- return ret;
+ return owner ? owner_on_cpu(owner) : !rt_or_dl_task(current);
}

static inline bool six_optimistic_spin(struct six_lock *lock,
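
guard(rcu)() comes from the kernel's scope-based cleanup support (<linux/cleanup.h>); it takes the RCU read lock and drops it automatically at every scope exit, which is what lets six_owner_running() return straight out of the critical section. A rough userspace illustration of the mechanism, built on the same compiler feature (the fake_ names are invented for this sketch):

#include <stdio.h>

static void fake_rcu_read_lock(void) { puts("rcu_read_lock"); }
static void fake_rcu_read_unlock(int *unused) { (void)unused; puts("rcu_read_unlock"); }

/* a dummy variable whose cleanup handler drops the lock at scope exit */
#define FAKE_GUARD_RCU \
	int _guard __attribute__((cleanup(fake_rcu_read_unlock))) = \
		(fake_rcu_read_lock(), 0)

static int lookup(int key)
{
	FAKE_GUARD_RCU;
	if (key < 0)
		return -1;	/* unlock runs here too */
	return key * 2;		/* ...and here */
}

Separately, rt_or_dl_task() replaces rt_task() because deadline tasks can live-lock against a preempted owner in the same way, and rt_task() no longer covers them.
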
@@ -485,8 +488,12 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
list_del(&wait->list);
raw_spin_unlock(&lock->wait_lock);
- if (unlikely(acquired))
+ if (unlikely(acquired)) {
do_six_unlock_type(lock, type);
+ } else if (type == SIX_LOCK_write) {
+ six_clear_bitmask(lock, SIX_LOCK_HELD_write);
+ six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
+ }
break;
}
@@ -495,10 +502,6 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
__set_current_state(TASK_RUNNING);
out:
- if (ret && type == SIX_LOCK_write) {
- six_clear_bitmask(lock, SIX_LOCK_HELD_write);
- six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
- }
trace_contention_end(lock, 0);

return ret;
@@ -610,8 +613,6 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long
if (type != SIX_LOCK_write)
six_release(&lock->dep_map, ip);
- else
- lock->seq++;

if (type == SIX_LOCK_intent &&
lock->intent_lock_recurse) {
@@ -619,6 +620,15 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long
return;
}

+ if (type == SIX_LOCK_write &&
+ lock->write_lock_recurse) {
+ --lock->write_lock_recurse;
+ return;
+ }
+
+ if (type == SIX_LOCK_write)
+ lock->seq++;
+
do_six_unlock_type(lock, type);
}
EXPORT_SYMBOL_GPL(six_unlock_ip);
@@ -729,13 +739,13 @@ void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
atomic_add(l[type].lock_val, &lock->state);
}
break;
+ case SIX_LOCK_write:
+ lock->write_lock_recurse++;
+ fallthrough;
case SIX_LOCK_intent:
EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
lock->intent_lock_recurse++;
break;
- case SIX_LOCK_write:
- BUG();
- break;
}
}
EXPORT_SYMBOL_GPL(six_lock_increment);
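
Taken together with the six_unlock_ip() hunk above, write locks can now be held recursively: six_lock_increment(lock, SIX_LOCK_write) bumps write_lock_recurse (and intent_lock_recurse via the fallthrough, since a write lock implies intent), and the unlock path consumes the counter before doing a real unlock. A sketch of the resulting flow, using only signatures visible in this file (assumes l is already held for write; error handling elided):

	/* recursive grab: counted, no longer BUG()s */
	six_lock_increment(&l, SIX_LOCK_write);

	/* drops write_lock_recurse 1 -> 0 and returns early:
	 * lock->seq is not bumped, the lock stays held */
	six_unlock_ip(&l, SIX_LOCK_write, _THIS_IP_);

	/* real unlock: lock->seq++, do_six_unlock_type() wakes waiters */
	six_unlock_ip(&l, SIX_LOCK_write, _THIS_IP_);

Note that the recursive write grab also counted one recursive intent grab, which the matching intent unlock consumes later.
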
@@ -837,7 +847,8 @@ void six_lock_exit(struct six_lock *lock)
EXPORT_SYMBOL_GPL(six_lock_exit);

void __six_lock_init(struct six_lock *lock, const char *name,
- struct lock_class_key *key, enum six_lock_init_flags flags)
+ struct lock_class_key *key, enum six_lock_init_flags flags,
+ gfp_t gfp)
{
atomic_set(&lock->state, 0);
raw_spin_lock_init(&lock->wait_lock);
@@ -860,7 +871,7 @@ void __six_lock_init(struct six_lock *lock, const char *name,
* failure if they wish by checking lock->readers, but generally
* will not want to treat it as an error.
*/
- lock->readers = alloc_percpu(unsigned);
+ lock->readers = alloc_percpu_gfp(unsigned, gfp);
}
#endif
}
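
With the new gfp parameter, callers that cannot block can still initialize a lock in per-cpu reader mode and, per the comment above, treat allocation failure as a soft fallback rather than an error. A hedged usage sketch (the demo_ names are invented; assumes the SIX_LOCK_INIT_PCPU flag from six.h):

static struct lock_class_key demo_key;
static struct six_lock demo_lock;

static void demo_init(void)
{
	__six_lock_init(&demo_lock, "demo_lock", &demo_key,
			SIX_LOCK_INIT_PCPU, GFP_NOWAIT);

	/* allocation failure just means no per-cpu reader counts */
	if (!demo_lock.readers)
		pr_debug("demo_lock: per-cpu readers unavailable\n");
}
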