Diffstat (limited to 'fs/btrfs/locking.c')
| -rw-r--r-- | fs/btrfs/locking.c | 101 |
1 files changed, 39 insertions, 62 deletions
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 870528d87526..0035851d72b0 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -8,11 +8,10 @@
 #include <linux/spinlock.h>
 #include <linux/page-flags.h>
 #include <asm/bug.h>
-#include "misc.h"
+#include <trace/events/btrfs.h>
 #include "ctree.h"
 #include "extent_io.h"
 #include "locking.h"
-#include "accessors.h"
 
 /*
  * Lockdep class keys for extent_buffer->lock's in this root. For a given
@@ -57,8 +56,8 @@ static struct btrfs_lockdep_keyset {
         u64                     id;             /* root objectid */
-        /* Longest entry: btrfs-free-space-00 */
-        char                    names[BTRFS_MAX_LEVEL][20];
+        /* Longest entry: btrfs-block-group-00 */
+        char                    names[BTRFS_MAX_LEVEL][24];
         struct lock_class_key   keys[BTRFS_MAX_LEVEL];
 } btrfs_lockdep_keysets[] = {
         { .id = BTRFS_ROOT_TREE_OBJECTID,       DEFINE_NAME("root")     },
@@ -72,6 +71,8 @@ static struct btrfs_lockdep_keyset {
         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc")   },
         { .id = BTRFS_UUID_TREE_OBJECTID,       DEFINE_NAME("uuid")     },
         { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
+        { .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
+        { .id = BTRFS_RAID_STRIPE_TREE_OBJECTID, DEFINE_NAME("raid-stripe") },
         { .id = 0,                              DEFINE_NAME("tree")     },
 };
 
@@ -82,7 +83,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int
 {
         struct btrfs_lockdep_keyset *ks;
 
-        BUG_ON(level >= ARRAY_SIZE(ks->keys));
+        ASSERT(level < ARRAY_SIZE(ks->keys));
 
         /* Find the matching keyset, id 0 is the default entry */
         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
@@ -95,12 +96,21 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int
 void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
 {
         if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
-                btrfs_set_buffer_lockdep_class(root->root_key.objectid,
+                btrfs_set_buffer_lockdep_class(btrfs_root_id(root),
                                                eb, btrfs_header_level(eb));
 }
 
 #endif
 
+#ifdef CONFIG_BTRFS_DEBUG
+static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner)
+{
+        eb->lock_owner = owner;
+}
+#else
+static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { }
+#endif
+
 /*
  * Extent buffer locking
  * =====================
@@ -118,14 +128,14 @@ void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buff
  */
 
 /*
- * __btrfs_tree_read_lock - lock extent buffer for read
+ * btrfs_tree_read_lock_nested - lock extent buffer for read
  * @eb:   the eb to be locked
  * @nest: the nesting level to be used for lockdep
  *
  * This takes the read lock on the extent buffer, using the specified nesting
  * level for lockdep purposes.
  */
-void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
+void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
 {
         u64 start_ns = 0;
 
@@ -136,38 +146,18 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne
         trace_btrfs_tree_read_lock(eb, start_ns);
 }
 
-void btrfs_tree_read_lock(struct extent_buffer *eb)
-{
-        __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
-}
-
 /*
  * Try-lock for read.
  *
- * Return 1 if the rwlock has been taken, 0 otherwise
+ * Return true if the rwlock has been taken, false otherwise
  */
-int btrfs_try_tree_read_lock(struct extent_buffer *eb)
+bool btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
         if (down_read_trylock(&eb->lock)) {
                 trace_btrfs_try_tree_read_lock(eb);
-                return 1;
-        }
-        return 0;
-}
-
-/*
- * Try-lock for write.
- *
- * Return 1 if the rwlock has been taken, 0 otherwise
- */
-int btrfs_try_tree_write_lock(struct extent_buffer *eb)
-{
-        if (down_write_trylock(&eb->lock)) {
-                eb->lock_owner = current->pid;
-                trace_btrfs_try_tree_write_lock(eb);
-                return 1;
+                return true;
         }
-        return 0;
+        return false;
 }
 
 /*
@@ -180,13 +170,14 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
 }
 
 /*
- * __btrfs_tree_lock - lock eb for write
+ * Lock eb for write.
+ *
  * @eb: the eb to lock
 * @nest: the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
-void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
+void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
         __acquires(&eb->lock)
 {
         u64 start_ns = 0;
@@ -195,22 +186,17 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
                 start_ns = ktime_get_ns();
 
         down_write_nested(&eb->lock, nest);
-        eb->lock_owner = current->pid;
+        btrfs_set_eb_lock_owner(eb, current->pid);
         trace_btrfs_tree_lock(eb, start_ns);
 }
 
-void btrfs_tree_lock(struct extent_buffer *eb)
-{
-        __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
-}
-
 /*
  * Release the write lock.
  */
 void btrfs_tree_unlock(struct extent_buffer *eb)
 {
         trace_btrfs_tree_unlock(eb);
-        eb->lock_owner = 0;
+        btrfs_set_eb_lock_owner(eb, 0);
         up_write(&eb->lock);
 }
 
@@ -325,24 +311,12 @@ struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
  * acquire the lock.
  */
 
-int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
+void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
 {
-        int ret;
-
-        ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
-        if (ret)
-                return ret;
-
         atomic_set(&lock->readers, 0);
+        atomic_set(&lock->writers, 0);
         init_waitqueue_head(&lock->pending_readers);
         init_waitqueue_head(&lock->pending_writers);
-
-        return 0;
-}
-
-void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
-{
-        percpu_counter_destroy(&lock->writers);
 }
 
 /* Return true if acquisition is successful, false otherwise */
@@ -351,10 +325,10 @@ bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
         if (atomic_read(&lock->readers))
                 return false;
 
-        percpu_counter_inc(&lock->writers);
+        atomic_inc(&lock->writers);
 
         /* Ensure writers count is updated before we check for pending readers */
-        smp_mb();
+        smp_mb__after_atomic();
         if (atomic_read(&lock->readers)) {
                 btrfs_drew_write_unlock(lock);
                 return false;
@@ -374,8 +348,12 @@ void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
 
 void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
 {
-        percpu_counter_dec(&lock->writers);
-        cond_wake_up(&lock->pending_readers);
+        /*
+         * atomic_dec_and_test() implies a full barrier, so woken up readers are
+         * guaranteed to see the decrement.
+         */
+        if (atomic_dec_and_test(&lock->writers))
+                wake_up(&lock->pending_readers);
 }
 
 void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
 {
@@ -383,15 +361,14 @@
         atomic_inc(&lock->readers);
 
         /*
-         * Ensure the pending reader count is perceieved BEFORE this reader
+         * Ensure the pending reader count is perceived BEFORE this reader
         * goes to sleep in case of active writers. This guarantees new writers
         * won't be allowed and that the current reader will be woken up when
         * the last active writer finishes its jobs.
         */
         smp_mb__after_atomic();
 
-        wait_event(lock->pending_readers,
-                   percpu_counter_sum(&lock->writers) == 0);
+        wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
 }
 
 void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
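For readers unfamiliar with the drew lock touched by the last hunks: the patch replaces the per-CPU writer counter with a plain atomic, and the try-write path increments the writer count, issues a barrier, then re-checks for readers. Below is a minimal userspace sketch of that protocol, assuming C11 atomics; the type and function names are illustrative only (they are not the kernel's), and a spin-wait stands in for the kernel's wait_event()/wake_up() waitqueues.

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

/* Illustrative userspace analogue of the drew lock; not the kernel code. */
struct drew_lock {
        atomic_int readers;
        atomic_int writers;
};

static void drew_lock_init(struct drew_lock *lock)
{
        atomic_store(&lock->readers, 0);
        atomic_store(&lock->writers, 0);
}

/* Mirrors the try-write path: bail out if any reader is active. */
static bool drew_try_write_lock(struct drew_lock *lock)
{
        if (atomic_load(&lock->readers))
                return false;

        /* Publish the writer, then re-check for readers; seq_cst RMW gives a full barrier. */
        atomic_fetch_add(&lock->writers, 1);
        if (atomic_load(&lock->readers)) {
                /* Back off, as the kernel does via its write-unlock helper. */
                atomic_fetch_sub(&lock->writers, 1);
                return false;
        }
        return true;
}

static void drew_write_unlock(struct drew_lock *lock)
{
        atomic_fetch_sub(&lock->writers, 1);
}

/* Mirrors the read path: announce the reader, then wait until no writer remains. */
static void drew_read_lock(struct drew_lock *lock)
{
        atomic_fetch_add(&lock->readers, 1);
        while (atomic_load(&lock->writers))
                sched_yield();
}

static void drew_read_unlock(struct drew_lock *lock)
{
        atomic_fetch_sub(&lock->readers, 1);
}

int main(void)
{
        struct drew_lock lock;
        drew_lock_init(&lock);

        drew_read_lock(&lock);
        /* With a reader held, a writer must fail to acquire the lock. */
        bool ok = drew_try_write_lock(&lock);   /* expected: false */
        drew_read_unlock(&lock);

        if (ok)
                drew_write_unlock(&lock);       /* not expected to be reached */
        return ok ? 1 : 0;
}

The sketch omits the pending-writers side (a real writer would wait for the reader count to drop instead of failing), but it shows why the barrier between the increment and the reader re-check matters: without it, a racing reader and writer could each miss the other's count update.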
