path: root/fs/btrfs/dev-replace.c
author    David Sterba <dsterba@suse.com>  2018-04-05 01:29:24 +0200
committer David Sterba <dsterba@suse.com>  2018-12-17 14:51:44 +0100
commit    129827e3001fd1e6892a0629b48f9c7c91cbb8b6 (patch)
tree      2a99d8de69cd3d439d96a384d41da62ac083e6c1 /fs/btrfs/dev-replace.c
parent    ceb21a8db48559fd0809e03c4df9eb37743d9170 (diff)
btrfs: dev-replace: switch locking to rw semaphore
This is the first part of removing the custom locking and waiting scheme used for device replace. It was probably copied from extent buffer locking, but there's nothing here that requires more than the common locking primitives provide.

The rw spinlock protects the counter of waiting tasks (used in case of incompatible locks) and the waitqueue; an rw semaphore covers the same.

This patch only switches the locking primitive, for better bisectability. There should be no functional change other than the overhead of the locking and the potential for sleeping instead of spinning when the lock is contended.

Signed-off-by: David Sterba <dsterba@suse.com>
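For reference, the rwlock_t and rw_semaphore APIs map almost one to one for this use, which is why the conversion is mechanical. Below is a minimal sketch of the two primitives side by side; the struct and function names are hypothetical stand-ins, not the actual btrfs definitions.

    #include <linux/spinlock.h>   /* rwlock_t, read_lock(), read_unlock() */
    #include <linux/rwsem.h>      /* struct rw_semaphore, down_read(), up_read() */

    /* before: rw spinlock -- contended tasks spin, holders must not sleep */
    struct replace_state_spin {
            rwlock_t lock;
    };

    static void reader_spin(struct replace_state_spin *s)
    {
            read_lock(&s->lock);          /* busy-waits if a writer holds the lock */
            /* ... read device-replace state ... */
            read_unlock(&s->lock);
    }

    /* after: rw semaphore -- contended tasks sleep instead of spinning */
    struct replace_state_sem {
            struct rw_semaphore rwsem;
    };

    static void reader_sem(struct replace_state_sem *s)
    {
            down_read(&s->rwsem);         /* may sleep, so callers need process context */
            /* ... read device-replace state ... */
            up_read(&s->rwsem);
    }

The semantics a caller sees are the same (shared readers, exclusive writer); only the contention behavior changes, as the commit message notes.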
Diffstat (limited to 'fs/btrfs/dev-replace.c')
-rw-r--r--  fs/btrfs/dev-replace.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 3b272ff60fea..316a29278306 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -1002,12 +1002,12 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace)
{
- read_lock(&dev_replace->lock);
+ down_read(&dev_replace->rwsem);
}
void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace)
{
- read_unlock(&dev_replace->lock);
+ up_read(&dev_replace->rwsem);
}
void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace)
@@ -1015,16 +1015,16 @@ void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace)
again:
wait_event(dev_replace->read_lock_wq,
atomic_read(&dev_replace->blocking_readers) == 0);
- write_lock(&dev_replace->lock);
+ down_write(&dev_replace->rwsem);
if (atomic_read(&dev_replace->blocking_readers)) {
- write_unlock(&dev_replace->lock);
+ up_write(&dev_replace->rwsem);
goto again;
}
}
void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace)
{
- write_unlock(&dev_replace->lock);
+ up_write(&dev_replace->rwsem);
}
/* inc blocking cnt and release read lock */
@@ -1033,7 +1033,7 @@ void btrfs_dev_replace_set_lock_blocking(
{
/* only set blocking for read lock */
atomic_inc(&dev_replace->blocking_readers);
- read_unlock(&dev_replace->lock);
+ up_read(&dev_replace->rwsem);
}
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
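The rwsem field used above is declared and initialized outside of this file and is not part of this diff. A hedged sketch of the kind of setup the new helpers assume, with simplified stand-in names rather than the real btrfs structures:

    #include <linux/rwsem.h>

    /* simplified stand-in for struct btrfs_dev_replace; the real one has more fields */
    struct dev_replace_demo {
            struct rw_semaphore rwsem;
    };

    static void dev_replace_demo_init(struct dev_replace_demo *dr)
    {
            init_rwsem(&dr->rwsem);   /* must run before any down_read()/down_write() */
    }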