author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-07 09:07:30 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-07 09:07:30 -0800 |
commit | b1e243957e9b3ba8e820fb8583bdf18e7c737aa2 (patch) | |
tree | c50e7f561426612570b15cf6df063df8751661fa /fs/btrfs/locking.c | |
parent | 0556161ff9069c938ca5409e1e102ac6f371a1c8 (diff) | |
parent | f65e25e343cfc0e6f4db9a687c4085fad268325d (diff) | |
Merge tag 'for-5.1-part1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
"This contains usual mix of new features, core changes and fixes; full
list below. I'm planning second pull request, with a few more fixes
that arrived recently but too close to merge window, will send it next
week.
New features:
- support zstd compression levels (a mount sketch follows this message)
- new ioctl to unregister a device from the module (i.e. the reverse of
a device scan; an ioctl sketch follows this message)
- scrub prints a message to log when it's about to start or finish
Core changes:
- qgroups can now skip parts of a tree that do not get updated during
relocation, because this does not affect the quota accounting; the
estimated runtime speedup is about 20%
- the compression workspace management had to be enhanced due to zstd
requirements
- various ENOSPC fixes: when there's high fragmentation, the
over-reservation can cause ENOSPC that might not happen after a
flush; in such cases, try to wait and see if the situation improves
Fixes:
- various ioctls could overwrite a previous return value if
copy_to_user fails; fix this so the original error is reported (the
pattern is sketched after this message)
- more reclaim vs GFP_KERNEL fixes
- other cleanups and refactoring
- fix a (valid) lockdep warning in a test when device replace is
destroying worker threads
- make qgroup async transaction commit more aggressive; this avoids
some 'quota limit reached' errors when there is not enough data to
trigger a transaction commit that would flush the reservations
- fix a deadlock between snapshot deletion and quotas when backref
walking is called from a context that already holds the same locks
- fsync fixes:
- fix fsync after succession of renames of different files
- fix fsync after succession of renames and unlink/rmdir"
* tag 'for-5.1-part1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (92 commits)
btrfs: Remove unnecessary casts in btrfs_read_root_item
Btrfs: remove assertion when searching for a key in a node/leaf
Btrfs: add missing error handling after doing leaf/node binary search
btrfs: drop the lock on error in btrfs_dev_replace_cancel
btrfs: ensure that a DUP or RAID1 block group has exactly two stripes
btrfs: init csum_list before possible free
Btrfs: remove no longer needed range length checks for deduplication
Btrfs: fix fsync after succession of renames and unlink/rmdir
Btrfs: fix fsync after succession of renames of different files
btrfs: honor path->skip_locking in backref code
btrfs: qgroup: Make qgroup async transaction commit more aggressive
btrfs: qgroup: Move reserved data accounting from btrfs_delayed_ref_head to btrfs_qgroup_extent_record
btrfs: scrub: remove unused nocow worker pointer
btrfs: scrub: add assertions for worker pointers
btrfs: scrub: convert scrub_workers_refcnt to refcount_t
btrfs: scrub: add scrub_lock lockdep check in scrub_workers_get
btrfs: scrub: fix circular locking dependency warning
btrfs: fix comment its device list mutex not volume lock
btrfs: extent_io: Kill the forward declaration of flush_write_bio
btrfs: Fix grossly misleading argument names in extent io search
...
Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r-- | fs/btrfs/locking.c | 108 |
1 file changed, 53 insertions(+), 55 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 1da768e5ef75..82b84e4daad1 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -14,43 +14,58 @@
 
 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
 
-/*
- * if we currently have a spinning reader or writer lock
- * (indicated by the rw flag) this will bump the count
- * of blocking holders and drop the spinlock.
- */
-void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
+void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
 {
 	/*
-	 * no lock is required. The lock owner may change if
-	 * we have a read lock, but it won't change to or away
-	 * from us. If we have the write lock, we are the owner
-	 * and it'll never change.
+	 * No lock is required. The lock owner may change if we have a read
+	 * lock, but it won't change to or away from us. If we have the write
+	 * lock, we are the owner and it'll never change.
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-	if (rw == BTRFS_WRITE_LOCK) {
-		if (atomic_read(&eb->blocking_writers) == 0) {
-			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
-			atomic_dec(&eb->spinning_writers);
-			btrfs_assert_tree_locked(eb);
-			atomic_inc(&eb->blocking_writers);
-			write_unlock(&eb->lock);
-		}
-	} else if (rw == BTRFS_READ_LOCK) {
-		btrfs_assert_tree_read_locked(eb);
-		atomic_inc(&eb->blocking_readers);
-		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
-		atomic_dec(&eb->spinning_readers);
-		read_unlock(&eb->lock);
+	btrfs_assert_tree_read_locked(eb);
+	atomic_inc(&eb->blocking_readers);
+	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+	atomic_dec(&eb->spinning_readers);
+	read_unlock(&eb->lock);
+}
+
+void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
+{
+	/*
+	 * No lock is required. The lock owner may change if we have a read
+	 * lock, but it won't change to or away from us. If we have the write
+	 * lock, we are the owner and it'll never change.
+	 */
+	if (eb->lock_nested && current->pid == eb->lock_owner)
+		return;
+	if (atomic_read(&eb->blocking_writers) == 0) {
+		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+		atomic_dec(&eb->spinning_writers);
+		btrfs_assert_tree_locked(eb);
+		atomic_inc(&eb->blocking_writers);
+		write_unlock(&eb->lock);
 	}
 }
 
-/*
- * if we currently have a blocking lock, take the spinlock
- * and drop our blocking count
- */
-void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
+void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
+{
+	/*
+	 * No lock is required. The lock owner may change if we have a read
+	 * lock, but it won't change to or away from us. If we have the write
+	 * lock, we are the owner and it'll never change.
+	 */
+	if (eb->lock_nested && current->pid == eb->lock_owner)
+		return;
+	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
+	read_lock(&eb->lock);
+	atomic_inc(&eb->spinning_readers);
+	/* atomic_dec_and_test implies a barrier */
+	if (atomic_dec_and_test(&eb->blocking_readers))
+		cond_wake_up_nomb(&eb->read_lock_wq);
+}
+
+void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 {
 	/*
 	 * no lock is required. The lock owner may change if
@@ -60,23 +75,13 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-
-	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
-		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
-		write_lock(&eb->lock);
-		WARN_ON(atomic_read(&eb->spinning_writers));
-		atomic_inc(&eb->spinning_writers);
-		/* atomic_dec_and_test implies a barrier */
-		if (atomic_dec_and_test(&eb->blocking_writers))
-			cond_wake_up_nomb(&eb->write_lock_wq);
-	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
-		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
-		read_lock(&eb->lock);
-		atomic_inc(&eb->spinning_readers);
-		/* atomic_dec_and_test implies a barrier */
-		if (atomic_dec_and_test(&eb->blocking_readers))
-			cond_wake_up_nomb(&eb->read_lock_wq);
-	}
+	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
+	write_lock(&eb->lock);
+	WARN_ON(atomic_read(&eb->spinning_writers));
+	atomic_inc(&eb->spinning_writers);
+	/* atomic_dec_and_test implies a barrier */
+	if (atomic_dec_and_test(&eb->blocking_writers))
+		cond_wake_up_nomb(&eb->write_lock_wq);
 }
 
 /*
@@ -232,16 +237,9 @@ again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
 	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_readers)) {
+	if (atomic_read(&eb->blocking_readers) ||
+	    atomic_read(&eb->blocking_writers)) {
 		write_unlock(&eb->lock);
-		wait_event(eb->read_lock_wq,
-			   atomic_read(&eb->blocking_readers) == 0);
-		goto again;
-	}
-	if (atomic_read(&eb->blocking_writers)) {
-		write_unlock(&eb->lock);
-		wait_event(eb->write_lock_wq,
-			   atomic_read(&eb->blocking_writers) == 0);
 		goto again;
 	}
 	WARN_ON(atomic_read(&eb->spinning_writers));
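For context on the diff above: it splits the flag-dispatching helpers btrfs_set_lock_blocking_rw() and btrfs_clear_lock_blocking_rw() into dedicated read and write variants, so each call site names the lock type it holds and cannot pass a mismatched flag. A sketch of what a caller conversion looks like; the caller itself is hypothetical, only the helper names and flags come from the diff:

```c
/* Hypothetical caller of the extent buffer locking API, before and
 * after this patch; eb is an extent_buffer the caller has read-locked. */

/* Before: one helper per direction, dispatching on a flag argument. */
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
/* ... work that may sleep while holding the blocking read lock ... */
btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);

/* After: dedicated helpers make the lock type explicit at the call
 * site, and the set/clear pair can no longer disagree on the flag. */
btrfs_set_lock_blocking_read(eb);
/* ... work that may sleep ... */
btrfs_clear_lock_blocking_read(eb);
```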