-rw-r--r--  fs/ext4/ext4.h     23
-rw-r--r--  fs/ext4/mballoc.c  19
2 files changed, 30 insertions, 12 deletions
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d377e02c9767..b014f86951e9 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3541,23 +3541,28 @@ static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi)
return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD);
}
+static inline bool ext4_try_lock_group(struct super_block *sb, ext4_group_t group)
+{
+ if (!spin_trylock(ext4_group_lock_ptr(sb, group)))
+ return false;
+ /*
+ * We're able to grab the lock right away, so drop the lock
+ * contention counter.
+ */
+ atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
+ return true;
+}
+
static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
- spinlock_t *lock = ext4_group_lock_ptr(sb, group);
- if (spin_trylock(lock))
- /*
- * We're able to grab the lock right away, so drop the
- * lock contention counter.
- */
- atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
- else {
+ if (!ext4_try_lock_group(sb, group)) {
/*
* The lock is busy, so bump the contention counter,
* and then wait on the spin lock.
*/
atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
EXT4_MAX_CONTENTION);
- spin_lock(lock);
+ spin_lock(ext4_group_lock_ptr(sb, group));
}
}
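
The hunk above factors the fast path of ext4_lock_group() into a new ext4_try_lock_group() helper: a successful trylock bleeds the s_lock_busy contention counter down, while the blocking path bumps it (capped at EXT4_MAX_CONTENTION) before spinning. That counter is what ext4_fs_is_busy(), visible in the hunk context, compares against EXT4_CONTENTION_THRESHOLD. For readers outside the kernel tree, the sketch below is a userspace analogue of that pattern only, not ext4 code; the names (try_lock_group, lock_group, add_unless, MAX_CONTENTION) and the use of a pthread spinlock plus a C11 atomic are assumptions made for illustration.

/* Illustrative userspace analogue of the try-lock / contention-counter
 * split; all names here are hypothetical stand-ins, not ext4's. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_CONTENTION 32              /* stand-in for EXT4_MAX_CONTENTION */

struct group {
	pthread_spinlock_t lock;       /* per-group lock; pthread_spin_init() before use */
};

static atomic_int lock_busy;           /* stand-in for sbi->s_lock_busy */

/* Bounded add: apply delta unless the counter already sits at limit,
 * mirroring the kernel's atomic_add_unless(). */
static void add_unless(atomic_int *v, int delta, int limit)
{
	int old = atomic_load(v);

	while (old != limit &&
	       !atomic_compare_exchange_weak(v, &old, old + delta))
		;
}

static bool try_lock_group(struct group *grp)
{
	if (pthread_spin_trylock(&grp->lock) != 0)
		return false;
	/* Got the lock right away, so drop the contention counter. */
	add_unless(&lock_busy, -1, 0);
	return true;
}

static void lock_group(struct group *grp)
{
	if (!try_lock_group(grp)) {
		/* Lock is busy: bump the counter, then wait on the lock. */
		add_unless(&lock_busy, 1, MAX_CONTENTION);
		pthread_spin_lock(&grp->lock);
	}
}

The refactor keeps ext4_lock_group()'s behaviour unchanged while letting callers that would rather skip a busy group use the trylock half on its own, which is exactly how the mballoc changes below use it.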
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 1e98c5be4e0a..336d65c4f6a2 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -896,7 +896,8 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
bb_largest_free_order_node) {
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
- if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
+ if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
+ likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
*group = iter->bb_group;
ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
@@ -932,7 +933,8 @@ ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int o
list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
- if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
+ if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
+ likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
grp = iter;
break;
}
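
Both list walks above now peek at the group lock with spin_is_locked() before paying for ext4_mb_good_group(), so a group another allocator is currently working on is passed over rather than contended for. The peek is only advisory: the lock can be taken or released right after the check, which is acceptable here because missing a group just means the scan moves on to other candidates. Below is a minimal userspace sketch of that "cheap racy hint first, expensive test second" scan; the spinlock is hand-rolled only so its held state can be observed, and every name in it (peek_lock, candidate, pick_candidate, and so on) is hypothetical.

/* Hypothetical candidate scan that skips entries observed busy.
 * Not ext4 code; illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct peek_lock {
	atomic_bool held;
};

static bool peek_locked(struct peek_lock *l)
{
	/* Racy, advisory read: a stale answer only costs a skip or a retry. */
	return atomic_load_explicit(&l->held, memory_order_relaxed);
}

struct candidate {
	struct peek_lock lock;
	unsigned long free_blocks;
};

/* Stand-in for the "is this group good for the request" predicate. */
static bool candidate_is_good(const struct candidate *c, unsigned long need)
{
	return c->free_blocks >= need;
}

static struct candidate *pick_candidate(struct candidate *cands, size_t n,
					unsigned long need)
{
	for (size_t i = 0; i < n; i++) {
		/* Cheap hint first: don't evaluate a busy candidate at all. */
		if (peek_locked(&cands[i].lock))
			continue;
		if (candidate_is_good(&cands[i], need))
			return &cands[i];
	}
	return NULL;	/* caller falls back to scanning other candidates */
}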
@@ -2899,6 +2901,11 @@ repeat:
nr, &prefetch_ios);
}
+ /* prevent unnecessary buddy loading. */
+ if (cr < CR_ANY_FREE &&
+ spin_is_locked(ext4_group_lock_ptr(sb, group)))
+ continue;
+
/* This now checks without needing the buddy page */
ret = ext4_mb_good_group_nolock(ac, group, cr);
if (ret <= 0) {
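
This hunk, together with the one that follows, changes the scanning loop's policy: before the buddy is loaded, a group whose lock is already observed held is skipped for the cheaper criteria, and after the buddy is loaded the lock is only try-acquired for those passes, with a busy group unloaded and skipped. Only the final cr >= CR_ANY_FREE pass still blocks in ext4_lock_group() as before. The sketch below is a rough control-flow illustration of that policy under assumed names (load_buddy, unload_buddy, scan_group, a simplified two-level criterion enum); it is not the ext4 allocator.

/* Rough sketch of the busy-group skip policy; every helper and type here
 * is a hypothetical stand-in, not an ext4 function. */
#include <pthread.h>
#include <stdbool.h>

enum pass { CR_CHEAP, CR_ANY_FREE };   /* simplified criterion levels */

struct group {
	pthread_spinlock_t lock;
	bool buddy_loaded;
};

static int  load_buddy(struct group *g)   { g->buddy_loaded = true; return 0; }
static void unload_buddy(struct group *g) { g->buddy_loaded = false; }
static bool scan_group(struct group *g)   { (void)g; return false; }

static bool try_alloc_from(struct group *g, enum pass cr)
{
	bool found;

	if (load_buddy(g) != 0)
		return false;

	if (cr >= CR_ANY_FREE) {
		/* Last-resort pass: wait for the group as before. */
		pthread_spin_lock(&g->lock);
	} else if (pthread_spin_trylock(&g->lock) != 0) {
		/* Busy group: drop the buddy and move on to the next one. */
		unload_buddy(g);
		return false;
	}

	found = scan_group(g);          /* re-check and allocate under the lock */
	pthread_spin_unlock(&g->lock);
	if (!found)
		unload_buddy(g);
	return found;
}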
@@ -2911,7 +2918,13 @@ repeat:
if (err)
goto out;
- ext4_lock_group(sb, group);
+ /* skip busy group */
+ if (cr >= CR_ANY_FREE) {
+ ext4_lock_group(sb, group);
+ } else if (!ext4_try_lock_group(sb, group)) {
+ ext4_mb_unload_buddy(&e4b);
+ continue;
+ }
/*
* We need to check again after locking the