diff options
Diffstat (limited to 'fs/btrfs/space-info.c')
| -rw-r--r-- | fs/btrfs/space-info.c | 1030 |
1 files changed, 701 insertions, 329 deletions
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index d7e8cd4f140c..6babbe333741 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/spinlock.h> +#include <linux/minmax.h> #include "misc.h" #include "ctree.h" #include "space-info.h" @@ -9,10 +11,11 @@ #include "ordered-data.h" #include "transaction.h" #include "block-group.h" -#include "zoned.h" #include "fs.h" #include "accessors.h" #include "extent-tree.h" +#include "zoned.h" +#include "delayed-inode.h" /* * HOW DOES SPACE RESERVATION WORK @@ -48,11 +51,11 @@ * num_bytes we want to reserve. * * ->reserve - * space_info->bytes_may_reserve += num_bytes + * space_info->bytes_may_use += num_bytes * * ->extent allocation * Call btrfs_add_reserved_bytes() which does - * space_info->bytes_may_reserve -= num_bytes + * space_info->bytes_may_use -= num_bytes * space_info->bytes_reserved += extent_bytes * * ->insert reference @@ -65,7 +68,7 @@ * Assume we are unable to simply make the reservation because we do not have * enough space * - * -> __reserve_bytes + * -> reserve_bytes * create a reserve_ticket with ->bytes set to our reservation, add it to * the tail of space_info->tickets, kick async flush thread * @@ -126,6 +129,14 @@ * churn a lot and we can avoid making some extent tree modifications if we * are able to delay for as long as possible. * + * RESET_ZONES + * This state works only for the zoned mode. On the zoned mode, we cannot + * reuse once allocated then freed region until we reset the zone, due to + * the sequential write zone requirement. The RESET_ZONES state resets the + * zones of an unused block group and let us reuse the space. The reusing + * is faster than removing the block group and allocating another block + * group on the zones. 
+ * * ALLOC_CHUNK * We will skip this the first time through space reservation, because of * overcommit and we don't want to have a lot of useless metadata space when @@ -162,15 +173,14 @@ * thing with or without extra unallocated space. */ -u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info, - bool may_use_included) -{ - ASSERT(s_info); - return s_info->bytes_used + s_info->bytes_reserved + - s_info->bytes_pinned + s_info->bytes_readonly + - s_info->bytes_zone_unusable + - (may_use_included ? s_info->bytes_may_use : 0); -} +struct reserve_ticket { + u64 bytes; + int error; + bool steal; + struct list_head list; + wait_queue_head_t wait; + spinlock_t lock; +}; /* * after adding space to the filesystem, we need to clear the full flags @@ -182,7 +192,7 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info) struct btrfs_space_info *found; list_for_each_entry(found, head, list) - found->full = 0; + found->full = false; } /* @@ -191,6 +201,8 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info) */ #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75) +#define BTRFS_UNALLOC_BLOCK_GROUP_TARGET (10ULL) + /* * Calculate chunk size depending on volume type (regular or zoned). 
*/ @@ -199,7 +211,7 @@ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags) if (btrfs_is_zoned(fs_info)) return fs_info->zone_size; - ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK); + ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK, "flags=%llu", flags); if (flags & BTRFS_BLOCK_GROUP_DATA) return BTRFS_MAX_DATA_CHUNK_SIZE; @@ -222,18 +234,11 @@ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, WRITE_ONCE(space_info->chunk_size, chunk_size); } -static int create_space_info(struct btrfs_fs_info *info, u64 flags) +static void init_space_info(struct btrfs_fs_info *info, + struct btrfs_space_info *space_info, u64 flags) { - - struct btrfs_space_info *space_info; - int i; - int ret; - - space_info = kzalloc(sizeof(*space_info), GFP_NOFS); - if (!space_info) - return -ENOMEM; - - for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) + space_info->fs_info = info; + for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) INIT_LIST_HEAD(&space_info->block_groups[i]); init_rwsem(&space_info->groups_sem); spin_lock_init(&space_info->lock); @@ -244,11 +249,67 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags) INIT_LIST_HEAD(&space_info->priority_tickets); space_info->clamp = 1; btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags)); + space_info->subgroup_id = BTRFS_SUB_GROUP_PRIMARY; if (btrfs_is_zoned(info)) space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; +} + +static int create_space_info_sub_group(struct btrfs_space_info *parent, u64 flags, + enum btrfs_space_info_sub_group id, int index) +{ + struct btrfs_fs_info *fs_info = parent->fs_info; + struct btrfs_space_info *sub_group; + int ret; + + ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY, + "parent->subgroup_id=%d", parent->subgroup_id); + ASSERT(id != BTRFS_SUB_GROUP_PRIMARY, "id=%d", id); + + sub_group = kzalloc(sizeof(*sub_group), GFP_NOFS); + if (!sub_group) + return -ENOMEM; + + init_space_info(fs_info, sub_group, flags); + 
parent->sub_group[index] = sub_group; + sub_group->parent = parent; + sub_group->subgroup_id = id; + + ret = btrfs_sysfs_add_space_info_type(sub_group); + if (ret) { + kfree(sub_group); + parent->sub_group[index] = NULL; + } + return ret; +} + +static int create_space_info(struct btrfs_fs_info *info, u64 flags) +{ + + struct btrfs_space_info *space_info; + int ret = 0; + + space_info = kzalloc(sizeof(*space_info), GFP_NOFS); + if (!space_info) + return -ENOMEM; + + init_space_info(info, space_info, flags); + + if (btrfs_is_zoned(info)) { + if (flags & BTRFS_BLOCK_GROUP_DATA) + ret = create_space_info_sub_group(space_info, flags, + BTRFS_SUB_GROUP_DATA_RELOC, + 0); + else if (flags & BTRFS_BLOCK_GROUP_METADATA) + ret = create_space_info_sub_group(space_info, flags, + BTRFS_SUB_GROUP_TREELOG, + 0); - ret = btrfs_sysfs_add_space_info_type(info, space_info); + if (ret) + return ret; + } + + ret = btrfs_sysfs_add_space_info_type(space_info); if (ret) return ret; @@ -299,31 +360,29 @@ out: void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, struct btrfs_block_group *block_group) { - struct btrfs_space_info *found; + struct btrfs_space_info *space_info = block_group->space_info; int factor, index; factor = btrfs_bg_type_to_factor(block_group->flags); - found = btrfs_find_space_info(info, block_group->flags); - ASSERT(found); - spin_lock(&found->lock); - found->total_bytes += block_group->length; - found->disk_total += block_group->length * factor; - found->bytes_used += block_group->used; - found->disk_used += block_group->used * factor; - found->bytes_readonly += block_group->bytes_super; - found->bytes_zone_unusable += block_group->zone_unusable; + spin_lock(&space_info->lock); + space_info->total_bytes += block_group->length; + space_info->disk_total += block_group->length * factor; + space_info->bytes_used += block_group->used; + space_info->disk_used += block_group->used * factor; + space_info->bytes_readonly += block_group->bytes_super; + 
btrfs_space_info_update_bytes_zone_unusable(space_info, block_group->zone_unusable); if (block_group->length > 0) - found->full = 0; - btrfs_try_granting_tickets(info, found); - spin_unlock(&found->lock); + space_info->full = false; + btrfs_try_granting_tickets(space_info); + spin_unlock(&space_info->lock); - block_group->space_info = found; + block_group->space_info = space_info; index = btrfs_bg_flags_to_raid_index(block_group->flags); - down_write(&found->groups_sem); - list_add_tail(&block_group->list, &found->block_groups[index]); - up_write(&found->groups_sem); + down_write(&space_info->groups_sem); + list_add_tail(&block_group->list, &space_info->block_groups[index]); + up_write(&space_info->groups_sem); } struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, @@ -341,12 +400,35 @@ struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, return NULL; } -static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - enum btrfs_reserve_flush_enum flush) +static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info) +{ + struct btrfs_space_info *data_sinfo; + u64 data_chunk_size; + + /* + * Calculate the data_chunk_size, space_info->chunk_size is the + * "optimal" chunk size based on the fs size. However when we actually + * allocate the chunk we will strip this down further, making it no + * more than 10% of the disk or 1G, whichever is smaller. + * + * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size) + * as it is. 
+ */ + data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); + if (btrfs_is_zoned(fs_info)) + return data_sinfo->chunk_size; + data_chunk_size = min(data_sinfo->chunk_size, + mult_perc(fs_info->fs_devices->total_rw_bytes, 10)); + return min_t(u64, data_chunk_size, SZ_1G); +} + +static u64 calc_available_free_space(const struct btrfs_space_info *space_info, + enum btrfs_reserve_flush_enum flush) { + struct btrfs_fs_info *fs_info = space_info->fs_info; u64 profile; u64 avail; + u64 data_chunk_size; int factor; if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) @@ -364,6 +446,27 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, */ factor = btrfs_bg_type_to_factor(profile); avail = div_u64(avail, factor); + if (avail == 0) + return 0; + + data_chunk_size = calc_effective_data_chunk_size(fs_info); + + /* + * Since data allocations immediately use block groups as part of the + * reservation, because we assume that data reservations will == actual + * usage, we could potentially overcommit and then immediately have that + * available space used by a data allocation, which could put us in a + * bind when we get close to filling the file system. + * + * To handle this simply remove the data_chunk_size from the available + * space. If we are relatively empty this won't affect our ability to + * overcommit much, and if we're very close to full it'll keep us from + * getting into a position where we've given ourselves very little + * metadata wiggle room. + */ + if (avail <= data_chunk_size) + return 0; + avail -= data_chunk_size; /* * If we aren't flushing all things, let us overcommit up to @@ -374,47 +477,91 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, avail >>= 3; else avail >>= 1; + + /* + * On the zoned mode, we always allocate one zone as one chunk. 
+ * Returning non-zone size aligned bytes here will result in + * less pressure for the async metadata reclaim process, and it + * will over-commit too much leading to ENOSPC. Align down to the + * zone size to avoid that. + */ + if (btrfs_is_zoned(fs_info)) + avail = ALIGN_DOWN(avail, fs_info->zone_size); + return avail; } -int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, u64 bytes, - enum btrfs_reserve_flush_enum flush) +static inline bool check_can_overcommit(const struct btrfs_space_info *space_info, + u64 space_info_used_bytes, u64 bytes, + enum btrfs_reserve_flush_enum flush) +{ + const u64 avail = calc_available_free_space(space_info, flush); + + return (space_info_used_bytes + bytes < space_info->total_bytes + avail); +} + +static inline bool can_overcommit(const struct btrfs_space_info *space_info, + u64 space_info_used_bytes, u64 bytes, + enum btrfs_reserve_flush_enum flush) +{ + /* Don't overcommit when in mixed mode. */ + if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) + return false; + + return check_can_overcommit(space_info, space_info_used_bytes, bytes, flush); +} + +bool btrfs_can_overcommit(const struct btrfs_space_info *space_info, u64 bytes, + enum btrfs_reserve_flush_enum flush) { - u64 avail; u64 used; /* Don't overcommit when in mixed mode */ if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) - return 0; + return false; used = btrfs_space_info_used(space_info, true); - avail = calc_available_free_space(fs_info, space_info, flush); - if (used + bytes < space_info->total_bytes + avail) - return 1; - return 0; + return check_can_overcommit(space_info, used, bytes, flush); } static void remove_ticket(struct btrfs_space_info *space_info, - struct reserve_ticket *ticket) + struct reserve_ticket *ticket, int error) { + lockdep_assert_held(&space_info->lock); + if (!list_empty(&ticket->list)) { list_del_init(&ticket->list); - ASSERT(space_info->reclaim_size >= ticket->bytes); + 
ASSERT(space_info->reclaim_size >= ticket->bytes, + "space_info->reclaim_size=%llu ticket->bytes=%llu", + space_info->reclaim_size, ticket->bytes); space_info->reclaim_size -= ticket->bytes; } + + spin_lock(&ticket->lock); + /* + * If we are called from a task waiting on the ticket, it may happen + * that before it sets an error on the ticket, a reclaim task was able + * to satisfy the ticket. In that case ignore the error. + */ + if (error && ticket->bytes > 0) + ticket->error = error; + else + ticket->bytes = 0; + + wake_up(&ticket->wait); + spin_unlock(&ticket->lock); } /* * This is for space we already have accounted in space_info->bytes_may_use, so * basically when we're returning space from block_rsv's. */ -void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info) +void btrfs_try_granting_tickets(struct btrfs_space_info *space_info) { struct list_head *head; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; + u64 used = btrfs_space_info_used(space_info, true); lockdep_assert_held(&space_info->lock); @@ -422,21 +569,18 @@ void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, again: while (!list_empty(head)) { struct reserve_ticket *ticket; - u64 used = btrfs_space_info_used(space_info, true); + u64 used_after; ticket = list_first_entry(head, struct reserve_ticket, list); + used_after = used + ticket->bytes; /* Check and see if our ticket can be satisfied now. 
*/ - if ((used + ticket->bytes <= space_info->total_bytes) || - btrfs_can_overcommit(fs_info, space_info, ticket->bytes, - flush)) { - btrfs_space_info_update_bytes_may_use(fs_info, - space_info, - ticket->bytes); - remove_ticket(space_info, ticket); - ticket->bytes = 0; + if (used_after <= space_info->total_bytes || + can_overcommit(space_info, used, ticket->bytes, flush)) { + btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes); + remove_ticket(space_info, ticket, 0); space_info->tickets_id++; - wake_up(&ticket->wait); + used = used_after; } else { break; } @@ -483,15 +627,16 @@ static void dump_global_block_rsv(struct btrfs_fs_info *fs_info) DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); } -static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *info) +static void __btrfs_dump_space_info(const struct btrfs_space_info *info) { + const struct btrfs_fs_info *fs_info = info->fs_info; const char *flag_str = space_info_flag_to_str(info); lockdep_assert_held(&info->lock); /* The free space could be negative in case of overcommit */ - btrfs_info(fs_info, "space_info %s has %lld free, is %sfull", - flag_str, + btrfs_info(fs_info, + "space_info %s (sub-group id %d) has %lld free, is %sfull", + flag_str, info->subgroup_id, (s64)(info->total_bytes - btrfs_space_info_used(info, true)), info->full ? 
"" : "not "); btrfs_info(fs_info, @@ -501,16 +646,16 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, info->bytes_readonly, info->bytes_zone_unusable); } -void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *info, u64 bytes, - int dump_block_groups) +void btrfs_dump_space_info(struct btrfs_space_info *info, u64 bytes, + bool dump_block_groups) { + struct btrfs_fs_info *fs_info = info->fs_info; struct btrfs_block_group *cache; u64 total_avail = 0; int index = 0; spin_lock(&info->lock); - __btrfs_dump_space_info(fs_info, info); + __btrfs_dump_space_info(info); dump_global_block_rsv(fs_info); spin_unlock(&info->lock); @@ -524,8 +669,7 @@ again: spin_lock(&cache->lock); avail = cache->length - cache->used - cache->pinned - - cache->reserved - cache->delalloc_bytes - - cache->bytes_super - cache->zone_unusable; + cache->reserved - cache->bytes_super - cache->zone_unusable; btrfs_info(fs_info, "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s", cache->start, cache->length, cache->used, cache->pinned, @@ -556,28 +700,14 @@ static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info, return nr; } -static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info, - u64 to_reclaim) -{ - const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1); - u64 nr; - - nr = div64_u64(to_reclaim, bytes); - if (!nr) - nr = 1; - return nr; -} - -#define EXTENT_SIZE_PER_ITEM SZ_256K - /* * shrink metadata reservation for delalloc */ -static void shrink_delalloc(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, +static void shrink_delalloc(struct btrfs_space_info *space_info, u64 to_reclaim, bool wait_ordered, bool for_preempt) { + struct btrfs_fs_info *fs_info = space_info->fs_info; struct btrfs_trans_handle *trans; u64 delalloc_bytes; u64 ordered_bytes; @@ -669,7 +799,7 @@ static void 
shrink_delalloc(struct btrfs_fs_info *fs_info, skip_async: loops++; if (wait_ordered && !trans) { - btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); + btrfs_wait_ordered_roots(fs_info, items, NULL); } else { time_left = schedule_timeout_killable(1); if (time_left) @@ -704,10 +834,10 @@ skip_async: * and may fail for various reasons. The caller is supposed to examine the * state of @space_info to detect the outcome. */ -static void flush_space(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, u64 num_bytes, - enum btrfs_flush_state state, bool for_preempt) +static void flush_space(struct btrfs_space_info *space_info, u64 num_bytes, + enum btrfs_flush_state state, bool for_preempt) { + struct btrfs_fs_info *fs_info = space_info->fs_info; struct btrfs_root *root = fs_info->tree_root; struct btrfs_trans_handle *trans; int nr; @@ -736,7 +866,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, case FLUSH_DELALLOC_FULL: if (state == FLUSH_DELALLOC_FULL) num_bytes = U64_MAX; - shrink_delalloc(fs_info, space_info, num_bytes, + shrink_delalloc(space_info, num_bytes, state != FLUSH_DELALLOC, for_preempt); break; case FLUSH_DELAYED_REFS_NR: @@ -749,10 +879,9 @@ static void flush_space(struct btrfs_fs_info *fs_info, break; } if (state == FLUSH_DELAYED_REFS_NR) - nr = calc_delayed_refs_nr(fs_info, num_bytes); + btrfs_run_delayed_refs(trans, num_bytes); else - nr = 0; - btrfs_run_delayed_refs(trans, nr); + btrfs_run_delayed_refs(trans, 0); btrfs_end_transaction(trans); break; case ALLOC_CHUNK: @@ -762,7 +891,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, ret = PTR_ERR(trans); break; } - ret = btrfs_chunk_alloc(trans, + ret = btrfs_chunk_alloc(trans, space_info, btrfs_get_alloc_profile(fs_info, space_info->flags), (state == ALLOC_CHUNK) ? 
CHUNK_ALLOC_NO_FORCE : CHUNK_ALLOC_FORCE); @@ -789,14 +918,10 @@ static void flush_space(struct btrfs_fs_info *fs_info, * because that does not wait for a transaction to fully commit * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED). */ - trans = btrfs_attach_transaction_barrier(root); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - if (ret == -ENOENT) - ret = 0; - break; - } - ret = btrfs_commit_transaction(trans); + ret = btrfs_commit_current_transaction(root); + break; + case RESET_ZONES: + ret = btrfs_reset_unused_block_groups(space_info, num_bytes); break; default: ret = -ENOSPC; @@ -808,9 +933,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, return; } -static inline u64 -btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info) +static u64 btrfs_calc_reclaim_metadata_size(const struct btrfs_space_info *space_info) { u64 used; u64 avail; @@ -818,8 +941,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, lockdep_assert_held(&space_info->lock); - avail = calc_available_free_space(fs_info, space_info, - BTRFS_RESERVE_FLUSH_ALL); + avail = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL); used = btrfs_space_info_used(space_info, true); /* @@ -834,18 +956,25 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, return to_reclaim; } -static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info) +static bool need_preemptive_reclaim(const struct btrfs_space_info *space_info) { - u64 global_rsv_size = fs_info->global_block_rsv.reserved; + struct btrfs_fs_info *fs_info = space_info->fs_info; + const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); u64 ordered, delalloc; u64 thresh; u64 used; - thresh = mult_perc(space_info->total_bytes, 90); - lockdep_assert_held(&space_info->lock); + /* + * We have tickets queued, bail so we don't compete with the async + * flushers. 
+ */ + if (space_info->reclaim_size) + return false; + + thresh = mult_perc(space_info->total_bytes, 90); + /* If we're just plain full then async reclaim just slows us down. */ if ((space_info->bytes_used + space_info->bytes_reserved + global_rsv_size) >= thresh) @@ -866,13 +995,6 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, return false; /* - * We have tickets queued, bail so we don't compete with the async - * flushers. - */ - if (space_info->reclaim_size) - return false; - - /* * If we have over half of the free space occupied by reservations or * pinned then we want to start flushing. * @@ -901,8 +1023,7 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, * much delalloc we need for the background flusher to kick in. */ - thresh = calc_available_free_space(fs_info, space_info, - BTRFS_RESERVE_FLUSH_ALL); + thresh = calc_available_free_space(space_info, BTRFS_RESERVE_FLUSH_ALL); used = space_info->bytes_used + space_info->bytes_reserved + space_info->bytes_readonly + global_rsv_size; if (used < space_info->total_bytes) @@ -937,8 +1058,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); if (ordered >= delalloc) - used += fs_info->delayed_refs_rsv.reserved + - fs_info->delayed_block_rsv.reserved; + used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) + + btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv); else used += space_info->bytes_may_use - global_rsv_size; @@ -946,13 +1067,15 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); } -static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, +static bool steal_from_global_rsv(struct btrfs_space_info *space_info, struct reserve_ticket *ticket) { + struct btrfs_fs_info *fs_info = 
space_info->fs_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; u64 min_bytes; + lockdep_assert_held(&space_info->lock); + if (!ticket->steal) return false; @@ -966,20 +1089,19 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, return false; } global_rsv->reserved -= ticket->bytes; - remove_ticket(space_info, ticket); - ticket->bytes = 0; - wake_up(&ticket->wait); - space_info->tickets_id++; if (global_rsv->reserved < global_rsv->size) - global_rsv->full = 0; + global_rsv->full = false; spin_unlock(&global_rsv->lock); + remove_ticket(space_info, ticket, 0); + space_info->tickets_id++; + return true; } /* - * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets - * @fs_info - fs_info for this fs + * We've exhausted our flushing, start failing tickets. + * * @space_info - the space info we were flushing * * We call this when we've exhausted our flushing ability and haven't made @@ -992,72 +1114,66 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, * other tickets, or if it stumbles across a ticket that was smaller than the * first ticket. 
*/ -static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info) +static bool maybe_fail_all_tickets(struct btrfs_space_info *space_info) { + struct btrfs_fs_info *fs_info = space_info->fs_info; struct reserve_ticket *ticket; u64 tickets_id = space_info->tickets_id; - const bool aborted = BTRFS_FS_ERROR(fs_info); + const int abort_error = BTRFS_FS_ERROR(fs_info); trace_btrfs_fail_all_tickets(fs_info, space_info); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); - __btrfs_dump_space_info(fs_info, space_info); + __btrfs_dump_space_info(space_info); } while (!list_empty(&space_info->tickets) && tickets_id == space_info->tickets_id) { ticket = list_first_entry(&space_info->tickets, struct reserve_ticket, list); + if (unlikely(abort_error)) { + remove_ticket(space_info, ticket, abort_error); + } else { + if (steal_from_global_rsv(space_info, ticket)) + return true; - if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket)) - return true; - - if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) - btrfs_info(fs_info, "failing ticket with %llu bytes", - ticket->bytes); + if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) + btrfs_info(fs_info, "failing ticket with %llu bytes", + ticket->bytes); - remove_ticket(space_info, ticket); - if (aborted) - ticket->error = -EIO; - else - ticket->error = -ENOSPC; - wake_up(&ticket->wait); + remove_ticket(space_info, ticket, -ENOSPC); - /* - * We're just throwing tickets away, so more flushing may not - * trip over btrfs_try_granting_tickets, so we need to call it - * here to see if we can make progress with the next ticket in - * the list. - */ - if (!aborted) - btrfs_try_granting_tickets(fs_info, space_info); + /* + * We're just throwing tickets away, so more flushing may + * not trip over btrfs_try_granting_tickets, so we need + * to call it here to see if we can make progress with + * the next ticket in the list. 
+ */ + btrfs_try_granting_tickets(space_info); + } } return (tickets_id != space_info->tickets_id); } -/* - * This is for normal flushers, we can wait all goddamned day if we want to. We - * will loop and continuously try to flush as long as we are making progress. - * We count progress as clearing off tickets each time we have to loop. - */ -static void btrfs_async_reclaim_metadata_space(struct work_struct *work) +static void do_async_reclaim_metadata_space(struct btrfs_space_info *space_info) { - struct btrfs_fs_info *fs_info; - struct btrfs_space_info *space_info; + struct btrfs_fs_info *fs_info = space_info->fs_info; u64 to_reclaim; enum btrfs_flush_state flush_state; int commit_cycles = 0; u64 last_tickets_id; + enum btrfs_flush_state final_state; - fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); - space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); + if (btrfs_is_zoned(fs_info)) + final_state = RESET_ZONES; + else + final_state = COMMIT_TRANS; spin_lock(&space_info->lock); - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); + to_reclaim = btrfs_calc_reclaim_metadata_size(space_info); if (!to_reclaim) { - space_info->flush = 0; + space_info->flush = false; spin_unlock(&space_info->lock); return; } @@ -1066,15 +1182,14 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) flush_state = FLUSH_DELAYED_ITEMS_NR; do { - flush_space(fs_info, space_info, to_reclaim, flush_state, false); + flush_space(space_info, to_reclaim, flush_state, false); spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { - space_info->flush = 0; + space_info->flush = false; spin_unlock(&space_info->lock); return; } - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, - space_info); + to_reclaim = btrfs_calc_reclaim_metadata_size(space_info); if (last_tickets_id == space_info->tickets_id) { flush_state++; } else { @@ -1105,21 +1220,40 @@ static void 
btrfs_async_reclaim_metadata_space(struct work_struct *work) if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) flush_state++; - if (flush_state > COMMIT_TRANS) { + if (flush_state > final_state) { commit_cycles++; if (commit_cycles > 2) { - if (maybe_fail_all_tickets(fs_info, space_info)) { + if (maybe_fail_all_tickets(space_info)) { flush_state = FLUSH_DELAYED_ITEMS_NR; commit_cycles--; } else { - space_info->flush = 0; + space_info->flush = false; } } else { flush_state = FLUSH_DELAYED_ITEMS_NR; } } spin_unlock(&space_info->lock); - } while (flush_state <= COMMIT_TRANS); + } while (flush_state <= final_state); +} + +/* + * This is for normal flushers, it can wait as much time as needed. We will + * loop and continuously try to flush as long as we are making progress. We + * count progress as clearing off tickets each time we have to loop. + */ +static void btrfs_async_reclaim_metadata_space(struct work_struct *work) +{ + struct btrfs_fs_info *fs_info; + struct btrfs_space_info *space_info; + + fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); + space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); + do_async_reclaim_metadata_space(space_info); + for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) { + if (space_info->sub_group[i]) + do_async_reclaim_metadata_space(space_info->sub_group[i]); + } } /* @@ -1149,14 +1283,15 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) trans_rsv = &fs_info->trans_block_rsv; spin_lock(&space_info->lock); - while (need_preemptive_reclaim(fs_info, space_info)) { + while (need_preemptive_reclaim(space_info)) { enum btrfs_flush_state flush; u64 delalloc_size = 0; u64 to_reclaim, block_rsv_size; - u64 global_rsv_size = global_rsv->reserved; - - loops++; + const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv); + const u64 bytes_may_use = space_info->bytes_may_use; + const u64 bytes_pinned = space_info->bytes_pinned; + 
spin_unlock(&space_info->lock); /* * We don't have a precise counter for the metadata being * reserved for delalloc, so we'll approximate it by subtracting @@ -1165,11 +1300,11 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) * assume it's tied up in delalloc reservations. */ block_rsv_size = global_rsv_size + - delayed_block_rsv->reserved + - delayed_refs_rsv->reserved + - trans_rsv->reserved; - if (block_rsv_size < space_info->bytes_may_use) - delalloc_size = space_info->bytes_may_use - block_rsv_size; + btrfs_block_rsv_reserved(delayed_block_rsv) + + btrfs_block_rsv_reserved(delayed_refs_rsv) + + btrfs_block_rsv_reserved(trans_rsv); + if (block_rsv_size < bytes_may_use) + delalloc_size = bytes_may_use - block_rsv_size; /* * We don't want to include the global_rsv in our calculation, @@ -1186,21 +1321,21 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) if (delalloc_size > block_rsv_size) { to_reclaim = delalloc_size; flush = FLUSH_DELALLOC; - } else if (space_info->bytes_pinned > - (delayed_block_rsv->reserved + - delayed_refs_rsv->reserved)) { - to_reclaim = space_info->bytes_pinned; + } else if (bytes_pinned > + (btrfs_block_rsv_reserved(delayed_block_rsv) + + btrfs_block_rsv_reserved(delayed_refs_rsv))) { + to_reclaim = bytes_pinned; flush = COMMIT_TRANS; - } else if (delayed_block_rsv->reserved > - delayed_refs_rsv->reserved) { - to_reclaim = delayed_block_rsv->reserved; + } else if (btrfs_block_rsv_reserved(delayed_block_rsv) > + btrfs_block_rsv_reserved(delayed_refs_rsv)) { + to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv); flush = FLUSH_DELAYED_ITEMS_NR; } else { - to_reclaim = delayed_refs_rsv->reserved; + to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv); flush = FLUSH_DELAYED_REFS_NR; } - spin_unlock(&space_info->lock); + loops++; /* * We don't want to reclaim everything, just a portion, so scale @@ -1210,7 +1345,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct 
work_struct *work) to_reclaim >>= 2; if (!to_reclaim) to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1); - flush_space(fs_info, space_info, to_reclaim, flush, true); + flush_space(space_info, to_reclaim, flush, true); cond_resched(); spin_lock(&space_info->lock); } @@ -1243,13 +1378,17 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) * If we are freeing inodes, we want to make sure all delayed iputs have * completed, because they could have been on an inode with i_nlink == 0, and * thus have been truncated and freed up space. But again this space is not - * immediately re-usable, it comes in the form of a delayed ref, which must be + * immediately reusable, it comes in the form of a delayed ref, which must be * run and then the transaction must be committed. * * COMMIT_TRANS * This is where we reclaim all of the pinned space generated by running the * iputs * + * RESET_ZONES + * This state works only for the zoned mode. We scan the unused block group + * list and reset the zones and reuse the block group. 
+ * * ALLOC_CHUNK_FORCE * For data we start with alloc chunk force, however we could have been full * before, and then the transaction commit could have freed new block groups, @@ -1259,22 +1398,19 @@ static const enum btrfs_flush_state data_flush_states[] = { FLUSH_DELALLOC_FULL, RUN_DELAYED_IPUTS, COMMIT_TRANS, + RESET_ZONES, ALLOC_CHUNK_FORCE, }; -static void btrfs_async_reclaim_data_space(struct work_struct *work) +static void do_async_reclaim_data_space(struct btrfs_space_info *space_info) { - struct btrfs_fs_info *fs_info; - struct btrfs_space_info *space_info; + struct btrfs_fs_info *fs_info = space_info->fs_info; u64 last_tickets_id; enum btrfs_flush_state flush_state = 0; - fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); - space_info = fs_info->data_sinfo; - spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { - space_info->flush = 0; + space_info->flush = false; spin_unlock(&space_info->lock); return; } @@ -1282,27 +1418,27 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work) spin_unlock(&space_info->lock); while (!space_info->full) { - flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); + flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { - space_info->flush = 0; + space_info->flush = false; spin_unlock(&space_info->lock); return; } /* Something happened, fail everything and bail. 
*/ - if (BTRFS_FS_ERROR(fs_info)) + if (unlikely(BTRFS_FS_ERROR(fs_info))) goto aborted_fs; last_tickets_id = space_info->tickets_id; spin_unlock(&space_info->lock); } while (flush_state < ARRAY_SIZE(data_flush_states)) { - flush_space(fs_info, space_info, U64_MAX, + flush_space(space_info, U64_MAX, data_flush_states[flush_state], false); spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { - space_info->flush = 0; + space_info->flush = false; spin_unlock(&space_info->lock); return; } @@ -1316,16 +1452,16 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work) if (flush_state >= ARRAY_SIZE(data_flush_states)) { if (space_info->full) { - if (maybe_fail_all_tickets(fs_info, space_info)) + if (maybe_fail_all_tickets(space_info)) flush_state = 0; else - space_info->flush = 0; + space_info->flush = false; } else { flush_state = 0; } /* Something happened, fail everything and bail. */ - if (BTRFS_FS_ERROR(fs_info)) + if (unlikely(BTRFS_FS_ERROR(fs_info))) goto aborted_fs; } @@ -1334,11 +1470,24 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work) return; aborted_fs: - maybe_fail_all_tickets(fs_info, space_info); - space_info->flush = 0; + maybe_fail_all_tickets(space_info); + space_info->flush = false; spin_unlock(&space_info->lock); } +static void btrfs_async_reclaim_data_space(struct work_struct *work) +{ + struct btrfs_fs_info *fs_info; + struct btrfs_space_info *space_info; + + fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); + space_info = fs_info->data_sinfo; + do_async_reclaim_data_space(space_info); + for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) + if (space_info->sub_group[i]) + do_async_reclaim_data_space(space_info->sub_group[i]); +} + void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) { INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); @@ -1350,6 +1499,7 @@ void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) static 
const enum btrfs_flush_state priority_flush_states[] = { FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS, + RESET_ZONES, ALLOC_CHUNK, }; @@ -1363,106 +1513,105 @@ static const enum btrfs_flush_state evict_flush_states[] = { FLUSH_DELALLOC_FULL, ALLOC_CHUNK, COMMIT_TRANS, + RESET_ZONES, }; -static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - struct reserve_ticket *ticket, - const enum btrfs_flush_state *states, - int states_nr) +static bool is_ticket_served(struct reserve_ticket *ticket) +{ + bool ret; + + spin_lock(&ticket->lock); + ret = (ticket->bytes == 0); + spin_unlock(&ticket->lock); + + return ret; +} + +static void priority_reclaim_metadata_space(struct btrfs_space_info *space_info, + struct reserve_ticket *ticket, + const enum btrfs_flush_state *states, + int states_nr) { + struct btrfs_fs_info *fs_info = space_info->fs_info; u64 to_reclaim; int flush_state = 0; - spin_lock(&space_info->lock); - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); /* * This is the priority reclaim path, so to_reclaim could be >0 still * because we may have only satisfied the priority tickets and still * left non priority tickets on the list. We would then have * to_reclaim but ->bytes == 0. 
*/ - if (ticket->bytes == 0) { - spin_unlock(&space_info->lock); + if (is_ticket_served(ticket)) return; - } + + spin_lock(&space_info->lock); + to_reclaim = btrfs_calc_reclaim_metadata_size(space_info); + spin_unlock(&space_info->lock); while (flush_state < states_nr) { - spin_unlock(&space_info->lock); - flush_space(fs_info, space_info, to_reclaim, states[flush_state], - false); - flush_state++; - spin_lock(&space_info->lock); - if (ticket->bytes == 0) { - spin_unlock(&space_info->lock); + flush_space(space_info, to_reclaim, states[flush_state], false); + if (is_ticket_served(ticket)) return; - } + flush_state++; } + spin_lock(&space_info->lock); /* * Attempt to steal from the global rsv if we can, except if the fs was * turned into error mode due to a transaction abort when flushing space * above, in that case fail with the abort error instead of returning * success to the caller if we can steal from the global rsv - this is - * just to have caller fail immeditelly instead of later when trying to + * just to have caller fail immediately instead of later when trying to * modify the fs, making it easier to debug -ENOSPC problems. */ - if (BTRFS_FS_ERROR(fs_info)) { - ticket->error = BTRFS_FS_ERROR(fs_info); - remove_ticket(space_info, ticket); - } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) { - ticket->error = -ENOSPC; - remove_ticket(space_info, ticket); - } + if (unlikely(BTRFS_FS_ERROR(fs_info))) + remove_ticket(space_info, ticket, BTRFS_FS_ERROR(fs_info)); + else if (!steal_from_global_rsv(space_info, ticket)) + remove_ticket(space_info, ticket, -ENOSPC); /* * We must run try_granting_tickets here because we could be a large * ticket in front of a smaller ticket that can now be satisfied with * the available space. 
*/ - btrfs_try_granting_tickets(fs_info, space_info); + btrfs_try_granting_tickets(space_info); spin_unlock(&space_info->lock); } -static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, +static void priority_reclaim_data_space(struct btrfs_space_info *space_info, struct reserve_ticket *ticket) { - spin_lock(&space_info->lock); - /* We could have been granted before we got here. */ - if (ticket->bytes == 0) { - spin_unlock(&space_info->lock); + if (is_ticket_served(ticket)) return; - } + spin_lock(&space_info->lock); while (!space_info->full) { spin_unlock(&space_info->lock); - flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); - spin_lock(&space_info->lock); - if (ticket->bytes == 0) { - spin_unlock(&space_info->lock); + flush_space(space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); + if (is_ticket_served(ticket)) return; - } + spin_lock(&space_info->lock); } - ticket->error = -ENOSPC; - remove_ticket(space_info, ticket); - btrfs_try_granting_tickets(fs_info, space_info); + remove_ticket(space_info, ticket, -ENOSPC); + btrfs_try_granting_tickets(space_info); spin_unlock(&space_info->lock); } -static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, +static void wait_reserve_ticket(struct btrfs_space_info *space_info, struct reserve_ticket *ticket) { DEFINE_WAIT(wait); - int ret = 0; - spin_lock(&space_info->lock); + spin_lock(&ticket->lock); while (ticket->bytes > 0 && ticket->error == 0) { + int ret; + ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); + spin_unlock(&ticket->lock); if (ret) { /* * Delete us from the list. After we unlock the space @@ -1472,24 +1621,23 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, * despite getting an error, resulting in a space leak * (bytes_may_use counter of our space_info). 
*/ - remove_ticket(space_info, ticket); - ticket->error = -EINTR; - break; + spin_lock(&space_info->lock); + remove_ticket(space_info, ticket, -EINTR); + spin_unlock(&space_info->lock); + return; } - spin_unlock(&space_info->lock); schedule(); finish_wait(&ticket->wait, &wait); - spin_lock(&space_info->lock); + spin_lock(&ticket->lock); } - spin_unlock(&space_info->lock); + spin_unlock(&ticket->lock); } /* * Do the appropriate flushing and waiting for a ticket. * - * @fs_info: the filesystem * @space_info: space info for the reservation * @ticket: ticket for the reservation * @start_ns: timestamp when the reservation started @@ -1499,8 +1647,7 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, * This does the work of figuring out how to flush for the ticket, waiting for * the reservation, and returning the appropriate error if there is one. */ -static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, +static int handle_reserve_ticket(struct btrfs_space_info *space_info, struct reserve_ticket *ticket, u64 start_ns, u64 orig_bytes, enum btrfs_reserve_flush_enum flush) @@ -1511,23 +1658,23 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, case BTRFS_RESERVE_FLUSH_DATA: case BTRFS_RESERVE_FLUSH_ALL: case BTRFS_RESERVE_FLUSH_ALL_STEAL: - wait_reserve_ticket(fs_info, space_info, ticket); + wait_reserve_ticket(space_info, ticket); break; case BTRFS_RESERVE_FLUSH_LIMIT: - priority_reclaim_metadata_space(fs_info, space_info, ticket, + priority_reclaim_metadata_space(space_info, ticket, priority_flush_states, ARRAY_SIZE(priority_flush_states)); break; case BTRFS_RESERVE_FLUSH_EVICT: - priority_reclaim_metadata_space(fs_info, space_info, ticket, + priority_reclaim_metadata_space(space_info, ticket, evict_flush_states, ARRAY_SIZE(evict_flush_states)); break; case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: - priority_reclaim_data_space(fs_info, space_info, ticket); + priority_reclaim_data_space(space_info, 
ticket); break; default: - ASSERT(0); + ASSERT(0, "flush=%d", flush); break; } @@ -1539,9 +1686,10 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, * releasing reserved space (if an error happens the expectation is that * space wasn't reserved at all). */ - ASSERT(!(ticket->bytes == 0 && ticket->error)); - trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, - start_ns, flush, ticket->error); + ASSERT(!(ticket->bytes == 0 && ticket->error), + "ticket->bytes=%llu ticket->error=%d", ticket->bytes, ticket->error); + trace_btrfs_reserve_ticket(space_info->fs_info, space_info->flags, + orig_bytes, start_ns, flush, ticket->error); return ret; } @@ -1555,9 +1703,9 @@ static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); } -static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info) +static inline void maybe_clamp_preempt(struct btrfs_space_info *space_info) { + struct btrfs_fs_info *fs_info = space_info->fs_info; u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); @@ -1592,7 +1740,6 @@ static inline bool can_ticket(enum btrfs_reserve_flush_enum flush) /* * Try to reserve bytes from the block_rsv's space. * - * @fs_info: the filesystem * @space_info: space info we want to allocate from * @orig_bytes: number of bytes we want * @flush: whether or not we can flush to make our reservation @@ -1604,10 +1751,10 @@ static inline bool can_ticket(enum btrfs_reserve_flush_enum flush) * regain reservations will be made and this will fail if there is not enough * space already. 
*/ -static int __reserve_bytes(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, u64 orig_bytes, - enum btrfs_reserve_flush_enum flush) +static int reserve_bytes(struct btrfs_space_info *space_info, u64 orig_bytes, + enum btrfs_reserve_flush_enum flush) { + struct btrfs_fs_info *fs_info = space_info->fs_info; struct work_struct *async_work; struct reserve_ticket ticket; u64 start_ns = 0; @@ -1615,7 +1762,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, int ret = -ENOSPC; bool pending_tickets; - ASSERT(orig_bytes); + ASSERT(orig_bytes, "orig_bytes=%llu", orig_bytes); /* * If have a transaction handle (current->journal_info != NULL), then * the flush method can not be neither BTRFS_RESERVE_FLUSH_ALL* nor @@ -1624,9 +1771,9 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, */ if (current->journal_info) { /* One assert per line for easier debugging. */ - ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL); - ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL); - ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT); + ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL, "flush=%d", flush); + ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL, "flush=%d", flush); + ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT, "flush=%d", flush); } if (flush == BTRFS_RESERVE_FLUSH_DATA) @@ -1654,9 +1801,8 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, */ if (!pending_tickets && ((used + orig_bytes <= space_info->total_bytes) || - btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { - btrfs_space_info_update_bytes_may_use(fs_info, space_info, - orig_bytes); + can_overcommit(space_info, used, orig_bytes, flush))) { + btrfs_space_info_update_bytes_may_use(space_info, orig_bytes); ret = 0; } @@ -1666,10 +1812,9 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, * left to allocate for the block. 
*/ if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) { - used = btrfs_space_info_used(space_info, false); + used -= space_info->bytes_may_use; if (used + orig_bytes <= space_info->total_bytes) { - btrfs_space_info_update_bytes_may_use(fs_info, space_info, - orig_bytes); + btrfs_space_info_update_bytes_may_use(space_info, orig_bytes); ret = 0; } } @@ -1686,6 +1831,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, ticket.error = 0; space_info->reclaim_size += ticket.bytes; init_waitqueue_head(&ticket.wait); + spin_lock_init(&ticket.lock); ticket.steal = can_steal(flush); if (trace_btrfs_reserve_ticket_enabled()) start_ns = ktime_get_ns(); @@ -1702,14 +1848,14 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, * preemptive flushing in order to keep up with * the workload. */ - maybe_clamp_preempt(fs_info, space_info); + maybe_clamp_preempt(space_info); - space_info->flush = 1; + space_info->flush = true; trace_btrfs_trigger_flush(fs_info, space_info->flags, orig_bytes, flush, "enospc"); - queue_work(system_unbound_wq, async_work); + queue_work(system_dfl_wq, async_work); } } else { list_add_tail(&ticket.list, @@ -1723,10 +1869,10 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, */ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && !work_busy(&fs_info->preempt_reclaim_work) && - need_preemptive_reclaim(fs_info, space_info)) { + need_preemptive_reclaim(space_info)) { trace_btrfs_trigger_flush(fs_info, space_info->flags, orig_bytes, flush, "preempt"); - queue_work(system_unbound_wq, + queue_work(system_dfl_wq, &fs_info->preempt_reclaim_work); } } @@ -1734,15 +1880,13 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, if (!ret || !can_ticket(flush)) return ret; - return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns, - orig_bytes, flush); + return handle_reserve_ticket(space_info, &ticket, start_ns, orig_bytes, flush); } /* * Try to reserve metadata bytes from the block_rsv's space. 
* - * @fs_info: the filesystem - * @block_rsv: block_rsv we're allocating for + * @space_info: the space_info we're allocating for * @orig_bytes: number of bytes we want * @flush: whether or not we can flush to make our reservation * @@ -1753,22 +1897,21 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, * regain reservations will be made and this will fail if there is not enough * space already. */ -int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, - struct btrfs_block_rsv *block_rsv, +int btrfs_reserve_metadata_bytes(struct btrfs_space_info *space_info, u64 orig_bytes, enum btrfs_reserve_flush_enum flush) { int ret; - ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); + ret = reserve_bytes(space_info, orig_bytes, flush); if (ret == -ENOSPC) { + struct btrfs_fs_info *fs_info = space_info->fs_info; + trace_btrfs_space_reservation(fs_info, "space_info:enospc", - block_rsv->space_info->flags, - orig_bytes, 1); + space_info->flags, orig_bytes, 1); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) - btrfs_dump_space_info(fs_info, block_rsv->space_info, - orig_bytes, 0); + btrfs_dump_space_info(space_info, orig_bytes, false); } return ret; } @@ -1776,30 +1919,32 @@ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, /* * Try to reserve data bytes for an allocation. * - * @fs_info: the filesystem + * @space_info: the space_info we're allocating for * @bytes: number of bytes we need * @flush: how we are allowed to flush * * This will reserve bytes from the data space info. If there is not enough * space then we will attempt to flush space as specified by flush. 
*/ -int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, +int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes, enum btrfs_reserve_flush_enum flush) { - struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; + struct btrfs_fs_info *fs_info = space_info->fs_info; int ret; ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE || - flush == BTRFS_RESERVE_NO_FLUSH); - ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); + flush == BTRFS_RESERVE_NO_FLUSH, "flush=%d", flush); + ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA, + "current->journal_info=0x%lx flush=%d", + (unsigned long)current->journal_info, flush); - ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); + ret = reserve_bytes(space_info, bytes, flush); if (ret == -ENOSPC) { trace_btrfs_space_reservation(fs_info, "space_info:enospc", - data_sinfo->flags, bytes, 1); + space_info->flags, bytes, 1); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) - btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); + btrfs_dump_space_info(space_info, bytes, false); } return ret; } @@ -1812,7 +1957,7 @@ __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info) btrfs_info(fs_info, "dumping space info:"); list_for_each_entry(space_info, &fs_info->space_info, list) { spin_lock(&space_info->lock); - __btrfs_dump_space_info(fs_info, space_info); + __btrfs_dump_space_info(space_info); spin_unlock(&space_info->lock); } dump_global_block_rsv(fs_info); @@ -1829,7 +1974,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) int factor; /* It's df, we don't care if it's racy */ - if (list_empty(&sinfo->ro_bgs)) + if (data_race(list_empty(&sinfo->ro_bgs))) return 0; spin_lock(&sinfo->lock); @@ -1851,3 +1996,230 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) return free_bytes; } + +static u64 calc_pct_ratio(u64 x, u64 y) +{ + int ret; + + if 
(!y) + return 0; +again: + ret = check_mul_overflow(100, x, &x); + if (ret) + goto lose_precision; + return div64_u64(x, y); +lose_precision: + x >>= 10; + y >>= 10; + if (!y) + y = 1; + goto again; +} + +/* + * A reasonable buffer for unallocated space is 10 data block_groups. + * If we claw this back repeatedly, we can still achieve efficient + * utilization when near full, and not do too much reclaim while + * always maintaining a solid buffer for workloads that quickly + * allocate and pressure the unallocated space. + */ +static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info) +{ + u64 chunk_sz = calc_effective_data_chunk_size(fs_info); + + return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz; +} + +/* + * The fundamental goal of automatic reclaim is to protect the filesystem's + * unallocated space and thus minimize the probability of the filesystem going + * read only when a metadata allocation failure causes a transaction abort. + * + * However, relocations happen into the space_info's unused space, therefore + * automatic reclaim must also back off as that space runs low. There is no + * value in doing trivial "relocations" of re-writing the same block group + * into a fresh one. + * + * Furthermore, we want to avoid doing too much reclaim even if there are good + * candidates. This is because the allocator is pretty good at filling up the + * holes with writes. So we want to do just enough reclaim to try and stay + * safe from running out of unallocated space but not be wasteful about it. + * + * Therefore, the dynamic reclaim threshold is calculated as follows: + * - calculate a target unallocated amount of 10 block group sized chunks + * - ratchet up the intensity of reclaim depending on how far we are from + * that target by using a formula of unalloc / target to set the threshold. + * + * Typically with 10 block groups as the target, the discrete values this comes + * out to are 0, 10, 20, ... , 80, 90, and 99. 
+ */ +static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info) +{ + struct btrfs_fs_info *fs_info = space_info->fs_info; + u64 unalloc = atomic64_read(&fs_info->free_chunk_space); + u64 target = calc_unalloc_target(fs_info); + u64 alloc = space_info->total_bytes; + u64 used = btrfs_space_info_used(space_info, false); + u64 unused = alloc - used; + u64 want = target > unalloc ? target - unalloc : 0; + u64 data_chunk_size = calc_effective_data_chunk_size(fs_info); + + /* If we have no unused space, don't bother, it won't work anyway. */ + if (unused < data_chunk_size) + return 0; + + /* Cast to int is OK because want <= target. */ + return calc_pct_ratio(want, target); +} + +int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info) +{ + lockdep_assert_held(&space_info->lock); + + if (READ_ONCE(space_info->dynamic_reclaim)) + return calc_dynamic_reclaim_threshold(space_info); + return READ_ONCE(space_info->bg_reclaim_threshold); +} + +/* + * Under "urgent" reclaim, we will reclaim even fresh block groups that have + * recently seen successful allocations, as we are desperate to reclaim + * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs. 
+ */ +static bool is_reclaim_urgent(struct btrfs_space_info *space_info) +{ + struct btrfs_fs_info *fs_info = space_info->fs_info; + u64 unalloc = atomic64_read(&fs_info->free_chunk_space); + u64 data_chunk_size = calc_effective_data_chunk_size(fs_info); + + return unalloc < data_chunk_size; +} + +static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid) +{ + struct btrfs_block_group *bg; + int thresh_pct; + bool try_again = true; + bool urgent; + + spin_lock(&space_info->lock); + urgent = is_reclaim_urgent(space_info); + thresh_pct = btrfs_calc_reclaim_threshold(space_info); + spin_unlock(&space_info->lock); + + down_read(&space_info->groups_sem); +again: + list_for_each_entry(bg, &space_info->block_groups[raid], list) { + u64 thresh; + bool reclaim = false; + + btrfs_get_block_group(bg); + spin_lock(&bg->lock); + thresh = mult_perc(bg->length, thresh_pct); + if (bg->used < thresh && bg->reclaim_mark) { + try_again = false; + reclaim = true; + } + bg->reclaim_mark++; + spin_unlock(&bg->lock); + if (reclaim) + btrfs_mark_bg_to_reclaim(bg); + btrfs_put_block_group(bg); + } + + /* + * In situations where we are very motivated to reclaim (low unalloc) + * use two passes to make the reclaim mark check best effort. + * + * If we have any staler groups, we don't touch the fresher ones, but if we + * really need a block group, do take a fresh one. 
+ */ + if (try_again && urgent) { + try_again = false; + goto again; + } + + up_read(&space_info->groups_sem); +} + +void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes) +{ + u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info); + + lockdep_assert_held(&space_info->lock); + space_info->reclaimable_bytes += bytes; + + if (space_info->reclaimable_bytes >= chunk_sz) + btrfs_set_periodic_reclaim_ready(space_info, true); +} + +void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready) +{ + lockdep_assert_held(&space_info->lock); + if (!READ_ONCE(space_info->periodic_reclaim)) + return; + if (ready != space_info->periodic_reclaim_ready) { + space_info->periodic_reclaim_ready = ready; + if (!ready) + space_info->reclaimable_bytes = 0; + } +} + +static bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info) +{ + bool ret; + + if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) + return false; + if (!READ_ONCE(space_info->periodic_reclaim)) + return false; + + spin_lock(&space_info->lock); + ret = space_info->periodic_reclaim_ready; + btrfs_set_periodic_reclaim_ready(space_info, false); + spin_unlock(&space_info->lock); + + return ret; +} + +void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info) +{ + int raid; + struct btrfs_space_info *space_info; + + list_for_each_entry(space_info, &fs_info->space_info, list) { + if (!btrfs_should_periodic_reclaim(space_info)) + continue; + for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) + do_reclaim_sweep(space_info, raid); + } +} + +void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len) +{ + struct btrfs_fs_info *fs_info = space_info->fs_info; + struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; + + lockdep_assert_held(&space_info->lock); + + /* Prioritize the global reservation to receive the freed space. 
*/ + if (global_rsv->space_info != space_info) + goto grant; + + spin_lock(&global_rsv->lock); + if (!global_rsv->full) { + u64 to_add = min(len, global_rsv->size - global_rsv->reserved); + + global_rsv->reserved += to_add; + btrfs_space_info_update_bytes_may_use(space_info, to_add); + if (global_rsv->reserved >= global_rsv->size) + global_rsv->full = true; + len -= to_add; + } + spin_unlock(&global_rsv->lock); + +grant: + /* Add to any tickets we may have. */ + if (len) + btrfs_try_granting_tickets(space_info); +} |
