Diffstat (limited to 'fs/btrfs/extent_map.c')
 fs/btrfs/extent_map.c | 1399 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 1176 insertions(+), 223 deletions(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index a4a7a1a8da95..7e38c23a0c1c 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -1,119 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 + #include <linux/err.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/hardirq.h> +#include "messages.h" #include "ctree.h" #include "extent_map.h" +#include "compression.h" +#include "btrfs_inode.h" +#include "disk-io.h" static struct kmem_cache *extent_map_cache; -int __init extent_map_init(void) +int __init btrfs_extent_map_init(void) { extent_map_cache = kmem_cache_create("btrfs_extent_map", - sizeof(struct extent_map), 0, - SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); + sizeof(struct extent_map), 0, 0, NULL); if (!extent_map_cache) return -ENOMEM; return 0; } -void extent_map_exit(void) +void __cold btrfs_extent_map_exit(void) { - if (extent_map_cache) - kmem_cache_destroy(extent_map_cache); + kmem_cache_destroy(extent_map_cache); } -/** - * extent_map_tree_init - initialize extent map tree - * @tree: tree to initialize - * - * Initialize the extent tree @tree. Should be called for each new inode - * or other user of the extent_map interface. +/* + * Initialize the extent tree @tree. Should be called for each new inode or + * other user of the extent_map interface. */ -void extent_map_tree_init(struct extent_map_tree *tree) +void btrfs_extent_map_tree_init(struct extent_map_tree *tree) { - tree->map = RB_ROOT; + tree->root = RB_ROOT; INIT_LIST_HEAD(&tree->modified_extents); rwlock_init(&tree->lock); } -/** - * alloc_extent_map - allocate new extent map structure - * - * Allocate a new extent_map structure. The new structure is - * returned with a reference count of one and needs to be - * freed using free_extent_map() +/* + * Allocate a new extent_map structure. The new structure is returned with a + * reference count of one and needs to be freed using free_extent_map() */ -struct extent_map *alloc_extent_map(void) +struct extent_map *btrfs_alloc_extent_map(void) { struct extent_map *em; em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS); if (!em) return NULL; - em->in_tree = 0; - em->flags = 0; - em->compress_type = BTRFS_COMPRESS_NONE; - em->generation = 0; - atomic_set(&em->refs, 1); + RB_CLEAR_NODE(&em->rb_node); + refcount_set(&em->refs, 1); INIT_LIST_HEAD(&em->list); return em; } -/** - * free_extent_map - drop reference count of an extent_map - * @em: extent map beeing releasead - * - * Drops the reference out on @em by one and free the structure - * if the reference count hits zero. +/* + * Drop the reference out on @em by one and free the structure if the reference + * count hits zero. */ -void free_extent_map(struct extent_map *em) +void btrfs_free_extent_map(struct extent_map *em) { if (!em) return; - WARN_ON(atomic_read(&em->refs) == 0); - if (atomic_dec_and_test(&em->refs)) { - WARN_ON(em->in_tree); + if (refcount_dec_and_test(&em->refs)) { + WARN_ON(btrfs_extent_map_in_tree(em)); WARN_ON(!list_empty(&em->list)); kmem_cache_free(extent_map_cache, em); } } -static struct rb_node *tree_insert(struct rb_root *root, u64 offset, - struct rb_node *node) +/* Do the math around the end of an extent, handling wrapping. 
*/ +static u64 range_end(u64 start, u64 len) +{ + if (start + len < start) + return (u64)-1; + return start + len; +} + +static void remove_em(struct btrfs_inode *inode, struct extent_map *em) +{ + struct btrfs_fs_info *fs_info = inode->root->fs_info; + + rb_erase(&em->rb_node, &inode->extent_tree.root); + RB_CLEAR_NODE(&em->rb_node); + + if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(inode->root))) + percpu_counter_dec(&fs_info->evictable_extent_maps); +} + +static int tree_insert(struct rb_root *root, struct extent_map *em) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; - struct extent_map *entry; + struct extent_map *entry = NULL; + struct rb_node *orig_parent = NULL; + u64 end = range_end(em->start, em->len); while (*p) { parent = *p; entry = rb_entry(parent, struct extent_map, rb_node); - WARN_ON(!entry->in_tree); - - if (offset < entry->start) + if (em->start < entry->start) p = &(*p)->rb_left; - else if (offset >= extent_map_end(entry)) + else if (em->start >= btrfs_extent_map_end(entry)) p = &(*p)->rb_right; else - return parent; + return -EEXIST; } - entry = rb_entry(node, struct extent_map, rb_node); - entry->in_tree = 1; - rb_link_node(node, parent, p); - rb_insert_color(node, root); - return NULL; + orig_parent = parent; + while (parent && em->start >= btrfs_extent_map_end(entry)) { + parent = rb_next(parent); + entry = rb_entry(parent, struct extent_map, rb_node); + } + if (parent) + if (end > entry->start && em->start < btrfs_extent_map_end(entry)) + return -EEXIST; + + parent = orig_parent; + entry = rb_entry(parent, struct extent_map, rb_node); + while (parent && em->start < entry->start) { + parent = rb_prev(parent); + entry = rb_entry(parent, struct extent_map, rb_node); + } + if (parent) + if (end > entry->start && em->start < btrfs_extent_map_end(entry)) + return -EEXIST; + + rb_link_node(&em->rb_node, orig_parent, p); + rb_insert_color(&em->rb_node, root); + return 0; } /* - * search through the tree for an extent_map with a given offset. If - * it can't be found, try to find some neighboring extents + * Search through the tree for an extent_map with a given offset. If it can't + * be found, try to find some neighboring extents */ -static struct rb_node *__tree_search(struct rb_root *root, u64 offset, - struct rb_node **prev_ret, - struct rb_node **next_ret) +static struct rb_node *tree_search(struct rb_root *root, u64 offset, + struct rb_node **prev_or_next_ret) { struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; @@ -121,125 +145,258 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, struct extent_map *entry; struct extent_map *prev_entry = NULL; + ASSERT(prev_or_next_ret); + while (n) { entry = rb_entry(n, struct extent_map, rb_node); prev = n; prev_entry = entry; - WARN_ON(!entry->in_tree); - if (offset < entry->start) n = n->rb_left; - else if (offset >= extent_map_end(entry)) + else if (offset >= btrfs_extent_map_end(entry)) n = n->rb_right; else return n; } - if (prev_ret) { - orig_prev = prev; - while (prev && offset >= extent_map_end(prev_entry)) { - prev = rb_next(prev); - prev_entry = rb_entry(prev, struct extent_map, rb_node); - } - *prev_ret = prev; - prev = orig_prev; + orig_prev = prev; + while (prev && offset >= btrfs_extent_map_end(prev_entry)) { + prev = rb_next(prev); + prev_entry = rb_entry(prev, struct extent_map, rb_node); + } + + /* + * Previous extent map found, return as in this case the caller does not + * care about the next one. 
+ */ + if (prev) { + *prev_or_next_ret = prev; + return NULL; } - if (next_ret) { + prev = orig_prev; + prev_entry = rb_entry(prev, struct extent_map, rb_node); + while (prev && offset < prev_entry->start) { + prev = rb_prev(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); - while (prev && offset < prev_entry->start) { - prev = rb_prev(prev); - prev_entry = rb_entry(prev, struct extent_map, rb_node); - } - *next_ret = prev; } + *prev_or_next_ret = prev; + return NULL; } -/* check to see if two extent_map structs are adjacent and safe to merge */ -static int mergable_maps(struct extent_map *prev, struct extent_map *next) +static inline u64 extent_map_block_len(const struct extent_map *em) { - if (test_bit(EXTENT_FLAG_PINNED, &prev->flags)) - return 0; + if (btrfs_extent_map_is_compressed(em)) + return em->disk_num_bytes; + return em->len; +} - /* - * don't merge compressed extents, we need to know their - * actual size - */ - if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags)) - return 0; +static inline u64 extent_map_block_end(const struct extent_map *em) +{ + const u64 block_start = btrfs_extent_map_block_start(em); + const u64 block_end = block_start + extent_map_block_len(em); - if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) || - test_bit(EXTENT_FLAG_LOGGING, &next->flags)) - return 0; + if (block_end < block_start) + return (u64)-1; + + return block_end; +} + +static bool can_merge_extent_map(const struct extent_map *em) +{ + if (em->flags & EXTENT_FLAG_PINNED) + return false; + + /* Don't merge compressed extents, we need to know their actual size. */ + if (btrfs_extent_map_is_compressed(em)) + return false; + + if (em->flags & EXTENT_FLAG_LOGGING) + return false; /* * We don't want to merge stuff that hasn't been written to the log yet * since it may not reflect exactly what is on disk, and that would be * bad. */ - if (!list_empty(&prev->list) || !list_empty(&next->list)) - return 0; + if (!list_empty(&em->list)) + return false; + + return true; +} + +/* Check to see if two extent_map structs are adjacent and safe to merge. */ +static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next) +{ + if (btrfs_extent_map_end(prev) != next->start) + return false; + + /* + * The merged flag is not an on-disk flag, it just indicates we had the + * extent maps of 2 (or more) adjacent extents merged, so factor it out. + */ + if ((prev->flags & ~EXTENT_FLAG_MERGED) != + (next->flags & ~EXTENT_FLAG_MERGED)) + return false; + + if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1) + return btrfs_extent_map_block_start(next) == extent_map_block_end(prev); + + /* HOLES and INLINE extents. */ + return next->disk_bytenr == prev->disk_bytenr; +} - if (extent_map_end(prev) == next->start && - prev->flags == next->flags && - prev->bdev == next->bdev && - ((next->block_start == EXTENT_MAP_HOLE && - prev->block_start == EXTENT_MAP_HOLE) || - (next->block_start == EXTENT_MAP_INLINE && - prev->block_start == EXTENT_MAP_INLINE) || - (next->block_start == EXTENT_MAP_DELALLOC && - prev->block_start == EXTENT_MAP_DELALLOC) || - (next->block_start < EXTENT_MAP_LAST_BYTE - 1 && - next->block_start == extent_map_block_end(prev)))) { - return 1; +/* + * Handle the on-disk data extents merge for @prev and @next. 
+ * + * @prev: left extent to merge + * @next: right extent to merge + * @merged: the extent we will not discard after the merge; updated with new values + * + * After this, one of the two extents is the new merged extent and the other is + * removed from the tree and likely freed. Note that @merged is one of @prev/@next + * so there is const/non-const aliasing occurring here. + * + * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes. + * For now only uncompressed regular extent can be merged. + */ +static void merge_ondisk_extents(const struct extent_map *prev, const struct extent_map *next, + struct extent_map *merged) +{ + u64 new_disk_bytenr; + u64 new_disk_num_bytes; + u64 new_offset; + + /* @prev and @next should not be compressed. */ + ASSERT(!btrfs_extent_map_is_compressed(prev)); + ASSERT(!btrfs_extent_map_is_compressed(next)); + + /* + * There are two different cases where @prev and @next can be merged. + * + * 1) They are referring to the same data extent: + * + * |<----- data extent A ----->| + * |<- prev ->|<- next ->| + * + * 2) They are referring to different data extents but still adjacent: + * + * |<-- data extent A -->|<-- data extent B -->| + * |<- prev ->|<- next ->| + * + * The calculation here always merges the data extents first, then updates + * @offset using the new data extents. + * + * For case 1), the merged data extent would be the same. + * For case 2), we just merge the two data extents into one. + */ + new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr); + new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes, + next->disk_bytenr + next->disk_num_bytes) - + new_disk_bytenr; + new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr; + + merged->disk_bytenr = new_disk_bytenr; + merged->disk_num_bytes = new_disk_num_bytes; + merged->ram_bytes = new_disk_num_bytes; + merged->offset = new_offset; +} + +static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix, + struct extent_map *em) +{ + if (!IS_ENABLED(CONFIG_BTRFS_DEBUG)) + return; + btrfs_crit(fs_info, +"%s, start=%llu len=%llu disk_bytenr=%llu disk_num_bytes=%llu ram_bytes=%llu offset=%llu flags=0x%x", + prefix, em->start, em->len, em->disk_bytenr, em->disk_num_bytes, + em->ram_bytes, em->offset, em->flags); + ASSERT(0); +} + +/* Internal sanity checks for btrfs debug builds. 
*/ +static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em) +{ + if (!IS_ENABLED(CONFIG_BTRFS_DEBUG)) + return; + if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { + if (em->disk_num_bytes == 0) + dump_extent_map(fs_info, "zero disk_num_bytes", em); + if (em->offset + em->len > em->ram_bytes) + dump_extent_map(fs_info, "ram_bytes too small", em); + if (em->offset + em->len > em->disk_num_bytes && + !btrfs_extent_map_is_compressed(em)) + dump_extent_map(fs_info, "disk_num_bytes too small", em); + if (!btrfs_extent_map_is_compressed(em) && + em->ram_bytes != em->disk_num_bytes) + dump_extent_map(fs_info, + "ram_bytes mismatch with disk_num_bytes for non-compressed em", + em); + } else if (em->offset) { + dump_extent_map(fs_info, "non-zero offset for hole/inline", em); } - return 0; } -static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) +static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em) { + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map *merge = NULL; struct rb_node *rb; + /* + * We can't modify an extent map that is in the tree and that is being + * used by another task, as it can cause that other task to see it in + * inconsistent state during the merging. We always have 1 reference for + * the tree and 1 for this task (which is unpinning the extent map or + * clearing the logging flag), so anything > 2 means it's being used by + * other tasks too. + */ + if (refcount_read(&em->refs) > 2) + return; + + if (!can_merge_extent_map(em)) + return; + if (em->start != 0) { rb = rb_prev(&em->rb_node); - if (rb) - merge = rb_entry(rb, struct extent_map, rb_node); - if (rb && mergable_maps(merge, em)) { + merge = rb_entry_safe(rb, struct extent_map, rb_node); + + if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) { em->start = merge->start; - em->orig_start = merge->orig_start; em->len += merge->len; - em->block_len += merge->block_len; - em->block_start = merge->block_start; - merge->in_tree = 0; - em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start; - em->mod_start = merge->mod_start; em->generation = max(em->generation, merge->generation); - rb_erase(&merge->rb_node, &tree->map); - free_extent_map(merge); + if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) + merge_ondisk_extents(merge, em, em); + em->flags |= EXTENT_FLAG_MERGED; + + validate_extent_map(fs_info, em); + remove_em(inode, merge); + btrfs_free_extent_map(merge); } } rb = rb_next(&em->rb_node); - if (rb) - merge = rb_entry(rb, struct extent_map, rb_node); - if (rb && mergable_maps(em, merge)) { + merge = rb_entry_safe(rb, struct extent_map, rb_node); + + if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) { em->len += merge->len; - em->block_len += merge->len; - rb_erase(&merge->rb_node, &tree->map); - merge->in_tree = 0; - em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start; + if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) + merge_ondisk_extents(em, merge, em); + validate_extent_map(fs_info, em); em->generation = max(em->generation, merge->generation); - free_extent_map(merge); + em->flags |= EXTENT_FLAG_MERGED; + remove_em(inode, merge); + btrfs_free_extent_map(merge); } } -/** - * unpin_extent_cache - unpin an extent from the cache - * @tree: tree to unpin the extent in +/* + * Unpin an extent from the cache. 
+ * + * @inode: the inode from which we are unpinning an extent range * @start: logical offset in the file * @len: length of the extent * @gen: generation that this extent has been modified in @@ -247,135 +404,138 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) * Called after an extent has been written to disk properly. Set the generation * to the generation that actually added the file item to the inode so we know * we need to sync this extent when we call fsync(). + * + * Returns: 0 on success + * -ENOENT when the extent is not found in the tree + * -EUCLEAN if the found extent does not match the expected start */ -int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, - u64 gen) +int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen) { + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct extent_map_tree *tree = &inode->extent_tree; int ret = 0; struct extent_map *em; - bool prealloc = false; write_lock(&tree->lock); - em = lookup_extent_mapping(tree, start, len); - - WARN_ON(!em || em->start != start); + em = btrfs_lookup_extent_mapping(tree, start, len); - if (!em) + if (WARN_ON(!em)) { + btrfs_warn(fs_info, +"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu", + btrfs_ino(inode), btrfs_root_id(inode->root), + start, start + len, gen); + ret = -ENOENT; goto out; + } - if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) - list_move(&em->list, &tree->modified_extents); - em->generation = gen; - clear_bit(EXTENT_FLAG_PINNED, &em->flags); - em->mod_start = em->start; - em->mod_len = em->len; - - if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) { - prealloc = true; - clear_bit(EXTENT_FLAG_FILLING, &em->flags); + if (WARN_ON(em->start != start)) { + btrfs_warn(fs_info, +"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu", + btrfs_ino(inode), btrfs_root_id(inode->root), + em->start, start, start + len, gen); + ret = -EUCLEAN; + goto out; } - try_merge_map(tree, em); + em->generation = gen; + em->flags &= ~EXTENT_FLAG_PINNED; - if (prealloc) { - em->mod_start = em->start; - em->mod_len = em->len; - } + try_merge_map(inode, em); - free_extent_map(em); out: write_unlock(&tree->lock); + btrfs_free_extent_map(em); return ret; } -void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) +void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em) { - clear_bit(EXTENT_FLAG_LOGGING, &em->flags); - if (em->in_tree) - try_merge_map(tree, em); + lockdep_assert_held_write(&inode->extent_tree.lock); + + em->flags &= ~EXTENT_FLAG_LOGGING; + if (btrfs_extent_map_in_tree(em)) + try_merge_map(inode, em); } -/** - * add_extent_mapping - add new extent map to the extent tree - * @tree: tree to insert new map in - * @em: map to insert - * - * Insert @em into @tree or perform a simple forward/backward merge with - * existing mappings. The extent_map struct passed in will be inserted - * into the tree directly, with an additional reference taken, or a - * reference dropped if the merge attempt was successful. 
- */ -int add_extent_mapping(struct extent_map_tree *tree, - struct extent_map *em, int modified) +static inline void setup_extent_mapping(struct btrfs_inode *inode, + struct extent_map *em, + bool modified) { - int ret = 0; - struct rb_node *rb; - struct extent_map *exist; + refcount_inc(&em->refs); - exist = lookup_extent_mapping(tree, em->start, em->len); - if (exist) { - free_extent_map(exist); - ret = -EEXIST; - goto out; - } - rb = tree_insert(&tree->map, em->start, &em->rb_node); - if (rb) { - ret = -EEXIST; - goto out; - } - atomic_inc(&em->refs); - - em->mod_start = em->start; - em->mod_len = em->len; + ASSERT(list_empty(&em->list)); if (modified) - list_move(&em->list, &tree->modified_extents); + list_add(&em->list, &inode->extent_tree.modified_extents); else - try_merge_map(tree, em); -out: - return ret; + try_merge_map(inode, em); } -/* simple helper to do math around the end of an extent, handling wrap */ -static u64 range_end(u64 start, u64 len) +/* + * Add a new extent map to an inode's extent map tree. + * + * @inode: the target inode + * @em: map to insert + * @modified: indicate whether the given @em should be added to the + * modified list, which indicates the extent needs to be logged + * + * Insert @em into the @inode's extent map tree or perform a simple + * forward/backward merge with existing mappings. The extent_map struct passed + * in will be inserted into the tree directly, with an additional reference + * taken, or a reference dropped if the merge attempt was successful. + */ +static int add_extent_mapping(struct btrfs_inode *inode, + struct extent_map *em, bool modified) { - if (start + len < start) - return (u64)-1; - return start + len; + struct extent_map_tree *tree = &inode->extent_tree; + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; + int ret; + + lockdep_assert_held_write(&tree->lock); + + validate_extent_map(fs_info, em); + ret = tree_insert(&tree->root, em); + if (ret) + return ret; + + setup_extent_mapping(inode, em, modified); + + if (!btrfs_is_testing(fs_info) && btrfs_is_fstree(btrfs_root_id(root))) + percpu_counter_inc(&fs_info->evictable_extent_maps); + + return 0; } -static struct extent_map * -__lookup_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len, int strict) +static struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len, bool strict) { struct extent_map *em; struct rb_node *rb_node; - struct rb_node *prev = NULL; - struct rb_node *next = NULL; + struct rb_node *prev_or_next = NULL; u64 end = range_end(start, len); - rb_node = __tree_search(&tree->map, start, &prev, &next); + rb_node = tree_search(&tree->root, start, &prev_or_next); if (!rb_node) { - if (prev) - rb_node = prev; - else if (next) - rb_node = next; + if (prev_or_next) + rb_node = prev_or_next; else return NULL; } em = rb_entry(rb_node, struct extent_map, rb_node); - if (strict && !(end > em->start && start < extent_map_end(em))) + if (strict && !(end > em->start && start < btrfs_extent_map_end(em))) return NULL; - atomic_inc(&em->refs); + refcount_inc(&em->refs); return em; } -/** - * lookup_extent_mapping - lookup extent_map +/* + * Lookup extent_map that intersects @start + @len range. + * * @tree: tree to lookup in * @start: byte offset to start the search * @len: length of the lookup range @@ -385,14 +545,15 @@ __lookup_extent_mapping(struct extent_map_tree *tree, * intersect, so check the object returned carefully to make sure that no * additional lookups are needed. 
*/ -struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len) +struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len) { - return __lookup_extent_mapping(tree, start, len, 1); + return lookup_extent_mapping(tree, start, len, true); } -/** - * search_extent_mapping - find a nearby extent map +/* + * Find a nearby extent map intersecting @start + @len (not an exact search). + * * @tree: tree to lookup in * @start: byte offset to start the search * @len: length of the lookup range @@ -402,28 +563,820 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, * * If one can't be found, any nearby extent may be returned */ -struct extent_map *search_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len) +struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len) { - return __lookup_extent_mapping(tree, start, len, 0); + return lookup_extent_mapping(tree, start, len, false); } -/** - * remove_extent_mapping - removes an extent_map from the extent tree - * @tree: extent tree to remove from - * @em: extent map beeing removed +/* + * Remove an extent_map from its inode's extent tree. + * + * @inode: the inode the extent map belongs to + * @em: extent map being removed * - * Removes @em from @tree. No reference counts are dropped, and no checks - * are done to see if the range is in use + * Remove @em from the extent tree of @inode. No reference counts are dropped, + * and no checks are done to see if the range is in use. */ -int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) +void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em) { - int ret = 0; + struct extent_map_tree *tree = &inode->extent_tree; + + lockdep_assert_held_write(&tree->lock); - WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags)); - rb_erase(&em->rb_node, &tree->map); - if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) + WARN_ON(em->flags & EXTENT_FLAG_PINNED); + if (!(em->flags & EXTENT_FLAG_LOGGING)) list_del_init(&em->list); - em->in_tree = 0; + + remove_em(inode, em); +} + +static void replace_extent_mapping(struct btrfs_inode *inode, + struct extent_map *cur, + struct extent_map *new, + bool modified) +{ + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct extent_map_tree *tree = &inode->extent_tree; + + lockdep_assert_held_write(&tree->lock); + + validate_extent_map(fs_info, new); + + WARN_ON(cur->flags & EXTENT_FLAG_PINNED); + ASSERT(btrfs_extent_map_in_tree(cur)); + if (!(cur->flags & EXTENT_FLAG_LOGGING)) + list_del_init(&cur->list); + rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root); + RB_CLEAR_NODE(&cur->rb_node); + + setup_extent_mapping(inode, new, modified); +} + +static struct extent_map *next_extent_map(const struct extent_map *em) +{ + struct rb_node *next; + + next = rb_next(&em->rb_node); + if (!next) + return NULL; + return container_of(next, struct extent_map, rb_node); +} + +static struct extent_map *prev_extent_map(struct extent_map *em) +{ + struct rb_node *prev; + + prev = rb_prev(&em->rb_node); + if (!prev) + return NULL; + return container_of(prev, struct extent_map, rb_node); +} + +/* + * Helper for btrfs_get_extent. Given an existing extent in the tree, + * the existing extent is the nearest extent to map_start, + * and an extent that you want to insert, deal with overlap and insert + * the best fitted new extent into the tree. 
+ */ +static noinline int merge_extent_mapping(struct btrfs_inode *inode, + struct extent_map *existing, + struct extent_map *em, + u64 map_start) +{ + struct extent_map *prev; + struct extent_map *next; + u64 start; + u64 end; + u64 start_diff; + + if (map_start < em->start || map_start >= btrfs_extent_map_end(em)) + return -EINVAL; + + if (existing->start > map_start) { + next = existing; + prev = prev_extent_map(next); + } else { + prev = existing; + next = next_extent_map(prev); + } + + start = prev ? btrfs_extent_map_end(prev) : em->start; + start = max_t(u64, start, em->start); + end = next ? next->start : btrfs_extent_map_end(em); + end = min_t(u64, end, btrfs_extent_map_end(em)); + start_diff = start - em->start; + em->start = start; + em->len = end - start; + if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) + em->offset += start_diff; + return add_extent_mapping(inode, em, false); +} + +/* + * Add extent mapping into an inode's extent map tree. + * + * @inode: target inode + * @em_in: extent we are inserting + * @start: start of the logical range btrfs_get_extent() is requesting + * @len: length of the logical range btrfs_get_extent() is requesting + * + * Note that @em_in's range may be different from [start, start+len), + * but they must be overlapped. + * + * Insert @em_in into the inode's extent map tree. In case there is an + * overlapping range, handle the -EEXIST by either: + * a) Returning the existing extent in @em_in if @start is within the + * existing em. + * b) Merge the existing extent with @em_in passed in. + * + * Return 0 on success, otherwise -EEXIST. + * + */ +int btrfs_add_extent_mapping(struct btrfs_inode *inode, + struct extent_map **em_in, u64 start, u64 len) +{ + int ret; + struct extent_map *em = *em_in; + struct btrfs_fs_info *fs_info = inode->root->fs_info; + + /* + * Tree-checker should have rejected any inline extent with non-zero + * file offset. Here just do a sanity check. + */ + if (em->disk_bytenr == EXTENT_MAP_INLINE) + ASSERT(em->start == 0); + + ret = add_extent_mapping(inode, em, false); + /* it is possible that someone inserted the extent into the tree + * while we had the lock dropped. It is also possible that + * an overlapping map exists in the tree + */ + if (ret == -EEXIST) { + struct extent_map *existing; + + existing = btrfs_search_extent_mapping(&inode->extent_tree, start, len); + + trace_btrfs_handle_em_exist(fs_info, existing, em, start, len); + + /* + * existing will always be non-NULL, since there must be + * extent causing the -EEXIST. + */ + if (start >= existing->start && + start < btrfs_extent_map_end(existing)) { + btrfs_free_extent_map(em); + *em_in = existing; + ret = 0; + } else { + u64 orig_start = em->start; + u64 orig_len = em->len; + + /* + * The existing extent map is the one nearest to + * the [start, start + len) range which overlaps + */ + ret = merge_extent_mapping(inode, existing, em, start); + if (WARN_ON(ret)) { + btrfs_free_extent_map(em); + *em_in = NULL; + btrfs_warn(fs_info, +"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu", + existing->start, btrfs_extent_map_end(existing), + orig_start, orig_start + orig_len, start); + } + btrfs_free_extent_map(existing); + } + } + + ASSERT(ret == 0 || ret == -EEXIST); + return ret; +} + +/* + * Drop all extent maps from a tree in the fastest possible way, rescheduling + * if needed. This avoids searching the tree, from the root down to the first + * extent map, before each deletion. 
+ */ +static void drop_all_extent_maps_fast(struct btrfs_inode *inode) +{ + struct extent_map_tree *tree = &inode->extent_tree; + struct rb_node *node; + + write_lock(&tree->lock); + node = rb_first(&tree->root); + while (node) { + struct extent_map *em; + struct rb_node *next = rb_next(node); + + em = rb_entry(node, struct extent_map, rb_node); + em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING); + btrfs_remove_extent_mapping(inode, em); + btrfs_free_extent_map(em); + + if (cond_resched_rwlock_write(&tree->lock)) + node = rb_first(&tree->root); + else + node = next; + } + write_unlock(&tree->lock); +} + +/* + * Drop all extent maps in a given range. + * + * @inode: The target inode. + * @start: Start offset of the range. + * @end: End offset of the range (inclusive value). + * @skip_pinned: Indicate if pinned extent maps should be ignored or not. + * + * This drops all the extent maps that intersect the given range [@start, @end]. + * Extent maps that partially overlap the range and extend behind or beyond it, + * are split. + * The caller should have locked an appropriate file range in the inode's io + * tree before calling this function. + */ +void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, + bool skip_pinned) +{ + struct extent_map *split; + struct extent_map *split2; + struct extent_map *em; + struct extent_map_tree *em_tree = &inode->extent_tree; + u64 len = end - start + 1; + + WARN_ON(end < start); + if (end == (u64)-1) { + if (start == 0 && !skip_pinned) { + drop_all_extent_maps_fast(inode); + return; + } + len = (u64)-1; + } else { + /* Make end offset exclusive for use in the loop below. */ + end++; + } + + /* + * It's ok if we fail to allocate the extent maps, see the comment near + * the bottom of the loop below. We only need two spare extent maps in + * the worst case, where the first extent map that intersects our range + * starts before the range and the last extent map that intersects our + * range ends after our range (and they might be the same extent map), + * because we need to split those two extent maps at the boundaries. + */ + split = btrfs_alloc_extent_map(); + split2 = btrfs_alloc_extent_map(); + + write_lock(&em_tree->lock); + em = btrfs_lookup_extent_mapping(em_tree, start, len); + + while (em) { + /* extent_map_end() returns exclusive value (last byte + 1). */ + const u64 em_end = btrfs_extent_map_end(em); + struct extent_map *next_em = NULL; + u64 gen; + unsigned long flags; + bool modified; + + if (em_end < end) { + next_em = next_extent_map(em); + if (next_em) { + if (next_em->start < end) + refcount_inc(&next_em->refs); + else + next_em = NULL; + } + } + + if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) { + start = em_end; + goto next; + } + + flags = em->flags; + /* + * In case we split the extent map, we want to preserve the + * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want + * it on the new extent maps. + */ + em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING); + modified = !list_empty(&em->list); + + /* + * The extent map does not cross our target range, so no need to + * split it, we can remove it directly. 
+ */ + if (em->start >= start && em_end <= end) + goto remove_em; + + gen = em->generation; + + if (em->start < start) { + if (!split) { + split = split2; + split2 = NULL; + if (!split) + goto remove_em; + } + split->start = em->start; + split->len = start - em->start; + + if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { + split->disk_bytenr = em->disk_bytenr; + split->disk_num_bytes = em->disk_num_bytes; + split->offset = em->offset; + split->ram_bytes = em->ram_bytes; + } else { + split->disk_bytenr = em->disk_bytenr; + split->disk_num_bytes = 0; + split->offset = 0; + split->ram_bytes = split->len; + } + + split->generation = gen; + split->flags = flags; + replace_extent_mapping(inode, em, split, modified); + btrfs_free_extent_map(split); + split = split2; + split2 = NULL; + } + if (em_end > end) { + if (!split) { + split = split2; + split2 = NULL; + if (!split) + goto remove_em; + } + split->start = end; + split->len = em_end - end; + split->disk_bytenr = em->disk_bytenr; + split->flags = flags; + split->generation = gen; + + if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { + split->disk_num_bytes = em->disk_num_bytes; + split->offset = em->offset + end - em->start; + split->ram_bytes = em->ram_bytes; + } else { + split->disk_num_bytes = 0; + split->offset = 0; + split->ram_bytes = split->len; + } + + if (btrfs_extent_map_in_tree(em)) { + replace_extent_mapping(inode, em, split, modified); + } else { + int ret; + + ret = add_extent_mapping(inode, split, modified); + /* Logic error, shouldn't happen. */ + ASSERT(ret == 0); + if (WARN_ON(ret != 0) && modified) + btrfs_set_inode_full_sync(inode); + } + btrfs_free_extent_map(split); + split = NULL; + } +remove_em: + if (btrfs_extent_map_in_tree(em)) { + /* + * If the extent map is still in the tree it means that + * either of the following is true: + * + * 1) It fits entirely in our range (doesn't end beyond + * it or starts before it); + * + * 2) It starts before our range and/or ends after our + * range, and we were not able to allocate the extent + * maps for split operations, @split and @split2. + * + * If we are at case 2) then we just remove the entire + * extent map - this is fine since if anyone needs it to + * access the subranges outside our range, will just + * load it again from the subvolume tree's file extent + * item. However if the extent map was in the list of + * modified extents, then we must mark the inode for a + * full fsync, otherwise a fast fsync will miss this + * extent if it's new and needs to be logged. + */ + if ((em->start < start || em_end > end) && modified) { + ASSERT(!split); + btrfs_set_inode_full_sync(inode); + } + btrfs_remove_extent_mapping(inode, em); + } + + /* + * Once for the tree reference (we replaced or removed the + * extent map from the tree). + */ + btrfs_free_extent_map(em); +next: + /* Once for us (for our lookup reference). */ + btrfs_free_extent_map(em); + + em = next_em; + } + + write_unlock(&em_tree->lock); + + btrfs_free_extent_map(split); + btrfs_free_extent_map(split2); +} + +/* + * Replace a range in the inode's extent map tree with a new extent map. + * + * @inode: The target inode. + * @new_em: The new extent map to add to the inode's extent map tree. + * @modified: Indicate if the new extent map should be added to the list of + * modified extents (for fast fsync tracking). + * + * Drops all the extent maps in the inode's extent map tree that intersect the + * range of the new extent map and adds the new extent map to the tree. 
+ * The caller should have locked an appropriate file range in the inode's io + * tree before calling this function. + */ +int btrfs_replace_extent_map_range(struct btrfs_inode *inode, + struct extent_map *new_em, + bool modified) +{ + const u64 end = new_em->start + new_em->len - 1; + struct extent_map_tree *tree = &inode->extent_tree; + int ret; + + ASSERT(!btrfs_extent_map_in_tree(new_em)); + + /* + * The caller has locked an appropriate file range in the inode's io + * tree, but getting -EEXIST when adding the new extent map can still + * happen in case there are extents that partially cover the range, and + * this is due to two tasks operating on different parts of the extent. + * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from + * btrfs_get_extent") for an example and details. + */ + do { + btrfs_drop_extent_map_range(inode, new_em->start, end, false); + write_lock(&tree->lock); + ret = add_extent_mapping(inode, new_em, modified); + write_unlock(&tree->lock); + } while (ret == -EEXIST); + + return ret; +} + +/* + * Split off the first pre bytes from the extent_map at [start, start + len], + * and set the block_start for it to new_logical. + * + * This function is used when an ordered_extent needs to be split. + */ +int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, + u64 new_logical) +{ + struct extent_map_tree *em_tree = &inode->extent_tree; + struct extent_map *em; + struct extent_map *split_pre = NULL; + struct extent_map *split_mid = NULL; + int ret = 0; + unsigned long flags; + + ASSERT(pre != 0); + ASSERT(pre < len); + + split_pre = btrfs_alloc_extent_map(); + if (!split_pre) + return -ENOMEM; + split_mid = btrfs_alloc_extent_map(); + if (!split_mid) { + ret = -ENOMEM; + goto out_free_pre; + } + + btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL); + write_lock(&em_tree->lock); + em = btrfs_lookup_extent_mapping(em_tree, start, len); + if (unlikely(!em)) { + ret = -EIO; + goto out_unlock; + } + + ASSERT(em->len == len); + ASSERT(!btrfs_extent_map_is_compressed(em)); + ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE); + ASSERT(em->flags & EXTENT_FLAG_PINNED); + ASSERT(!(em->flags & EXTENT_FLAG_LOGGING)); + ASSERT(!list_empty(&em->list)); + + flags = em->flags; + em->flags &= ~EXTENT_FLAG_PINNED; + + /* First, replace the em with a new extent_map starting from * em->start */ + split_pre->start = em->start; + split_pre->len = pre; + split_pre->disk_bytenr = new_logical; + split_pre->disk_num_bytes = split_pre->len; + split_pre->offset = 0; + split_pre->ram_bytes = split_pre->len; + split_pre->flags = flags; + split_pre->generation = em->generation; + + replace_extent_mapping(inode, em, split_pre, true); + + /* + * Now we only have an extent_map at: + * [em->start, em->start + pre] + */ + + /* Insert the middle extent_map. 
*/ + split_mid->start = em->start + pre; + split_mid->len = em->len - pre; + split_mid->disk_bytenr = btrfs_extent_map_block_start(em) + pre; + split_mid->disk_num_bytes = split_mid->len; + split_mid->offset = 0; + split_mid->ram_bytes = split_mid->len; + split_mid->flags = flags; + split_mid->generation = em->generation; + add_extent_mapping(inode, split_mid, true); + + /* Once for us */ + btrfs_free_extent_map(em); + /* Once for the tree */ + btrfs_free_extent_map(em); + +out_unlock: + write_unlock(&em_tree->lock); + btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL); + btrfs_free_extent_map(split_mid); +out_free_pre: + btrfs_free_extent_map(split_pre); return ret; } + +struct btrfs_em_shrink_ctx { + long nr_to_scan; + long scanned; +}; + +static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx) +{ + struct btrfs_fs_info *fs_info = inode->root->fs_info; + const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info); + struct extent_map_tree *tree = &inode->extent_tree; + long nr_dropped = 0; + struct rb_node *node; + + lockdep_assert_held_write(&tree->lock); + + /* + * Take the mmap lock so that we serialize with the inode logging phase + * of fsync because we may need to set the full sync flag on the inode, + * in case we have to remove extent maps in the tree's list of modified + * extents. If we set the full sync flag in the inode while an fsync is + * in progress, we may risk missing new extents because before the flag + * is set, fsync decides to only wait for writeback to complete and then + * during inode logging it sees the flag set and uses the subvolume tree + * to find new extents, which may not be there yet because ordered + * extents haven't completed yet. + * + * We also do a try lock because we don't want to block for too long and + * we are holding the extent map tree's lock in write mode. + */ + if (!down_read_trylock(&inode->i_mmap_lock)) + return 0; + + node = rb_first(&tree->root); + while (node) { + struct rb_node *next = rb_next(node); + struct extent_map *em; + + em = rb_entry(node, struct extent_map, rb_node); + ctx->scanned++; + + if (em->flags & EXTENT_FLAG_PINNED) + goto next; + + /* + * If the inode is in the list of modified extents (new) and its + * generation is the same (or is greater than) the current fs + * generation, it means it was not yet persisted so we have to + * set the full sync flag so that the next fsync will not miss + * it. + */ + if (!list_empty(&em->list) && em->generation >= cur_fs_gen) + btrfs_set_inode_full_sync(inode); + + btrfs_remove_extent_mapping(inode, em); + trace_btrfs_extent_map_shrinker_remove_em(inode, em); + /* Drop the reference for the tree. */ + btrfs_free_extent_map(em); + nr_dropped++; +next: + if (ctx->scanned >= ctx->nr_to_scan) + break; + + /* + * Stop if we need to reschedule or there's contention on the + * lock. This is to avoid slowing other tasks trying to take the + * lock. 
+ */ + if (need_resched() || rwlock_needbreak(&tree->lock) || + btrfs_fs_closing(fs_info)) + break; + node = next; + } + up_read(&inode->i_mmap_lock); + + return nr_dropped; +} + +static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root, + u64 min_ino) +{ + struct btrfs_inode *inode; + unsigned long from = min_ino; + + xa_lock(&root->inodes); + while (true) { + struct extent_map_tree *tree; + + inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT); + if (!inode) + break; + + tree = &inode->extent_tree; + + /* + * We want to be fast so if the lock is busy we don't want to + * spend time waiting for it (some task is about to do IO for + * the inode). + */ + if (!write_trylock(&tree->lock)) + goto next; + + /* + * Skip inode if it doesn't have loaded extent maps, so we avoid + * getting a reference and doing an iput later. This includes + * cases like files that were opened for things like stat(2), or + * files with all extent maps previously released through the + * release folio callback (btrfs_release_folio()) or released in + * a previous run, or directories which never have extent maps. + */ + if (RB_EMPTY_ROOT(&tree->root)) { + write_unlock(&tree->lock); + goto next; + } + + if (igrab(&inode->vfs_inode)) + break; + + write_unlock(&tree->lock); +next: + from = btrfs_ino(inode) + 1; + cond_resched_lock(&root->inodes.xa_lock); + } + xa_unlock(&root->inodes); + + return inode; +} + +static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_inode *inode; + long nr_dropped = 0; + u64 min_ino = fs_info->em_shrinker_last_ino + 1; + + inode = find_first_inode_to_shrink(root, min_ino); + while (inode) { + nr_dropped += btrfs_scan_inode(inode, ctx); + write_unlock(&inode->extent_tree.lock); + + min_ino = btrfs_ino(inode) + 1; + fs_info->em_shrinker_last_ino = btrfs_ino(inode); + iput(&inode->vfs_inode); + + if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info)) + break; + + cond_resched(); + + inode = find_first_inode_to_shrink(root, min_ino); + } + + if (inode) { + /* + * There are still inodes in this root or we happened to process + * the last one and reached the scan limit. In either case set + * the current root to this one, so we'll resume from the next + * inode if there is one or we will find out this was the last + * one and move to the next root. + */ + fs_info->em_shrinker_last_root = btrfs_root_id(root); + } else { + /* + * No more inodes in this root, set extent_map_shrinker_last_ino to 0 so + * that when processing the next root we start from its first inode. 
+	 */
+		fs_info->em_shrinker_last_ino = 0;
+		fs_info->em_shrinker_last_root = btrfs_root_id(root) + 1;
+	}
+
+	return nr_dropped;
+}
+
+static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
+{
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_em_shrink_ctx ctx;
+	u64 start_root_id;
+	u64 next_root_id;
+	bool cycled = false;
+	long nr_dropped = 0;
+
+	fs_info = container_of(work, struct btrfs_fs_info, em_shrinker_work);
+
+	ctx.scanned = 0;
+	ctx.nr_to_scan = atomic64_read(&fs_info->em_shrinker_nr_to_scan);
+
+	start_root_id = fs_info->em_shrinker_last_root;
+	next_root_id = fs_info->em_shrinker_last_root;
+
+	if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
+		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+		trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr);
+	}
+
+	while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
+		struct btrfs_root *root;
+		unsigned long count;
+
+		cond_resched();
+
+		spin_lock(&fs_info->fs_roots_radix_lock);
+		count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+					       (void **)&root,
+					       (unsigned long)next_root_id, 1);
+		if (count == 0) {
+			spin_unlock(&fs_info->fs_roots_radix_lock);
+			if (start_root_id > 0 && !cycled) {
+				next_root_id = 0;
+				fs_info->em_shrinker_last_root = 0;
+				fs_info->em_shrinker_last_ino = 0;
+				cycled = true;
+				continue;
+			}
+			break;
+		}
+		next_root_id = btrfs_root_id(root) + 1;
+		root = btrfs_grab_root(root);
+		spin_unlock(&fs_info->fs_roots_radix_lock);
+
+		if (!root)
+			continue;
+
+		if (btrfs_is_fstree(btrfs_root_id(root)))
+			nr_dropped += btrfs_scan_root(root, &ctx);
+
+		btrfs_put_root(root);
+	}
+
+	if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
+		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+		trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
+	}
+
+	atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
+}
+
+void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
+{
+	/*
+	 * Do nothing if the shrinker is already running. In case of high memory
+	 * pressure we can have a lot of tasks calling us and all passing the
+	 * same nr_to_scan value, but in reality we may need only to free
+	 * nr_to_scan extent maps (or less). In case we need to free more than
+	 * that, we will be called again by the fs shrinker, so no worries about
+	 * not doing enough work to reclaim memory from extent maps.
+	 * We can also be repeatedly called with the same nr_to_scan value
+	 * simply because the shrinker runs asynchronously and multiple calls
+	 * to this function are made before the shrinker does enough progress.
+	 *
+	 * That's why we set the atomic counter to nr_to_scan only if its
+	 * current value is zero, instead of incrementing the counter by
+	 * nr_to_scan.
+	 */
+	if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
+		return;
+
+	queue_work(system_dfl_wq, &fs_info->em_shrinker_work);
+}
+
+void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
+{
+	atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
+	INIT_WORK(&fs_info->em_shrinker_work, btrfs_extent_map_shrinker_worker);
+}
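
Several of the arithmetic and concurrency patterns in this patch are easier to see in isolation, so a few self-contained sketches follow. All of them are userspace models, not kernel code: the structs, constants, and test harnesses are invented for illustration, and only the formulas mirror the patch.

First, the interval math. range_end() clamps start + len to U64_MAX instead of wrapping, and both tree_insert() and the strict mode of lookup_extent_mapping() use the same half-open overlap test, end > entry->start && start < extent_map_end(entry). A minimal model:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Clamp start + len to UINT64_MAX instead of wrapping, as range_end() does. */
static uint64_t range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)
		return UINT64_MAX;
	return start + len;
}

/* Half-open overlap test: [a_start, a_end) intersects [b_start, b_end). */
static bool ranges_overlap(uint64_t a_start, uint64_t a_len,
			   uint64_t b_start, uint64_t b_len)
{
	return range_end(a_start, a_len) > b_start &&
	       a_start < range_end(b_start, b_len);
}

int main(void)
{
	/* Adjacent ranges do not overlap under half-open semantics. */
	assert(!ranges_overlap(0, 4096, 4096, 4096));
	/* One byte of intersection is enough. */
	assert(ranges_overlap(0, 4097, 4096, 4096));
	/* A length that would wrap past U64_MAX is clamped, not wrapped. */
	assert(range_end(UINT64_MAX - 10, 100) == UINT64_MAX);
	return 0;
}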
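
Next, merge eligibility. can_merge_extent_map() and mergeable_maps() split the decision into per-extent vetoes (pinned, compressed, logging, still on the modified list) and a pairwise check that masks out EXTENT_FLAG_MERGED before comparing flags, since that bit only records merge history and never reaches disk. A compact model of the flag handling (flag values and the struct below are invented for the sketch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define EXTENT_FLAG_PINNED	(1u << 0)
#define EXTENT_FLAG_LOGGING	(1u << 1)
#define EXTENT_FLAG_MERGED	(1u << 2)	/* in-memory only, never on disk */

struct em {
	uint64_t start;
	uint64_t len;
	uint32_t flags;
};

static bool can_merge(const struct em *e)
{
	/* Pinned or being-logged extents must keep their identity. */
	return !(e->flags & (EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING));
}

static bool mergeable(const struct em *prev, const struct em *next)
{
	if (prev->start + prev->len != next->start)
		return false;
	/* Ignore the bookkeeping bit when comparing flags. */
	return (prev->flags & ~EXTENT_FLAG_MERGED) ==
	       (next->flags & ~EXTENT_FLAG_MERGED);
}

int main(void)
{
	struct em a = { .start = 0,    .len = 4096, .flags = EXTENT_FLAG_MERGED };
	struct em b = { .start = 4096, .len = 4096, .flags = 0 };

	/* Differing only in EXTENT_FLAG_MERGED still merges. */
	assert(can_merge(&a) && can_merge(&b) && mergeable(&a, &b));

	b.flags |= EXTENT_FLAG_PINNED;
	assert(!can_merge(&b));
	return 0;
}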
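
merge_ondisk_extents() handles both merge shapes described in its comment, two maps into one data extent and maps into two adjacent data extents, with the same three formulas: take the minimum disk_bytenr, span out to the maximum disk end, and recompute offset relative to the new base. A userspace rendition using case-2 numbers (the struct and harness are illustrative):

#include <assert.h>
#include <stdint.h>

struct em {
	uint64_t disk_bytenr;
	uint64_t disk_num_bytes;
	uint64_t offset;
	uint64_t ram_bytes;
};

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Mirror of the patch's math: merge the data extents first, then re-derive
 * the file-extent offset against the merged base. */
static void merge_ondisk(const struct em *prev, const struct em *next,
			 struct em *merged)
{
	uint64_t base = MIN(prev->disk_bytenr, next->disk_bytenr);
	uint64_t end = MAX(prev->disk_bytenr + prev->disk_num_bytes,
			   next->disk_bytenr + next->disk_num_bytes);

	merged->disk_bytenr = base;
	merged->disk_num_bytes = end - base;
	merged->ram_bytes = end - base;
	merged->offset = prev->disk_bytenr + prev->offset - base;
}

int main(void)
{
	/* Two adjacent 64K data extents, each fully referenced. */
	struct em prev = { .disk_bytenr = 1 << 20, .disk_num_bytes = 65536, .offset = 0 };
	struct em next = { .disk_bytenr = (1 << 20) + 65536, .disk_num_bytes = 65536, .offset = 0 };
	struct em out;

	merge_ondisk(&prev, &next, &out);
	assert(out.disk_bytenr == 1 << 20);
	assert(out.disk_num_bytes == 131072);
	assert(out.offset == 0);
	return 0;
}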
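
validate_extent_map() only fires on CONFIG_BTRFS_DEBUG builds and encodes the invariants for regular (non-hole, non-inline) extents: disk_num_bytes is non-zero, the referenced range fits within ram_bytes, and for uncompressed extents it also fits within (and matches) disk_num_bytes. The same checks as a standalone predicate (hypothetical struct, simplified to return a reason string instead of crashing):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct em {
	uint64_t len;
	uint64_t disk_num_bytes;
	uint64_t offset;
	uint64_t ram_bytes;
	bool compressed;
};

/* Returns NULL when the map is sane, else a short description of the
 * violated invariant, mirroring the dump_extent_map() prefixes. */
static const char *check_regular_em(const struct em *em)
{
	if (em->disk_num_bytes == 0)
		return "zero disk_num_bytes";
	if (em->offset + em->len > em->ram_bytes)
		return "ram_bytes too small";
	if (!em->compressed && em->offset + em->len > em->disk_num_bytes)
		return "disk_num_bytes too small";
	if (!em->compressed && em->ram_bytes != em->disk_num_bytes)
		return "ram_bytes mismatch with disk_num_bytes";
	return NULL;
}

int main(void)
{
	struct em ok = { .len = 4096, .disk_num_bytes = 8192,
			 .offset = 4096, .ram_bytes = 8192 };
	struct em bad = ok;

	assert(check_regular_em(&ok) == NULL);
	bad.ram_bytes = 4096;	/* offset + len now exceeds ram_bytes */
	assert(check_regular_em(&bad) != NULL);
	return 0;
}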
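
merge_extent_mapping() never rejects the new map outright; it clips it to the free gap between the existing neighbours, then bumps offset by however much was trimmed from the front so the file-to-disk mapping stays aligned. A sketch of just the clipping arithmetic (the gap bounds are passed as plain numbers here; in the kernel they come from the prev/next tree nodes):

#include <assert.h>
#include <stdint.h>

struct em {
	uint64_t start;
	uint64_t len;
	uint64_t offset;
};

/* Clip @em to [gap_start, gap_end) and keep its disk mapping consistent
 * by advancing offset by the amount trimmed from the front. */
static void clip_to_gap(struct em *em, uint64_t gap_start, uint64_t gap_end)
{
	uint64_t end = em->start + em->len;
	uint64_t start = gap_start > em->start ? gap_start : em->start;
	uint64_t stop = gap_end < end ? gap_end : end;
	uint64_t start_diff = start - em->start;

	em->start = start;
	em->len = stop - start;
	em->offset += start_diff;
}

int main(void)
{
	/* New map covers [0, 64K) but [0, 16K) is already mapped. */
	struct em em = { .start = 0, .len = 65536, .offset = 0 };

	clip_to_gap(&em, 16384, 65536);
	assert(em.start == 16384);
	assert(em.len == 65536 - 16384);
	assert(em.offset == 16384);
	return 0;
}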
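
In btrfs_drop_extent_map_range(), an extent map that sticks out past either boundary of the dropped range is split: the surviving front piece keeps the original offset, while the surviving tail piece has offset advanced by how far into the extent the cut lands. The field updates, modeled in isolation (a regular, uncompressed extent is assumed; the struct is illustrative):

#include <assert.h>
#include <stdint.h>

struct em {
	uint64_t start;
	uint64_t len;
	uint64_t offset;	/* into the on-disk extent */
};

/* Front split: keep [em->start, cut) with the original disk offset. */
static struct em split_front(const struct em *em, uint64_t cut)
{
	struct em s = { .start = em->start,
			.len = cut - em->start,
			.offset = em->offset };
	return s;
}

/* Tail split: keep [cut, end) and skip the dropped bytes on disk too. */
static struct em split_tail(const struct em *em, uint64_t cut)
{
	struct em s = { .start = cut,
			.len = em->start + em->len - cut,
			.offset = em->offset + (cut - em->start) };
	return s;
}

int main(void)
{
	/* Drop [32K, 96K) out of a 128K extent starting at file offset 0. */
	struct em em = { .start = 0, .len = 131072, .offset = 0 };
	struct em front = split_front(&em, 32768);
	struct em tail = split_tail(&em, 98304);

	assert(front.len == 32768 && front.offset == 0);
	assert(tail.start == 98304 && tail.len == 32768 && tail.offset == 98304);
	return 0;
}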
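
btrfs_replace_extent_map_range() tolerates racing insertions by looping: drop the range, try to insert, and if another task slipped a partially overlapping map in between, drop and try again. The shape of that retry, reduced to a stub (drop_range() and add_mapping() are placeholders standing in for the kernel functions):

#include <assert.h>
#include <errno.h>

static int inserts_left_to_race = 1;

/* Placeholder: in the kernel this is btrfs_drop_extent_map_range(). */
static void drop_range(void) { }

/* Placeholder for add_extent_mapping(); fails once to simulate a racing
 * task inserting an overlapping map after our drop. */
static int add_mapping(void)
{
	if (inserts_left_to_race-- > 0)
		return -EEXIST;
	return 0;
}

int main(void)
{
	int ret;

	do {
		drop_range();
		ret = add_mapping();
	} while (ret == -EEXIST);

	assert(ret == 0);
	return 0;
}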
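
btrfs_split_extent_map() carves [start, start + pre) off the front and points it at new_logical, while the remainder keeps the original block start shifted by pre. The two derived maps, computed standalone (the helper below assumes an uncompressed extent whose block start equals disk_bytenr, which the kernel code asserts):

#include <assert.h>
#include <stdint.h>

struct em {
	uint64_t start;		/* file offset */
	uint64_t len;
	uint64_t disk_bytenr;
};

static void split_em(const struct em *em, uint64_t pre, uint64_t new_logical,
		     struct em *split_pre, struct em *split_mid)
{
	/* Front piece is redirected to the freshly allocated location. */
	split_pre->start = em->start;
	split_pre->len = pre;
	split_pre->disk_bytenr = new_logical;

	/* Remainder stays where it was, pre bytes further in. */
	split_mid->start = em->start + pre;
	split_mid->len = em->len - pre;
	split_mid->disk_bytenr = em->disk_bytenr + pre;
}

int main(void)
{
	struct em em = { .start = 1048576, .len = 131072, .disk_bytenr = 4194304 };
	struct em pre, mid;

	split_em(&em, 65536, 8388608, &pre, &mid);
	assert(pre.start == 1048576 && pre.len == 65536 && pre.disk_bytenr == 8388608);
	assert(mid.start == 1048576 + 65536 && mid.len == 65536 &&
	       mid.disk_bytenr == 4194304 + 65536);
	return 0;
}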
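
Finally, btrfs_free_extent_maps() arms the shrinker with a compare-and-swap rather than an increment: the target count is only stored if the counter is currently zero, so a storm of shrink requests collapses into one queued work item. The same gate expressed with C11 atomics (worker dispatch is faked with a counter; this models the pattern, not the kernel API):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t nr_to_scan;
static int work_queued;

static void queue_shrinker(long nr)
{
	int64_t expected = 0;

	/* Only the caller that transitions 0 -> nr schedules the work;
	 * everyone else sees a non-zero counter and backs off. */
	if (!atomic_compare_exchange_strong(&nr_to_scan, &expected, nr))
		return;
	work_queued++;
}

static void shrinker_worker(void)
{
	/* ... scan up to nr_to_scan extent maps ... */
	atomic_store(&nr_to_scan, 0);	/* re-arm for the next request */
}

int main(void)
{
	queue_shrinker(128);
	queue_shrinker(128);	/* coalesced: no second work item */
	assert(work_queued == 1);

	shrinker_worker();
	queue_shrinker(64);	/* counter was reset, arms again */
	assert(work_queued == 2);
	return 0;
}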
