path: root/fs/btrfs/lru_cache.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-20 12:54:27 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-20 12:54:27 -0800
commit    885ce48739189fac6645ff42d736ee0de0b5917d (patch)
tree      d69300909ff5b4a0291a7311cd25cf90801a650a  /fs/btrfs/lru_cache.c
parent    274978f173276c5720a3cd8d0b6047d2c0d3a684 (diff)
parent    964a54e5e1a0d70cd80bd5a0885a1938463625b1 (diff)
Merge tag 'for-6.3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
 "The usual mix of performance improvements and new features.

  The core change is reworking how checksums are processed, with
  followup cleanups and simplifications. There are two minor changes
  in block layer and iomap code.

  Features:

   - block group allocation class heuristics:
      - pack files by size (up to 128k, up to 8M, more) to avoid
        fragmentation in block groups, assuming that file size and
        life time is correlated, in particular this may help during
        balance
      - with tracepoints and extensible in the future

  Performance:

   - send: cache directory utimes and only emit the command when
     necessary
      - speedup up to 10x
      - smaller final stream produced (no redundant utimes commands
        issued)
      - compatibility not affected

   - fiemap: skip backref checks for shared leaves
      - speedup 3x on sample filesystem with all leaves shared (e.g.
        on snapshots)

   - micro optimized b-tree key lookup, speedup in metadata operations
     (sample benchmark: fs_mark +10% of files/sec)

  Core changes:

   - change where checksumming is done in the io path:
      - checksum and read repair does verification at lower layer
      - cascaded cleanups and simplifications

   - raid56 refactoring and cleanups

  Fixes:

   - sysfs: make sure that a run-time change of a feature is correctly
     tracked by the feature files

   - scrub: better reporting of tree block errors

  Other:

   - locally enable -Wmaybe-uninitialized after fixing all warnings

   - misc cleanups, spelling fixes

  Other code:

   - block: export bio_split_rw

   - iomap: remove IOMAP_F_ZONE_APPEND"

* tag 'for-6.3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (109 commits)
  btrfs: make kobj_type structures constant
  btrfs: remove the bdev argument to btrfs_rmap_block
  btrfs: don't rely on unchanging ->bi_bdev for zone append remaps
  btrfs: never return true for reads in btrfs_use_zone_append
  btrfs: pass a btrfs_bio to btrfs_use_append
  btrfs: set bbio->file_offset in alloc_new_bio
  btrfs: use file_offset to limit bios size in calc_bio_boundaries
  btrfs: do unsigned integer division in the extent buffer binary search loop
  btrfs: eliminate extra call when doing binary search on extent buffer
  btrfs: raid56: handle endio in scrub_rbio
  btrfs: raid56: handle endio in recover_rbio
  btrfs: raid56: handle endio in rmw_rbio
  btrfs: raid56: submit the read bios from scrub_assemble_read_bios
  btrfs: raid56: fold rmw_read_wait_recover into rmw_read_bios
  btrfs: raid56: fold recover_assemble_read_bios into recover_rbio
  btrfs: raid56: add a bio_list_put helper
  btrfs: raid56: wait for I/O completion in submit_read_bios
  btrfs: raid56: simplify code flow in rmw_rbio
  btrfs: raid56: simplify error handling and code flow in raid56_parity_write
  btrfs: replace btrfs_wait_tree_block_writeback by wait_on_extent_buffer_writeback
  ...
Diffstat (limited to 'fs/btrfs/lru_cache.c')
-rw-r--r--   fs/btrfs/lru_cache.c   166
1 file changed, 166 insertions, 0 deletions
diff --git a/fs/btrfs/lru_cache.c b/fs/btrfs/lru_cache.c
new file mode 100644
index 000000000000..0fe0ae54ac67
--- /dev/null
+++ b/fs/btrfs/lru_cache.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include "lru_cache.h"
+#include "messages.h"
+
+/*
+ * Initialize a cache object.
+ *
+ * @cache: The cache.
+ * @max_size: Maximum size (number of entries) for the cache.
+ * Use 0 for unlimited size, it's the user's responsibility to
+ * trim the cache in that case.
+ */
+void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
+{
+ INIT_LIST_HEAD(&cache->lru_list);
+ mt_init(&cache->entries);
+ cache->size = 0;
+ cache->max_size = max_size;
+}
+
+static struct btrfs_lru_cache_entry *match_entry(struct list_head *head, u64 key,
+ u64 gen)
+{
+ struct btrfs_lru_cache_entry *entry;
+
+ list_for_each_entry(entry, head, list) {
+ if (entry->key == key && entry->gen == gen)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * Look up an entry in the cache.
+ *
+ * @cache: The cache.
+ * @key: The key of the entry we are looking for.
+ * @gen: Generation associated with the key.
+ *
+ * Returns the entry associated with the key or NULL if none found.
+ */
+struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
+ u64 key, u64 gen)
+{
+ struct list_head *head;
+ struct btrfs_lru_cache_entry *entry;
+
+ head = mtree_load(&cache->entries, key);
+ if (!head)
+ return NULL;
+
+ entry = match_entry(head, key, gen);
+ if (entry)
+ list_move_tail(&entry->lru_list, &cache->lru_list);
+
+ return entry;
+}
+
+/*
+ * Remove an entry from the cache.
+ *
+ * @cache: The cache to remove from.
+ * @entry: The entry to remove from the cache.
+ *
+ * Note: this also frees the memory used by the entry.
+ */
+void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *entry)
+{
+ struct list_head *prev = entry->list.prev;
+
+ ASSERT(cache->size > 0);
+ ASSERT(!mtree_empty(&cache->entries));
+
+ list_del(&entry->list);
+ list_del(&entry->lru_list);
+
+ if (list_empty(prev)) {
+ struct list_head *head;
+
+ /*
+ * If the previous element in the list entry->list is now empty, it
+ * means it's a head entry not pointing to any cached entries, so
+ * remove it from the maple tree and free it.
+ */
+ head = mtree_erase(&cache->entries, entry->key);
+ ASSERT(head == prev);
+ kfree(head);
+ }
+
+ kfree(entry);
+ cache->size--;
+}
+
+/*
+ * Store an entry in the cache.
+ *
+ * @cache: The cache.
+ * @new_entry: The entry to store.
+ * @gfp: Flags for the memory allocations done internally.
+ * Returns 0 on success and < 0 on error.
+ */
+int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
+ struct btrfs_lru_cache_entry *new_entry,
+ gfp_t gfp)
+{
+ const u64 key = new_entry->key;
+ struct list_head *head;
+ int ret;
+
+ head = kmalloc(sizeof(*head), gfp);
+ if (!head)
+ return -ENOMEM;
+
+ ret = mtree_insert(&cache->entries, key, head, gfp);
+ if (ret == 0) {
+ INIT_LIST_HEAD(head);
+ list_add_tail(&new_entry->list, head);
+ } else if (ret == -EEXIST) {
+ kfree(head);
+ head = mtree_load(&cache->entries, key);
+ ASSERT(head != NULL);
+ if (match_entry(head, key, new_entry->gen) != NULL)
+ return -EEXIST;
+ list_add_tail(&new_entry->list, head);
+ } else if (ret < 0) {
+ kfree(head);
+ return ret;
+ }
+
+ if (cache->max_size > 0 && cache->size == cache->max_size) {
+ struct btrfs_lru_cache_entry *lru_entry;
+
+ lru_entry = list_first_entry(&cache->lru_list,
+ struct btrfs_lru_cache_entry,
+ lru_list);
+ btrfs_lru_cache_remove(cache, lru_entry);
+ }
+
+ list_add_tail(&new_entry->lru_list, &cache->lru_list);
+ cache->size++;
+
+ return 0;
+}
+
+/*
+ * Empty a cache.
+ *
+ * @cache: The cache to empty.
+ *
+ * Removes all entries from the cache.
+ */
+void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache)
+{
+ struct btrfs_lru_cache_entry *entry;
+ struct btrfs_lru_cache_entry *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list)
+ btrfs_lru_cache_remove(cache, entry);
+
+ ASSERT(cache->size == 0);
+ ASSERT(mtree_empty(&cache->entries));
+}
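
For context, here is a minimal usage sketch of the API added above. It is not
part of the commit: the my_dir_entry type and the remember_dir()/find_dir()
helpers are hypothetical, invented for illustration, while the
btrfs_lru_cache_entry fields (key, gen) and the store/lookup/eviction
semantics follow the code shown in this diff. Note that the embedded cache
entry should be the first member of the containing struct, since
btrfs_lru_cache_remove() kfree()s the entry pointer directly on eviction.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/container_of.h>
#include "lru_cache.h"

/*
 * Hypothetical user type. The embedded cache entry is the first member so
 * that kfree() on the entry pointer (done by the cache on eviction or on
 * btrfs_lru_cache_clear()) frees the whole object.
 */
struct my_dir_entry {
	struct btrfs_lru_cache_entry entry;
	u64 mtime;
};

static struct btrfs_lru_cache dir_cache;

static void my_cache_setup(void)
{
	/* Keep at most 64 entries; the least recently used one is evicted. */
	btrfs_lru_cache_init(&dir_cache, 64);
}

static int remember_dir(u64 ino, u64 gen, u64 mtime)
{
	struct my_dir_entry *dir;
	int ret;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->entry.key = ino;
	dir->entry.gen = gen;
	dir->mtime = mtime;

	/*
	 * On success the cache owns @dir and frees it on eviction or clear;
	 * on failure (including -EEXIST) the caller still owns it.
	 */
	ret = btrfs_lru_cache_store(&dir_cache, &dir->entry, GFP_KERNEL);
	if (ret)
		kfree(dir);
	return ret;
}

static struct my_dir_entry *find_dir(u64 ino, u64 gen)
{
	struct btrfs_lru_cache_entry *raw;

	/* A hit also moves the entry to the most recent end of the LRU list. */
	raw = btrfs_lru_cache_lookup(&dir_cache, ino, gen);
	if (!raw)
		return NULL;
	return container_of(raw, struct my_dir_entry, entry);
}

The design point worth noting is that the maple tree maps each key to a small
list head rather than to a single entry, so entries sharing a key but
differing in generation can coexist; the gen value disambiguates them at
lookup time, which is why both store and lookup operate on the (key, gen)
pair.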