From d1d7737fd9df0cc57cd276b0189faf8c92c1426f Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 4 Apr 2022 01:09:26 -0400
Subject: bcachefs: Gap buffer for journal keys

Btree updates before we go RW work by inserting into the array of keys
that journal replay will insert - but inserting into a flat array is
O(n), meaning if btree_gc needs to update many alloc keys, we're
O(n^2).

Fortunately, the updates btree_gc does happen in sequential order,
which means a gap buffer works nicely here - this patch implements a
gap buffer for journal keys.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/util.h | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 1629d279f494..74bfa5faf470 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -826,6 +826,31 @@ do {									\
 #define array_remove_item(_array, _nr, _pos)				\
 	array_remove_items(_array, _nr, _pos, 1)
 
+static inline void __move_gap(void *array, size_t element_size,
+			      size_t nr, size_t size,
+			      size_t old_gap, size_t new_gap)
+{
+	size_t gap_end = old_gap + size - nr;
+
+	if (new_gap < old_gap) {
+		size_t move = old_gap - new_gap;
+
+		memmove(array + element_size * (gap_end - move),
+			array + element_size * (old_gap - move),
+			element_size * move);
+	} else if (new_gap > old_gap) {
+		size_t move = new_gap - old_gap;
+
+		memmove(array + element_size * old_gap,
+			array + element_size * gap_end,
+			element_size * move);
+	}
+}
+
+/* Move the gap in a gap buffer: */
+#define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
+	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
+
 #define bubble_sort(_base, _nr, _cmp)					\
 do {									\
 	ssize_t _i, _end;						\
-- 
cgit
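
Editor's note: a minimal usage sketch of the gap-buffer helper above, not
part of the patch. It assumes kernel helpers (u64, BUG_ON) and the move_gap()
macro from util.h; struct key, gap_buffer_insert() and the nr/size/gap
bookkeeping are hypothetical names for illustration only:

	/*
	 * Illustrative sketch: a gap buffer over an array with capacity
	 * `size` holding `nr` live elements.  Live elements occupy
	 * [0, gap) and [gap + size - nr, size); the hole in between is
	 * the gap.
	 */
	struct key { u64 x; };

	static void gap_buffer_insert(struct key *keys, size_t *nr, size_t size,
				      size_t *gap, size_t pos, struct key new_key)
	{
		BUG_ON(*nr == size);	/* caller must grow the array first */

		/*
		 * Moving the gap costs O(|pos - old gap|) memmove work, so
		 * insertions that arrive in sequential order (as btree_gc's
		 * alloc key updates do) stay cheap instead of O(nr) each:
		 */
		move_gap(keys, *nr, size, *gap, pos);
		*gap = pos;

		keys[(*gap)++] = new_key;	/* fill the front of the gap */
		(*nr)++;
	}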