Diffstat (limited to 'drivers/md/bcache/bset.c')
 drivers/md/bcache/bset.c | 1148 +++++++++++++++++++++++++++----------------
 1 file changed, 673 insertions(+), 475 deletions(-)
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 1d27d3af3251..463eb13bd0b2 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Code for working with individual keys, and sorted sets of keys within a
* btree node
@@ -5,175 +6,183 @@
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
-#include "btree.h"
-#include "debug.h"
+#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
+#include "util.h"
+#include "bset.h"
+
+#include <linux/console.h>
+#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>
-/* Keylists */
+#ifdef CONFIG_BCACHE_DEBUG
-void bch_keylist_copy(struct keylist *dest, struct keylist *src)
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
- *dest = *src;
+ struct bkey *k, *next;
+
+ for (k = i->start; k < bset_bkey_last(i); k = next) {
+ next = bkey_next(k);
+
+ pr_err("block %u key %u/%u: ", set,
+ (unsigned int) ((u64 *) k - i->d), i->keys);
- if (src->list == src->d) {
- size_t n = (uint64_t *) src->top - src->d;
- dest->top = (struct bkey *) &dest->d[n];
- dest->list = dest->d;
+ if (b->ops->key_dump)
+ b->ops->key_dump(b, k);
+ else
+ pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+ if (next < bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ?
+ &START_KEY(next) : next) > 0)
+ pr_err("Key skipped backwards\n");
}
}
-int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+void bch_dump_bucket(struct btree_keys *b)
{
- unsigned oldsize = (uint64_t *) l->top - l->list;
- unsigned newsize = oldsize + 2 + nptrs;
- uint64_t *new;
-
- /* The journalling code doesn't handle the case where the keys to insert
- * is bigger than an empty write: If we just return -ENOMEM here,
- * bio_insert() and bio_invalidate() will insert the keys created so far
- * and finish the rest when the keylist is empty.
- */
- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
- return -ENOMEM;
+ unsigned int i;
- newsize = roundup_pow_of_two(newsize);
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+ bch_dump_bset(b, b->set[i].data,
+ bset_sector_offset(b, b->set[i].data));
+ console_unlock();
+}
- if (newsize <= KEYLIST_INLINE ||
- roundup_pow_of_two(oldsize) == newsize)
- return 0;
+int __bch_count_data(struct btree_keys *b)
+{
+ unsigned int ret = 0;
+ struct btree_iter_stack iter;
+ struct bkey *k;
+
+ if (b->ops->is_extents)
+ for_each_key(b, k, &iter)
+ ret += KEY_SIZE(k);
+ return ret;
+}
- new = krealloc(l->list == l->d ? NULL : l->list,
- sizeof(uint64_t) * newsize, GFP_NOIO);
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+ va_list args;
+ struct bkey *k, *p = NULL;
+ struct btree_iter_stack iter;
+ const char *err;
+
+ for_each_key(b, k, &iter) {
+ if (b->ops->is_extents) {
+ err = "Keys out of order";
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+ goto bug;
+
+ if (bch_ptr_invalid(b, k))
+ continue;
- if (!new)
- return -ENOMEM;
+ err = "Overlapping keys";
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+ goto bug;
+ } else {
+ if (bch_ptr_bad(b, k))
+ continue;
- if (l->list == l->d)
- memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+ err = "Duplicate keys";
+ if (p && !bkey_cmp(p, k))
+ goto bug;
+ }
+ p = k;
+ }
+#if 0
+ err = "Key larger than btree node key";
+ if (p && bkey_cmp(p, &b->key) > 0)
+ goto bug;
+#endif
+ return;
+bug:
+ bch_dump_bucket(b);
- l->list = new;
- l->top = (struct bkey *) (&l->list[oldsize]);
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
- return 0;
+ panic("bch_check_keys error: %s:\n", err);
}
-struct bkey *bch_keylist_pop(struct keylist *l)
+static void bch_btree_iter_next_check(struct btree_iter *iter)
{
- struct bkey *k = l->bottom;
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
- if (k == l->top)
- return NULL;
-
- while (bkey_next(k) != l->top)
- k = bkey_next(k);
-
- return l->top = k;
+ if (next < iter->data->end &&
+ bkey_cmp(k, iter->b->ops->is_extents ?
+ &START_KEY(next) : next) > 0) {
+ bch_dump_bucket(iter->b);
+ panic("Key skipped backwards\n");
+ }
}
-/* Pointer validation */
+#else
-bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
-{
- unsigned i;
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
- if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
- goto bad;
+#endif
- if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
- goto bad;
+/* Keylists */
- if (!KEY_SIZE(k))
- return true;
+int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
+{
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + u64s;
+ uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
+ uint64_t *new_keys;
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
- size_t bucket = PTR_BUCKET_NR(c, k, i);
- size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
- bucket < ca->sb.first_bucket ||
- bucket >= ca->sb.nbuckets)
- goto bad;
- }
+ newsize = roundup_pow_of_two(newsize);
- return false;
-bad:
- cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k));
- return true;
-}
+ if (newsize <= KEYLIST_INLINE ||
+ roundup_pow_of_two(oldsize) == newsize)
+ return 0;
-bool bch_ptr_bad(struct btree *b, const struct bkey *k)
-{
- struct bucket *g;
- unsigned i, stale;
+ new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
- if (!bkey_cmp(k, &ZERO_KEY) ||
- !KEY_PTRS(k) ||
- bch_ptr_invalid(b, k))
- return true;
+ if (!new_keys)
+ return -ENOMEM;
- if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
- return true;
+ if (!old_keys)
+ memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(b->c, k, i)) {
- g = PTR_BUCKET(b->c, k, i);
- stale = ptr_stale(b->c, k, i);
+ l->keys_p = new_keys;
+ l->top_p = new_keys + oldsize;
- btree_bug_on(stale > 96, b,
- "key too stale: %i, need_gc %u",
- stale, b->c->need_gc);
+ return 0;
+}
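
Worth noting how cheap repeated appends are under this policy: sizes are rounded to powers of two, so a grow is a no-op whenever the old allocation already rounds up to the same bucket. A standalone userspace sketch of just the size check (helper names with a trailing underscore are mine, not the kernel's):

#include <stdio.h>

static unsigned long roundup_pow_of_two_(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* Mirror of the size check in __bch_keylist_realloc() */
static int needs_realloc(unsigned long oldsize, unsigned long u64s,
			 unsigned long inline_u64s)
{
	unsigned long newsize = roundup_pow_of_two_(oldsize + u64s);

	/* Still fits inline, or the old allocation already rounds up
	 * to the same power of two: nothing to do. */
	if (newsize <= inline_u64s ||
	    roundup_pow_of_two_(oldsize) == newsize)
		return 0;
	return 1;
}

int main(void)
{
	/* 10 u64s in use, appending 3 more: 13 rounds up to 16, and so
	 * does 10, so no reallocation is needed. */
	printf("realloc? %d\n", needs_realloc(10, 3, 4));
	return 0;
}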
- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
- b, "stale dirty pointer");
+/* Pop the top key of the keylist by pointing l->top at its previous key */
+struct bkey *bch_keylist_pop(struct keylist *l)
+{
+ struct bkey *k = l->keys;
- if (stale)
- return true;
+ if (k == l->top)
+ return NULL;
-#ifdef CONFIG_BCACHE_EDEBUG
- if (!mutex_trylock(&b->c->bucket_lock))
- continue;
+ while (bkey_next(k) != l->top)
+ k = bkey_next(k);
- if (b->level) {
- if (KEY_DIRTY(k) ||
- g->prio != BTREE_PRIO ||
- (b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_METADATA))
- goto bug;
-
- } else {
- if (g->prio == BTREE_PRIO)
- goto bug;
-
- if (KEY_DIRTY(k) &&
- b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_DIRTY)
- goto bug;
- }
- mutex_unlock(&b->c->bucket_lock);
-#endif
- }
+ return l->top = k;
+}
- return false;
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
- mutex_unlock(&b->c->bucket_lock);
- btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
- pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
- return true;
-#endif
+/* Pop the bottom key of the keylist and update l->top_p */
+void bch_keylist_pop_front(struct keylist *l)
+{
+ l->top_p -= bkey_u64s(l->keys);
+
+ memmove(l->keys,
+ bkey_next(l->keys),
+ bch_keylist_bytes(l));
}
/* Key/pointer manipulation */
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
- unsigned i)
+ unsigned int i)
{
BUG_ON(i > KEY_PTRS(src));
@@ -187,7 +196,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
- unsigned i, len = 0;
+ unsigned int i, len = 0;
if (bkey_cmp(where, &START_KEY(k)) <= 0)
return false;
@@ -207,7 +216,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
- unsigned len = 0;
+ unsigned int len = 0;
if (bkey_cmp(where, k) >= 0)
return false;
@@ -224,60 +233,142 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
return true;
}
-static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+/* Auxiliary search trees */
+
+/* 32 bits total: */
+#define BKEY_MID_BITS 3
+#define BKEY_EXPONENT_BITS 7
+#define BKEY_MANTISSA_BITS (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
+#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+ unsigned int exponent:BKEY_EXPONENT_BITS;
+ unsigned int m:BKEY_MID_BITS;
+ unsigned int mantissa:BKEY_MANTISSA_BITS;
+} __packed;
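
A quick userspace check (not part of the patch) that these three bitfields really pack into the 32 bits the comment promises; __attribute__((packed)) stands in for the kernel's __packed:

#include <assert.h>
#include <stdio.h>

#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)

struct bkey_float {
	unsigned int exponent:BKEY_EXPONENT_BITS;
	unsigned int m:BKEY_MID_BITS;
	unsigned int mantissa:BKEY_MANTISSA_BITS;
} __attribute__((packed));

int main(void)
{
	/* 7 + 3 + 22 = 32 bits, i.e. 4 bytes per tree node */
	assert(sizeof(struct bkey_float) == 4);
	printf("mantissa bits: %d\n", BKEY_MANTISSA_BITS);
	return 0;
}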
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bset_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE 128
+
+/* Space required for the btree node keys */
+static inline size_t btree_keys_bytes(struct btree_keys *b)
{
- return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
- ~((uint64_t)1 << 63);
+ return PAGE_SIZE << b->page_order;
}
-/* Tries to merge l and r: l should be lower than r
- * Returns true if we were able to merge. If we did merge, l will be the merged
- * key, r will be untouched.
- */
-bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
- unsigned i;
+ return btree_keys_bytes(b) / BSET_CACHELINE;
+}
- if (key_merging_disabled(b->c))
- return false;
+/* Space required for the auxiliary search trees */
+static inline size_t bset_tree_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(struct bkey_float);
+}
- if (KEY_PTRS(l) != KEY_PTRS(r) ||
- KEY_DIRTY(l) != KEY_DIRTY(r) ||
- bkey_cmp(l, &START_KEY(r)))
- return false;
+/* Space required for the prev pointers */
+static inline size_t bset_prev_bytes(struct btree_keys *b)
+{
+ return btree_keys_cachelines(b) * sizeof(uint8_t);
+}
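
To put numbers on the three helpers above, a sketch assuming page_order = 3 and 4KiB pages (illustrative values, not mandated by the patch): a 32KiB node splits into 256 BSET_CACHELINE chunks, needing 1KiB of bkey_float entries and 256 bytes of prev table.

#include <stdio.h>

#define BSET_CACHELINE	128

int main(void)
{
	unsigned page_order = 3;			/* assumption */
	size_t bytes = 4096UL << page_order;		/* btree_keys_bytes() */
	size_t cachelines = bytes / BSET_CACHELINE;	/* btree_keys_cachelines() */

	/* sizeof(struct bkey_float) == 4, sizeof(uint8_t) == 1 */
	printf("node %zu bytes -> %zu cachelines, tree %zu bytes, prev %zu bytes\n",
	       bytes, cachelines, cachelines * 4, cachelines * 1);
	return 0;
}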
- for (i = 0; i < KEY_PTRS(l); i++)
- if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
- PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
- return false;
+/* Memory allocation */
- /* Keys with no pointers aren't restricted to one bucket and could
- * overflow KEY_SIZE
- */
- if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
- SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
- SET_KEY_SIZE(l, USHRT_MAX);
+void bch_btree_keys_free(struct btree_keys *b)
+{
+ struct bset_tree *t = b->set;
- bch_cut_front(l, r);
- return false;
- }
+ if (bset_prev_bytes(b) < PAGE_SIZE)
+ kfree(t->prev);
+ else
+ free_pages((unsigned long) t->prev,
+ get_order(bset_prev_bytes(b)));
- if (KEY_CSUM(l)) {
- if (KEY_CSUM(r))
- l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
- else
- SET_KEY_CSUM(l, 0);
- }
+ if (bset_tree_bytes(b) < PAGE_SIZE)
+ kfree(t->tree);
+ else
+ free_pages((unsigned long) t->tree,
+ get_order(bset_tree_bytes(b)));
- SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
- SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+ free_pages((unsigned long) t->data, b->page_order);
- return true;
+ t->prev = NULL;
+ t->tree = NULL;
+ t->data = NULL;
+}
+
+int bch_btree_keys_alloc(struct btree_keys *b,
+ unsigned int page_order,
+ gfp_t gfp)
+{
+ struct bset_tree *t = b->set;
+
+ BUG_ON(t->data);
+
+ b->page_order = page_order;
+
+ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
+ if (!t->data)
+ goto err;
+
+ t->tree = bset_tree_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_tree_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+ if (!t->tree)
+ goto err;
+
+ t->prev = bset_prev_bytes(b) < PAGE_SIZE
+ ? kmalloc(bset_prev_bytes(b), gfp)
+ : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+ if (!t->prev)
+ goto err;
+
+ return 0;
+err:
+ bch_btree_keys_free(b);
+ return -ENOMEM;
+}
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+ bool *expensive_debug_checks)
+{
+ b->ops = ops;
+ b->expensive_debug_checks = expensive_debug_checks;
+ b->nsets = 0;
+ b->last_set_unwritten = 0;
+
+	/*
+	 * struct btree_keys is embedded in struct btree, and struct
+	 * bset_tree is embedded in struct btree_keys. They are all
+	 * initialized to 0 by kzalloc() in mca_bucket_alloc(), and
+	 * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
+	 * don't have to initialize b->set[].size and b->set[].data here
+	 * any more.
+	 */
}
/* Binary tree stuff for auxiliary search trees */
-static unsigned inorder_next(unsigned j, unsigned size)
+/*
+ * Return the array index that follows j in an in-order traversal of a
+ * binary tree which is stored in a linear array
+ */
+static unsigned int inorder_next(unsigned int j, unsigned int size)
{
if (j * 2 + 1 < size) {
j = j * 2 + 1;
@@ -290,7 +381,11 @@ static unsigned inorder_next(unsigned j, unsigned size)
return j;
}
-static unsigned inorder_prev(unsigned j, unsigned size)
+/*
+ * Return the array index that precedes j in an in-order traversal of a
+ * binary tree which is stored in a linear array
+ */
+static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
if (j * 2 < size) {
j = j * 2;
@@ -303,7 +398,8 @@ static unsigned inorder_prev(unsigned j, unsigned size)
return j;
}
-/* I have no idea why this code works... and I'm the one who wrote it
+/*
+ * I have no idea why this code works... and I'm the one who wrote it
*
* However, I do know what it does:
* Given a binary tree constructed in an array (i.e. how you normally implement
@@ -316,10 +412,12 @@ static unsigned inorder_prev(unsigned j, unsigned size)
* extra is a function of size:
* extra = (size - rounddown_pow_of_two(size - 1)) << 1;
*/
-static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
+static unsigned int __to_inorder(unsigned int j,
+ unsigned int size,
+ unsigned int extra)
{
- unsigned b = fls(j);
- unsigned shift = fls(size - 1) - b;
+ unsigned int b = fls(j);
+ unsigned int shift = fls(size - 1) - b;
j ^= 1U << (b - 1);
j <<= 1;
@@ -332,14 +430,20 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
return j;
}
-static unsigned to_inorder(unsigned j, struct bset_tree *t)
+/*
+ * Return the cacheline index in bset_tree->data, where j is the index
+ * into the linear array which stores the auxiliary binary tree
+ */
+static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
return __to_inorder(j, t->size, t->extra);
}
-static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
+static unsigned int __inorder_to_tree(unsigned int j,
+ unsigned int size,
+ unsigned int extra)
{
- unsigned shift;
+ unsigned int shift;
if (j > extra)
j += j - extra;
@@ -352,7 +456,11 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
return j;
}
-static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
+/*
+ * Return the index into the linear array which stores the auxiliary
+ * binary tree, where j is the cacheline index of t->data.
+ */
+static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
return __inorder_to_tree(j, t->size, t->extra);
}
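
The hunks above only show fragments of __to_inorder() and __inorder_to_tree(), so here is a userspace round-trip check with both bodies reconstructed from the full file (treat them as my reading of the source rather than the patch itself; fls_() stands in for the kernel's fls()):

#include <assert.h>
#include <strings.h>	/* ffs() */

static unsigned fls_(unsigned x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls_(j);
	unsigned shift = fls_(size - 1) - b;

	j ^= 1U << (b - 1);
	j <<= 1;
	j |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;
	return j;
}

static unsigned inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
	unsigned shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);
	j >>= shift;
	j |= (1U << fls_(size - 1)) >> shift;	/* roundup_pow_of_two(size) >> shift */
	return j;
}

int main(void)
{
	unsigned size, j;

	for (size = 2; size < 4096; size++) {
		/* extra as defined in the comment above */
		unsigned extra = (size - (1U << (fls_(size - 1) - 1))) << 1;

		for (j = 1; j < size; j++)
			assert(inorder_to_tree(to_inorder(j, size, extra),
					       size, extra) == j);
	}
	return 0;
}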
@@ -363,14 +471,15 @@ void inorder_test(void)
unsigned long done = 0;
ktime_t start = ktime_get();
- for (unsigned size = 2;
+ for (unsigned int size = 2;
size < 65536000;
size++) {
- unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
- unsigned i = 1, j = rounddown_pow_of_two(size - 1);
+ unsigned int extra =
+ (size - rounddown_pow_of_two(size - 1)) << 1;
+ unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
if (!(size % 4096))
- printk(KERN_NOTICE "loop %u, %llu per us\n", size,
+ pr_notice("loop %u, %llu per us\n", size,
done / ktime_us_delta(ktime_get(), start));
while (1) {
@@ -413,28 +522,31 @@ void inorder_test(void)
* of the previous key so we can walk backwards to it from t->tree[j]'s key.
*/
-static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
- unsigned offset)
+static struct bkey *cacheline_to_bkey(struct bset_tree *t,
+ unsigned int cacheline,
+ unsigned int offset)
{
return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}
-static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}
-static unsigned bkey_to_cacheline_offset(struct bkey *k)
+static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
+ unsigned int cacheline,
+ struct bkey *k)
{
- return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+ return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}
-static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}
-static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
+static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
@@ -443,34 +555,41 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
* For the write set - the one we're currently inserting keys into - we don't
* maintain a full search tree, we just keep a simple lookup table in t->prev.
*/
-static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
+static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
-#ifdef CONFIG_X86_64
- asm("shrd %[shift],%[high],%[low]"
- : [low] "+Rm" (low)
- : [high] "R" (high),
- [shift] "ci" (shift)
- : "cc");
-#else
low >>= shift;
low |= (high << 1) << (63U - shift);
-#endif
return low;
}
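
With the x86-64 shrd asm dropped, the portable fallback is all that's left. The (high << 1) << (63U - shift) double shift matters: a plain high << (64 - shift) would be undefined at shift == 0. A tiny check (mine, not part of the patch):

#include <assert.h>
#include <stdint.h>

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}

int main(void)
{
	/* Right-shifting the 128-bit value 0x..00FF:FF00.. by 56 bits
	 * splices the low byte of `high` onto the top byte of `low`. */
	assert(shrd128(0x00000000000000FFULL, 0xFF00000000000000ULL, 56)
	       == 0xFFFFULL);
	/* shift == 0 must return `low` unchanged */
	assert(shrd128(~0ULL, 0x1234ULL, 0) == 0x1234ULL);
	return 0;
}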
-static inline unsigned bfloat_mantissa(const struct bkey *k,
+/*
+ * Calculate mantissa value for struct bkey_float.
+ * If the most significant bit of f->exponent is not set, then
+ * - f->exponent >> 6 is 0
+ * - p[0] points to bkey->low
+ * - p[-1] borrows bits from KEY_INODE() of bkey->high
+ * If the most significant bit of f->exponent is set, then
+ * - f->exponent >> 6 is 1
+ * - p[0] points to bits from KEY_INODE() of bkey->high
+ * - p[-1] points to other bits from KEY_INODE() of
+ *   bkey->high too.
+ * See make_bfloat() to check when the most significant bit of
+ * f->exponent is set or not.
+ */
+static inline unsigned int bfloat_mantissa(const struct bkey *k,
struct bkey_float *f)
{
const uint64_t *p = &k->low - (f->exponent >> 6);
+
return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
-static void make_bfloat(struct bset_tree *t, unsigned j)
+static void make_bfloat(struct bset_tree *t, unsigned int j)
{
struct bkey_float *f = &t->tree[j];
struct bkey *m = tree_to_bkey(t, j);
@@ -481,12 +600,22 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
: tree_to_prev_bkey(t, j >> ffs(j));
struct bkey *r = is_power_of_2(j + 1)
- ? node(t->data, t->data->keys - bkey_u64s(&t->end))
+ ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
: tree_to_bkey(t, j >> (ffz(j) + 1));
BUG_ON(m < l || m > r);
BUG_ON(bkey_next(p) != m);
+	/*
+	 * If l and r have different KEY_INODE values (different backing
+	 * devices), f->exponent records how many least significant bits
+	 * are different in KEY_INODE values and sets the most significant
+	 * bit to 1 (by adding 64).
+	 * If l and r have the same KEY_INODE value, f->exponent records
+	 * how many bits differ in the least significant bits of bkey->low.
+	 * See bfloat_mantissa() for how the most significant bit of
+	 * f->exponent is used to calculate the bfloat mantissa value.
+	 */
if (KEY_INODE(l) != KEY_INODE(r))
f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
else
@@ -505,43 +634,72 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
f->exponent = 127;
}
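
A compact illustration (my own numbers, not the patch's) of the exponent rule: differing KEY_INODE values push the exponent past 64, so bfloat_mantissa() reads from the inode bits; equal inodes keep it below 64 and the mantissa comes from bkey->low:

#include <stdint.h>
#include <stdio.h>

static unsigned fls64_(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static unsigned exponent_for(uint64_t l_inode, uint64_t r_inode,
			     uint64_t l_low, uint64_t r_low)
{
	if (l_inode != r_inode)
		return fls64_(r_inode ^ l_inode) + 64;	/* exponent >> 6 == 1 */
	return fls64_(l_low ^ r_low);			/* exponent >> 6 == 0 */
}

int main(void)
{
	/* same inode, lows first differ at bit 11: exponent 12, MSB clear */
	printf("%u\n", exponent_for(5, 5, 0x1000, 0x1f00));
	/* inodes differ at bit 1: exponent 2 + 64 = 66, MSB set */
	printf("%u\n", exponent_for(4, 6, 0, 0));
	return 0;
}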
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
- if (t != b->sets) {
- unsigned j = roundup(t[-1].size,
+ if (t != b->set) {
+ unsigned int j = roundup(t[-1].size,
64 / sizeof(struct bkey_float));
t->tree = t[-1].tree + j;
t->prev = t[-1].prev + j;
}
- while (t < b->sets + MAX_BSETS)
+ while (t < b->set + MAX_BSETS)
t++->size = 0;
}
-static void bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
- struct bset_tree *t = b->sets + b->nsets;
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(b->last_set_unwritten);
+ b->last_set_unwritten = 1;
bset_alloc_tree(b, t);
- if (t->tree != b->sets->tree + bset_tree_space(b)) {
- t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+ if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
+ t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
t->size = 1;
}
}
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
+{
+ if (i != b->set->data) {
+ b->set[++b->nsets].data = i;
+ i->seq = b->set->data->seq;
+ } else
+ get_random_bytes(&i->seq, sizeof(uint64_t));
+
+ i->magic = magic;
+ i->version = 0;
+ i->keys = 0;
+
+ bch_bset_build_unwritten_tree(b);
+}
+
+/*
+ * Build the auxiliary binary tree 'struct bset_tree *t'; this tree is used
+ * to accelerate bkey search in a btree node (pointed to by bset_tree->data
+ * in memory). After searching the auxiliary tree by calling
+ * bset_search_tree(), a struct bset_search_iter is returned which indicates
+ * the range [l, r] in bset_tree->data where the bkey being searched for
+ * might be. A following linear comparison then does the exact search; see
+ * __bch_bset_search() for how the auxiliary tree is used.
+ */
+void bch_bset_build_written_tree(struct btree_keys *b)
{
- struct bset_tree *t = b->sets + b->nsets;
- struct bkey *k = t->data->start;
- unsigned j, cacheline = 1;
+ struct bset_tree *t = bset_tree_last(b);
+ struct bkey *prev = NULL, *k = t->data->start;
+ unsigned int j, cacheline = 1;
+
+ b->last_set_unwritten = 0;
bset_alloc_tree(b, t);
- t->size = min_t(unsigned,
- bkey_to_cacheline(t, end(t->data)),
- b->sets->tree + bset_tree_space(b) - t->tree);
+ t->size = min_t(unsigned int,
+ bkey_to_cacheline(t, bset_bkey_last(t->data)),
+ b->set->tree + btree_keys_cachelines(b) - t->tree);
if (t->size < 2) {
t->size = 0;
@@ -554,16 +712,16 @@ static void bset_build_written_tree(struct btree *b)
for (j = inorder_next(0, t->size);
j;
j = inorder_next(j, t->size)) {
- while (bkey_to_cacheline(t, k) != cacheline)
+ while (bkey_to_cacheline(t, k) < cacheline) {
+ prev = k;
k = bkey_next(k);
+ }
- t->prev[j] = bkey_u64s(k);
- k = bkey_next(k);
- cacheline++;
- t->tree[j].m = bkey_to_cacheline_offset(k);
+ t->prev[j] = bkey_u64s(prev);
+ t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
}
- while (bkey_next(k) != end(t->data))
+ while (bkey_next(k) != bset_bkey_last(t->data))
k = bkey_next(k);
t->end = *k;
@@ -575,13 +733,15 @@ static void bset_build_written_tree(struct btree *b)
make_bfloat(t, j);
}
-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+/* Insert */
+
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
struct bset_tree *t;
- unsigned inorder, j = 1;
+ unsigned int inorder, j = 1;
- for (t = b->sets; t <= &b->sets[b->nsets]; t++)
- if (k < end(t->data))
+ for (t = b->set; t <= bset_tree_last(b); t++)
+ if (k < bset_bkey_last(t->data))
goto found_set;
BUG();
@@ -594,7 +754,7 @@ found_set:
if (k == t->data->start)
goto fix_left;
- if (bkey_next(k) == end(t->data)) {
+ if (bkey_next(k) == bset_bkey_last(t->data)) {
t->end = *k;
goto fix_right;
}
@@ -620,25 +780,28 @@ fix_right: do {
} while (j < t->size);
}
-void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
+ struct bset_tree *t,
+ struct bkey *k)
{
- struct bset_tree *t = &b->sets[b->nsets];
- unsigned shift = bkey_u64s(k);
- unsigned j = bkey_to_cacheline(t, k);
+ unsigned int shift = bkey_u64s(k);
+ unsigned int j = bkey_to_cacheline(t, k);
/* We're getting called from btree_split() or btree_gc, just bail out */
if (!t->size)
return;
- /* k is the key we just inserted; we need to find the entry in the
+ /*
+ * k is the key we just inserted; we need to find the entry in the
* lookup table for the first key that is strictly greater than k:
* it's either k's cacheline or the next one
*/
- if (j < t->size &&
- table_to_bkey(t, j) <= k)
+ while (j < t->size &&
+ table_to_bkey(t, j) <= k)
j++;
- /* Adjust all the lookup table entries, and find a new key for any that
+ /*
+ * Adjust all the lookup table entries, and find a new key for any that
* have gotten too big
*/
for (; j < t->size; j++) {
@@ -650,56 +813,136 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
while (k < cacheline_to_bkey(t, j, 0))
k = bkey_next(k);
- t->prev[j] = bkey_to_cacheline_offset(k);
+ t->prev[j] = bkey_to_cacheline_offset(t, j, k);
}
}
- if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+ if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
return;
/* Possibly add a new entry to the end of the lookup table */
for (k = table_to_bkey(t, t->size - 1);
- k != end(t->data);
+ k != bset_bkey_last(t->data);
k = bkey_next(k))
if (t->size == bkey_to_cacheline(t, k)) {
- t->prev[t->size] = bkey_to_cacheline_offset(k);
+ t->prev[t->size] =
+ bkey_to_cacheline_offset(t, t->size, k);
t->size++;
}
}
-void bch_bset_init_next(struct btree *b)
+/*
+ * Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
- struct bset *i = write_block(b);
+ if (!b->ops->key_merge)
+ return false;
- if (i != b->sets[0].data) {
- b->sets[++b->nsets].data = i;
- i->seq = b->sets[0].data->seq;
- } else
- get_random_bytes(&i->seq, sizeof(uint64_t));
+ /*
+ * Generic header checks
+ * Assumes left and right are in order
+ * Left and right must be exactly aligned
+ */
+ if (!bch_bkey_equal_header(l, r) ||
+ bkey_cmp(l, &START_KEY(r)))
+ return false;
- i->magic = bset_magic(b->c);
- i->version = 0;
- i->keys = 0;
+ return b->ops->key_merge(b, l, r);
+}
+
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
+ struct bkey *insert)
+{
+ struct bset_tree *t = bset_tree_last(b);
+
+ BUG_ON(!b->last_set_unwritten);
+ BUG_ON(bset_byte_offset(b, t->data) +
+ __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
+ PAGE_SIZE << b->page_order);
- bset_build_unwritten_tree(b);
+ memmove((uint64_t *) where + bkey_u64s(insert),
+ where,
+ (void *) bset_bkey_last(t->data) - (void *) where);
+
+ t->data->keys += bkey_u64s(insert);
+ bkey_copy(where, insert);
+ bch_bset_fix_lookup_table(b, t, where);
}
+unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ struct bkey *replace_key)
+{
+ unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+ struct bset *i = bset_tree_last(b)->data;
+ struct bkey *m, *prev = NULL;
+ struct btree_iter_stack iter;
+ struct bkey preceding_key_on_stack = ZERO_KEY;
+ struct bkey *preceding_key_p = &preceding_key_on_stack;
+
+ BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+	/*
+	 * If k has a preceding key, preceding_key_p will be set to the
+	 * address of k's preceding key; otherwise preceding_key_p will
+	 * be set to NULL inside preceding_key().
+	 */
+ if (b->ops->is_extents)
+ preceding_key(&START_KEY(k), &preceding_key_p);
+ else
+ preceding_key(k, &preceding_key_p);
+
+ m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+
+ if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+ return status;
+
+ status = BTREE_INSERT_STATUS_INSERT;
+
+ while (m != bset_bkey_last(i) &&
+ bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) {
+ prev = m;
+ m = bkey_next(m);
+ }
+
+ /* prev is in the tree, if we merge we're done */
+ status = BTREE_INSERT_STATUS_BACK_MERGE;
+ if (prev &&
+ bch_bkey_try_merge(b, prev, k))
+ goto merged;
+#if 0
+ status = BTREE_INSERT_STATUS_OVERWROTE;
+ if (m != bset_bkey_last(i) &&
+ KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+ goto copy;
+#endif
+ status = BTREE_INSERT_STATUS_FRONT_MERGE;
+ if (m != bset_bkey_last(i) &&
+ bch_bkey_try_merge(b, k, m))
+ goto copy;
+
+ bch_bset_insert(b, m, k);
+copy: bkey_copy(m, k);
+merged:
+ return status;
+}
+
+/* Lookup */
+
struct bset_search_iter {
struct bkey *l, *r;
};
-static struct bset_search_iter bset_search_write_set(struct btree *b,
- struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
const struct bkey *search)
{
- unsigned li = 0, ri = t->size;
-
- BUG_ON(!b->nsets &&
- t->size < bkey_to_cacheline(t, end(t->data)));
+ unsigned int li = 0, ri = t->size;
while (li + 1 != ri) {
- unsigned m = (li + ri) >> 1;
+ unsigned int m = (li + ri) >> 1;
if (bkey_cmp(table_to_bkey(t, m), search) > 0)
ri = m;
@@ -709,43 +952,37 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
return (struct bset_search_iter) {
table_to_bkey(t, li),
- ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+ ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
};
}
-static struct bset_search_iter bset_search_tree(struct btree *b,
- struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
const struct bkey *search)
{
struct bkey *l, *r;
struct bkey_float *f;
- unsigned inorder, j, n = 1;
+ unsigned int inorder, j, n = 1;
do {
- unsigned p = n << 4;
- p &= ((int) (p - t->size)) >> 31;
+ unsigned int p = n << 4;
- prefetch(&t->tree[p]);
+ if (p < t->size)
+ prefetch(&t->tree[p]);
j = n;
f = &t->tree[j];
- /*
- * n = (f->mantissa > bfloat_mantissa())
- * ? j * 2
- * : j * 2 + 1;
- *
- * We need to subtract 1 from f->mantissa for the sign bit trick
- * to work - that's done in make_bfloat()
- */
- if (likely(f->exponent != 127))
- n = j * 2 + (((unsigned)
- (f->mantissa -
- bfloat_mantissa(search, f))) >> 31);
- else
- n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
- ? j * 2
- : j * 2 + 1;
+ if (likely(f->exponent != 127)) {
+ if (f->mantissa >= bfloat_mantissa(search, f))
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ } else {
+ if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ }
} while (n < t->size);
inorder = to_inorder(j, t);
@@ -761,7 +998,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
f = &t->tree[inorder_next(j, t->size)];
r = cacheline_to_bkey(t, inorder, f->m);
} else
- r = end(t->data);
+ r = bset_bkey_last(t->data);
} else {
r = cacheline_to_bkey(t, inorder, f->m);
@@ -775,7 +1012,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
return (struct bset_search_iter) {l, r};
}
-struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search)
{
struct bset_search_iter i;
@@ -797,7 +1034,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
if (unlikely(!t->size)) {
i.l = t->data->start;
- i.r = end(t->data);
+ i.r = bset_bkey_last(t->data);
} else if (bset_written(b, t)) {
/*
* Each node in the auxiliary search tree covers a certain range
@@ -807,25 +1044,29 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
*/
if (unlikely(bkey_cmp(search, &t->end) >= 0))
- return end(t->data);
+ return bset_bkey_last(t->data);
if (unlikely(bkey_cmp(search, t->data->start) < 0))
return t->data->start;
- i = bset_search_tree(b, t, search);
- } else
- i = bset_search_write_set(b, t, search);
+ i = bset_search_tree(t, search);
+ } else {
+ BUG_ON(!b->nsets &&
+ t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
-#ifdef CONFIG_BCACHE_EDEBUG
- BUG_ON(bset_written(b, t) &&
- i.l != t->data->start &&
- bkey_cmp(tree_to_prev_bkey(t,
- inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
- search) > 0);
+ i = bset_search_write_set(t, search);
+ }
- BUG_ON(i.r != end(t->data) &&
- bkey_cmp(i.r, search) <= 0);
-#endif
+ if (btree_keys_expensive_checks(b)) {
+ BUG_ON(bset_written(b, t) &&
+ i.l != t->data->start &&
+ bkey_cmp(tree_to_prev_bkey(t,
+ inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+ search) > 0);
+
+ BUG_ON(i.r != bset_bkey_last(t->data) &&
+ bkey_cmp(i.r, search) <= 0);
+ }
while (likely(i.l != i.r) &&
bkey_cmp(i.l, search) <= 0)
@@ -836,12 +1077,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
/* Btree iterator */
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+ struct btree_iter_set);
+
static inline bool btree_iter_cmp(struct btree_iter_set l,
struct btree_iter_set r)
{
- int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
-
- return c ? c > 0 : l.k < r.k;
+ return bkey_cmp(l.k, r.k) > 0;
}
static inline bool btree_iter_end(struct btree_iter *iter)
@@ -858,27 +1100,44 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
btree_iter_cmp));
}
-struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
- struct bkey *search, struct bset_tree *start)
+static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
- iter->size = ARRAY_SIZE(iter->data);
- iter->used = 0;
- for (; start <= &b->sets[b->nsets]; start++) {
+ iter->iter.size = ARRAY_SIZE(iter->stack_data);
+ iter->iter.used = 0;
+
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->iter.b = b;
+#endif
+
+ for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(iter, ret, end(start->data));
+ bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search)
+{
+ return __bch_btree_iter_stack_init(b, iter, search, b->set);
+}
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+ btree_iter_cmp_fn *cmp)
{
- struct btree_iter_set unused;
+ struct btree_iter_set b __maybe_unused;
struct bkey *ret = NULL;
if (!btree_iter_end(iter)) {
+ bch_btree_iter_next_check(iter);
+
ret = iter->data->k;
iter->data->k = bkey_next(iter->data->k);
@@ -888,16 +1147,22 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
}
if (iter->data->k == iter->data->end)
- heap_pop(iter, unused, btree_iter_cmp);
+ heap_pop(iter, b, cmp);
else
- heap_sift(iter, 0, btree_iter_cmp);
+ heap_sift(iter, 0, cmp);
}
return ret;
}
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+ return __bch_btree_iter_next(iter, btree_iter_cmp);
+}
+
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
- struct btree *b, ptr_filter_fn fn)
+ struct btree_keys *b, ptr_filter_fn fn)
{
struct bkey *ret;
@@ -908,63 +1173,55 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
return ret;
}
-struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
-{
- struct btree_iter iter;
-
- bch_btree_iter_init(b, &iter, search);
- return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-}
-
/* Mergesort */
-static void btree_sort_fixup(struct btree_iter *iter)
+void bch_bset_sort_state_free(struct bset_sort_state *state)
{
- while (iter->used > 1) {
- struct btree_iter_set *top = iter->data, *i = top + 1;
- struct bkey *k;
-
- if (iter->used > 2 &&
- btree_iter_cmp(i[0], i[1]))
- i++;
+ mempool_exit(&state->pool);
+}
- for (k = i->k;
- k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
- k = bkey_next(k))
- if (top->k > i->k)
- __bch_cut_front(top->k, k);
- else if (KEY_SIZE(k))
- bch_cut_back(&START_KEY(k), top->k);
+int bch_bset_sort_state_init(struct bset_sort_state *state,
+ unsigned int page_order)
+{
+ spin_lock_init(&state->time.lock);
- if (top->k < i->k || k == i->k)
- break;
+ state->page_order = page_order;
+ state->crit_factor = int_sqrt(1 << page_order);
- heap_sift(iter, i - top, btree_iter_cmp);
- }
+ return mempool_init_page_pool(&state->pool, 1, page_order);
}
-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
bool fixup, bool remove_stale)
{
+ int i;
struct bkey *k, *last = NULL;
- bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+ BKEY_PADDED(k) tmp;
+ bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
? bch_ptr_bad
: bch_ptr_invalid;
+ /* Heapify the iterator, using our comparison function */
+ for (i = iter->used / 2 - 1; i >= 0; --i)
+ heap_sift(iter, i, b->ops->sort_cmp);
+
while (!btree_iter_end(iter)) {
- if (fixup && !b->level)
- btree_sort_fixup(iter);
+ if (b->ops->sort_fixup && fixup)
+ k = b->ops->sort_fixup(iter, &tmp.k);
+ else
+ k = NULL;
+
+ if (!k)
+ k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
- k = bch_btree_iter_next(iter);
if (bad(b, k))
continue;
if (!last) {
last = out->start;
bkey_copy(last, k);
- } else if (b->level ||
- !bch_bkey_try_merge(b, last, k)) {
+ } else if (!bch_bkey_try_merge(b, last, k)) {
last = bkey_next(last);
bkey_copy(last, k);
}
@@ -972,167 +1229,147 @@ static void btree_mergesort(struct btree *b, struct bset *out,
out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
- pr_debug("sorted %i keys", out->keys);
- bch_check_key_order(b, out);
+ pr_debug("sorted %i keys\n", out->keys);
}
-static void __btree_sort(struct btree *b, struct btree_iter *iter,
- unsigned start, unsigned order, bool fixup)
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
+ unsigned int start, unsigned int order, bool fixup,
+ struct bset_sort_state *state)
{
uint64_t start_time;
- bool remove_stale = !b->written;
- struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
+ bool used_mempool = false;
+ struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
order);
if (!out) {
- mutex_lock(&b->c->sort_lock);
- out = b->c->sort;
- order = ilog2(bucket_pages(b->c));
+ struct page *outp;
+
+ BUG_ON(order > state->page_order);
+
+ outp = mempool_alloc(&state->pool, GFP_NOIO);
+ out = page_address(outp);
+ used_mempool = true;
+ order = state->page_order;
}
start_time = local_clock();
- btree_mergesort(b, out, iter, fixup, remove_stale);
+ btree_mergesort(b, out, iter, fixup, false);
b->nsets = start;
- if (!fixup && !start && b->written)
- bch_btree_verify(b, out);
-
if (!start && order == b->page_order) {
/*
* Our temporary buffer is the same size as the btree node's
* buffer, we can just swap buffers instead of doing a big
* memcpy()
+ *
+		 * Don't worry even if 'out' was allocated from the mempool;
+		 * it can still be swapped here, because state->pool is a
+		 * page mempool created by mempool_init_page_pool(), which
+		 * allocates pages with alloc_pages().
*/
- out->magic = bset_magic(b->c);
- out->seq = b->sets[0].data->seq;
- out->version = b->sets[0].data->version;
- swap(out, b->sets[0].data);
-
- if (b->c->sort == b->sets[0].data)
- b->c->sort = out;
+ out->magic = b->set->data->magic;
+ out->seq = b->set->data->seq;
+ out->version = b->set->data->version;
+ swap(out, b->set->data);
} else {
- b->sets[start].data->keys = out->keys;
- memcpy(b->sets[start].data->start, out->start,
- (void *) end(out) - (void *) out->start);
+ b->set[start].data->keys = out->keys;
+ memcpy(b->set[start].data->start, out->start,
+ (void *) bset_bkey_last(out) - (void *) out->start);
}
- if (out == b->c->sort)
- mutex_unlock(&b->c->sort_lock);
+ if (used_mempool)
+ mempool_free(virt_to_page(out), &state->pool);
else
free_pages((unsigned long) out, order);
- if (b->written)
- bset_build_written_tree(b);
+ bch_bset_build_written_tree(b);
- if (!start) {
- spin_lock(&b->c->sort_time_lock);
- bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
- }
+ if (!start)
+ bch_time_stats_update(&state->time, start_time);
}
-void bch_btree_sort_partial(struct btree *b, unsigned start)
+void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ struct bset_sort_state *state)
{
- size_t oldsize = 0, order = b->page_order, keys = 0;
- struct btree_iter iter;
- __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
- BUG_ON(b->sets[b->nsets].data == write_block(b) &&
- (b->sets[b->nsets].size || b->nsets));
+ size_t order = b->page_order, keys = 0;
+ struct btree_iter_stack iter;
+ int oldsize = bch_count_data(b);
- if (b->written)
- oldsize = bch_count_data(b);
+ __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
if (start) {
- unsigned i;
+ unsigned int i;
for (i = start; i <= b->nsets; i++)
- keys += b->sets[i].data->keys;
+ keys += b->set[i].data->keys;
- order = roundup_pow_of_two(__set_bytes(b->sets->data,
- keys)) / PAGE_SIZE;
- if (order)
- order = ilog2(order);
+ order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter, start, order, false);
+ __btree_sort(b, &iter.iter, start, order, false, state);
- EBUG_ON(b->written && bch_count_data(b) != oldsize);
+ EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bset_sort_state *state)
{
- BUG_ON(!b->written);
- __btree_sort(b, iter, 0, b->page_order, true);
+ __btree_sort(b, iter, 0, b->page_order, true, state);
}
-void bch_btree_sort_into(struct btree *b, struct btree *new)
+void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
+ struct btree_iter_stack iter;
- struct btree_iter iter;
- bch_btree_iter_init(b, &iter, NULL);
+ bch_btree_iter_stack_init(b, &iter, NULL);
- btree_mergesort(b, new->sets->data, &iter, false, true);
+ btree_mergesort(b, new->set->data, &iter.iter, false, true);
- spin_lock(&b->c->sort_time_lock);
- bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
+ bch_time_stats_update(&state->time, start_time);
- bkey_copy_key(&new->key, &b->key);
- new->sets->size = 0;
+ new->set->size = 0; // XXX: why?
}
-void bch_btree_sort_lazy(struct btree *b)
-{
- if (b->nsets) {
- unsigned i, j, keys = 0, total;
-
- for (i = 0; i <= b->nsets; i++)
- keys += b->sets[i].data->keys;
+#define SORT_CRIT (4096 / sizeof(uint64_t))
- total = keys;
+void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
+{
+ unsigned int crit = SORT_CRIT;
+ int i;
- for (j = 0; j < b->nsets; j++) {
- if (keys * 2 < total ||
- keys < 1000) {
- bch_btree_sort_partial(b, j);
- return;
- }
+ /* Don't sort if nothing to do */
+ if (!b->nsets)
+ goto out;
- keys -= b->sets[j].data->keys;
- }
+ for (i = b->nsets - 1; i >= 0; --i) {
+ crit *= state->crit_factor;
- /* Must sort if b->nsets == 3 or we'll overflow */
- if (b->nsets >= (MAX_BSETS - 1) - b->level) {
- bch_btree_sort(b);
+ if (b->set[i].data->keys < crit) {
+ bch_btree_sort_partial(b, i, state);
return;
}
}
- bset_build_written_tree(b);
-}
-
-/* Sysfs stuff */
+ /* Sort if we'd overflow */
+ if (b->nsets + 1 == MAX_BSETS) {
+ bch_btree_sort(b, state);
+ return;
+ }
-struct bset_stats {
- size_t nodes;
- size_t sets_written, sets_unwritten;
- size_t bytes_written, bytes_unwritten;
- size_t floats, failed;
-};
+out:
+ bch_bset_build_written_tree(b);
+}
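
To see how the crit budget grows as the loop walks from the newest bset to the oldest, a sketch with an assumed page_order of 3, where int_sqrt(1 << 3) == 2 and SORT_CRIT is 512 keys:

#include <stdint.h>
#include <stdio.h>

#define SORT_CRIT	(4096 / sizeof(uint64_t))	/* 512 keys */

int main(void)
{
	unsigned crit_factor = 2;	/* int_sqrt(1 << page_order), page_order = 3 */
	size_t crit = SORT_CRIT;
	int i, nsets = 3;

	/* mirrors the loop in bch_btree_sort_lazy() */
	for (i = nsets - 1; i >= 0; --i) {
		crit *= crit_factor;
		printf("set %d is resorted if it holds fewer than %zu keys\n",
		       i, crit);
	}
	return 0;
}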
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
- struct bset_stats *stats)
+void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
- struct bkey *k;
- unsigned i;
-
- stats->nodes++;
+ unsigned int i;
for (i = 0; i <= b->nsets; i++) {
- struct bset_tree *t = &b->sets[i];
+ struct bset_tree *t = &b->set[i];
size_t bytes = t->data->keys * sizeof(uint64_t);
size_t j;
@@ -1150,43 +1387,4 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
stats->bytes_unwritten += bytes;
}
}
-
- if (b->level) {
- struct btree_iter iter;
-
- for_each_key_filter(b, k, &iter, bch_ptr_bad) {
- int ret = btree(bset_stats, k, b, op, stats);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-int bch_bset_print_stats(struct cache_set *c, char *buf)
-{
- struct btree_op op;
- struct bset_stats t;
- int ret;
-
- bch_btree_op_init_stack(&op);
- memset(&t, 0, sizeof(struct bset_stats));
-
- ret = btree_root(bset_stats, c, &op, &t);
- if (ret)
- return ret;
-
- return snprintf(buf, PAGE_SIZE,
- "btree nodes: %zu\n"
- "written sets: %zu\n"
- "unwritten sets: %zu\n"
- "written key bytes: %zu\n"
- "unwritten key bytes: %zu\n"
- "floats: %zu\n"
- "failed: %zu\n",
- t.nodes,
- t.sets_written, t.sets_unwritten,
- t.bytes_written, t.bytes_unwritten,
- t.floats, t.failed);
}