Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r-- | fs/bcachefs/movinggc.c | 79 |
1 files changed, 47 insertions, 32 deletions
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 69e06a84dad4..6718dc37c5a3 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -35,9 +35,10 @@ struct buckets_in_flight {
 };
 
 static const struct rhashtable_params bch_move_bucket_params = {
-        .head_offset    = offsetof(struct move_bucket_in_flight, hash),
-        .key_offset     = offsetof(struct move_bucket_in_flight, bucket.k),
-        .key_len        = sizeof(struct move_bucket_key),
+        .head_offset            = offsetof(struct move_bucket_in_flight, hash),
+        .key_offset             = offsetof(struct move_bucket_in_flight, bucket.k),
+        .key_len                = sizeof(struct move_bucket_key),
+        .automatic_shrinking    = true,
 };
 
 static struct move_bucket_in_flight *
@@ -72,31 +73,36 @@ move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
 static int bch2_bucket_is_movable(struct btree_trans *trans,
                                   struct move_bucket *b, u64 time)
 {
-        struct btree_iter iter;
-        struct bkey_s_c k;
-        struct bch_alloc_v4 _a;
-        const struct bch_alloc_v4 *a;
-        int ret;
+        struct bch_fs *c = trans->c;
 
-        if (bch2_bucket_is_open(trans->c,
-                                b->k.bucket.inode,
-                                b->k.bucket.offset))
+        if (bch2_bucket_is_open(c, b->k.bucket.inode, b->k.bucket.offset))
                 return 0;
 
-        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
-                               b->k.bucket, BTREE_ITER_CACHED);
-        ret = bkey_err(k);
+        struct btree_iter iter;
+        struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+                                               b->k.bucket, BTREE_ITER_cached);
+        int ret = bkey_err(k);
         if (ret)
                 return ret;
 
-        a = bch2_alloc_to_v4(k, &_a);
+        struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
+        if (!ca)
+                goto out;
+
+        if (ca->mi.state != BCH_MEMBER_STATE_rw ||
+            !bch2_dev_is_online(ca))
+                goto out_put;
+
+        struct bch_alloc_v4 _a;
+        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
         b->k.gen        = a->gen;
         b->sectors      = bch2_bucket_sectors_dirty(*a);
+        u64 lru_idx     = alloc_lru_idx_fragmentation(*a, ca);
 
-        ret = data_type_movable(a->data_type) &&
-                a->fragmentation_lru &&
-                a->fragmentation_lru <= time;
-
+        ret = lru_idx && lru_idx <= time;
+out_put:
+        bch2_dev_put(ca);
+out:
         bch2_trans_iter_exit(trans, &iter);
         return ret;
 }
@@ -155,11 +161,12 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
         if (bch2_err_matches(ret, EROFS))
                 return ret;
 
-        if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_tryflush()",
-                                 __func__, bch2_err_str(ret)))
+        if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
                 return ret;
 
-        ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
+        bch2_trans_begin(trans);
+
+        ret = for_each_btree_key_max(trans, iter, BTREE_ID_lru,
                                   lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
                                   lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
                                   0, k, ({
@@ -207,7 +214,8 @@ static int bch2_copygc(struct moving_context *ctxt,
         };
         move_buckets buckets = { 0 };
         struct move_bucket_in_flight *f;
-        u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+        u64 sectors_seen  = atomic64_read(&ctxt->stats->sectors_seen);
+        u64 sectors_moved = atomic64_read(&ctxt->stats->sectors_moved);
         int ret = 0;
 
         ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
@@ -237,7 +245,6 @@ static int bch2_copygc(struct moving_context *ctxt,
                 *did_work = true;
         }
 err:
-        darray_exit(&buckets);
 
         /* no entries in LRU btree found, or got to end: */
         if (bch2_err_matches(ret, ENOENT))
@@ -246,8 +253,11 @@ err:
         if (ret < 0 && !bch2_err_matches(ret, EROFS))
                 bch_err_msg(c, ret, "from bch2_move_data()");
 
-        moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
-        trace_and_count(c, copygc, c, moved, 0, 0, 0);
+        sectors_seen    = atomic64_read(&ctxt->stats->sectors_seen) - sectors_seen;
+        sectors_moved   = atomic64_read(&ctxt->stats->sectors_moved) - sectors_moved;
+        trace_and_count(c, copygc, c, buckets.nr, sectors_seen, sectors_moved);
+
+        darray_exit(&buckets);
         return ret;
 }
 
@@ -288,18 +298,23 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 
 void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
 {
-        prt_printf(out, "Currently waiting for: ");
+        printbuf_tabstop_push(out, 32);
+        prt_printf(out, "running:\t%u\n",               c->copygc_running);
+        prt_printf(out, "copygc_wait:\t%llu\n",         c->copygc_wait);
+        prt_printf(out, "copygc_wait_at:\t%llu\n",      c->copygc_wait_at);
+
+        prt_printf(out, "Currently waiting for:\t");
         prt_human_readable_u64(out, max(0LL, c->copygc_wait -
                                         atomic64_read(&c->io_clock[WRITE].now)) << 9);
         prt_newline(out);
 
-        prt_printf(out, "Currently waiting since: ");
+        prt_printf(out, "Currently waiting since:\t");
         prt_human_readable_u64(out, max(0LL,
                                         atomic64_read(&c->io_clock[WRITE].now) -
                                         c->copygc_wait_at) << 9);
         prt_newline(out);
 
-        prt_printf(out, "Currently calculated wait: ");
+        prt_printf(out, "Currently calculated wait:\t");
         prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
         prt_newline(out);
 }
@@ -337,9 +352,9 @@ static int bch2_copygc_thread(void *arg)
                 bch2_trans_unlock_long(ctxt.trans);
                 cond_resched();
 
-                if (!c->copy_gc_enabled) {
+                if (!c->opts.copygc_enabled) {
                         move_buckets_wait(&ctxt, buckets, true);
-                        kthread_wait_freezable(c->copy_gc_enabled ||
+                        kthread_wait_freezable(c->opts.copygc_enabled ||
                                                kthread_should_stop());
                 }
 
@@ -376,7 +391,7 @@ static int bch2_copygc_thread(void *arg)
                 if (min_member_capacity == U64_MAX)
                         min_member_capacity = 128 * 2048;
 
-                bch2_trans_unlock_long(ctxt.trans);
+                move_buckets_wait(&ctxt, buckets, true);
                 bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
                                            MAX_SCHEDULE_TIMEOUT);
         }
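
The first hunk sets ".automatic_shrinking = true" on the rhashtable that tracks buckets with moves in flight, so the core-kernel rhashtable code can resize the table back down as entries are removed rather than only ever growing it. Below is a minimal sketch of that generic rhashtable option using made-up demo types (demo_key, demo_entry, demo_params are hypothetical and stand in for the real move_bucket structures):

#include <linux/rhashtable.h>

struct demo_key {
        u64     inode;
        u64     offset;
};

struct demo_entry {
        struct demo_key         key;    /* compared by memcmp over key_len */
        struct rhash_head       hash;   /* linkage owned by the rhashtable */
};

static const struct rhashtable_params demo_params = {
        .head_offset            = offsetof(struct demo_entry, hash),
        .key_offset             = offsetof(struct demo_entry, key),
        .key_len                = sizeof(struct demo_key),
        .automatic_shrinking    = true, /* shrink the bucket array as entries go away */
};

static int demo_add(struct rhashtable *ht, struct demo_entry *e)
{
        /* Returns -EEXIST if an entry with the same key is already present. */
        return rhashtable_lookup_insert_fast(ht, &e->hash, demo_params);
}

static int demo_del(struct rhashtable *ht, struct demo_entry *e)
{
        /* With automatic_shrinking set, removals may trigger a deferred table shrink. */
        return rhashtable_remove_fast(ht, &e->hash, demo_params);
}

The table itself would be set up once with rhashtable_init(&ht, &demo_params) and torn down with rhashtable_destroy(&ht), as the copygc code does for its buckets_in_flight table.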
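The bch2_bucket_is_movable() rework changes what qualifies a bucket for copygc: the owning device must be successfully referenced, online, and read-write, and the decision now keys off the fragmentation-LRU index computed by alloc_lru_idx_fragmentation() rather than the old data_type_movable()/fragmentation_lru fields. A loose, self-contained sketch of that predicate shape, with hypothetical demo types rather than the real bch_dev/bch_alloc_v4 structures:

#include <linux/types.h>

/* Hypothetical stand-ins for the device and allocation info consulted above. */
struct demo_dev {
        bool    online;
        bool    rw;                     /* BCH_MEMBER_STATE_rw in the real code */
};

struct demo_alloc {
        u64     fragmentation_lru_idx;  /* 0 means "not on the fragmentation LRU" */
};

/*
 * A bucket is worth moving only if its device is usable (present, online,
 * read-write) and its fragmentation-LRU index is set and no newer than the
 * LRU cursor position ("time") that copygc is currently scanning up to.
 */
static bool demo_bucket_is_movable(const struct demo_dev *ca,
                                   const struct demo_alloc *a, u64 time)
{
        if (!ca || !ca->online || !ca->rw)
                return false;

        return a->fragmentation_lru_idx && a->fragmentation_lru_idx <= time;
}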