Diffstat (limited to 'fs/bcachefs/recovery_passes.c')
 fs/bcachefs/recovery_passes.c | 106 ++++++++++++++++++++++++++++++--------
 1 file changed, 88 insertions(+), 18 deletions(-)
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index dabb29b08ad0..35ac0d64d73a 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -103,20 +103,20 @@ static void bch2_sb_recovery_passes_to_text(struct printbuf *out,
prt_tab(out);
bch2_pr_time_units(out, le32_to_cpu(i->last_runtime) * NSEC_PER_SEC);
+
+ if (BCH_RECOVERY_PASS_NO_RATELIMIT(i))
+ prt_str(out, " (no ratelimit)");
+
prt_newline(out);
}
}
-static void bch2_sb_recovery_pass_complete(struct bch_fs *c,
- enum bch_recovery_pass pass,
- s64 start_time)
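+/*
+ * Find the superblock recovery_passes entry for @pass, resizing the section
+ * to create it if necessary; returns NULL if the resize fails. Caller must
+ * hold sb_lock.
+ */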
+static struct recovery_pass_entry *bch2_sb_recovery_pass_entry(struct bch_fs *c,
+ enum bch_recovery_pass pass)
{
enum bch_recovery_pass_stable stable = bch2_recovery_pass_to_stable(pass);
- s64 end_time = ktime_get_real_seconds();
- mutex_lock(&c->sb_lock);
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
- __clear_bit_le64(stable, ext->recovery_passes_required);
+ lockdep_assert_held(&c->sb_lock);
struct bch_sb_field_recovery_passes *r =
bch2_sb_field_get(c->disk_sb.sb, recovery_passes);
@@ -127,15 +127,43 @@ static void bch2_sb_recovery_pass_complete(struct bch_fs *c,
r = bch2_sb_field_resize(&c->disk_sb, recovery_passes, u64s);
if (!r) {
bch_err(c, "error creating recovery_passes sb section");
- goto out;
+ return NULL;
}
}
- r->start[stable].last_run = cpu_to_le64(end_time);
- r->start[stable].last_runtime = cpu_to_le32(max(0, end_time - start_time));
-out:
+ return r->start + stable;
+}
+
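+/*
+ * Mark @pass complete: clear its bit in recovery_passes_required, record
+ * when it ran and how long it took, and clear the no_ratelimit flag.
+ */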
+static void bch2_sb_recovery_pass_complete(struct bch_fs *c,
+ enum bch_recovery_pass pass,
+ s64 start_time)
+{
+ guard(mutex)(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+ __clear_bit_le64(bch2_recovery_pass_to_stable(pass),
+ ext->recovery_passes_required);
+
+ struct recovery_pass_entry *e = bch2_sb_recovery_pass_entry(c, pass);
+ if (e) {
+ s64 end_time = ktime_get_real_seconds();
+ e->last_run = cpu_to_le64(end_time);
+ e->last_runtime = cpu_to_le32(max(0, end_time - start_time));
+ SET_BCH_RECOVERY_PASS_NO_RATELIMIT(e, false);
+ }
+
bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
+}
+
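+/*
+ * Force @pass to run the next time it's scheduled, even if it ran recently;
+ * the flag is cleared again by bch2_sb_recovery_pass_complete().
+ */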
+void bch2_recovery_pass_set_no_ratelimit(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ guard(mutex)(&c->sb_lock);
+
+ struct recovery_pass_entry *e = bch2_sb_recovery_pass_entry(c, pass);
+ if (e && !BCH_RECOVERY_PASS_NO_RATELIMIT(e)) {
+ SET_BCH_RECOVERY_PASS_NO_RATELIMIT(e, true);
+ bch2_write_super(c);
+ }
}
static bool bch2_recovery_pass_want_ratelimit(struct bch_fs *c, enum bch_recovery_pass pass)
@@ -157,6 +185,9 @@ static bool bch2_recovery_pass_want_ratelimit(struct bch_fs *c, enum bch_recover
*/
ret = (u64) le32_to_cpu(i->last_runtime) * 100 >
ktime_get_real_seconds() - le64_to_cpu(i->last_run);
+
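+ /* An explicit no_ratelimit flag overrides the runtime heuristic: */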
+ if (BCH_RECOVERY_PASS_NO_RATELIMIT(i))
+ ret = false;
}
return ret;
@@ -263,8 +294,13 @@ static bool recovery_pass_needs_set(struct bch_fs *c,
enum bch_run_recovery_pass_flags *flags)
{
struct bch_fs_recovery *r = &c->recovery;
- bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags);
- bool persistent = !in_recovery || !(*flags & RUN_RECOVERY_PASS_nopersistent);
+
+ /*
+ * Never run scan_for_btree_nodes persistently: check_topology will run
+ * it if required
+ */
+ if (pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
+ *flags |= RUN_RECOVERY_PASS_nopersistent;
if ((*flags & RUN_RECOVERY_PASS_ratelimit) &&
!bch2_recovery_pass_want_ratelimit(c, pass))
@@ -279,6 +315,8 @@ static bool recovery_pass_needs_set(struct bch_fs *c,
* Otherwise, we run run_explicit_recovery_pass when we find damage, so
* it should run again even if it's already run:
*/
+ bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags);
+ bool persistent = !in_recovery || !(*flags & RUN_RECOVERY_PASS_nopersistent);
if (persistent
? !(c->sb.recovery_passes_required & BIT_ULL(pass))
@@ -303,6 +341,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
struct bch_fs_recovery *r = &c->recovery;
int ret = 0;
+
lockdep_assert_held(&c->sb_lock);
bch2_printbuf_make_room(out, 1024);
@@ -315,7 +354,9 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
goto out;
bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags);
- bool rewind = in_recovery && r->curr_pass > pass;
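+ /* Don't rewind to a pass that already completed in this recovery: */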
+ bool rewind = in_recovery &&
+ r->curr_pass > pass &&
+ !(r->passes_complete & BIT_ULL(pass));
bool ratelimit = flags & RUN_RECOVERY_PASS_ratelimit;
if (!(in_recovery && (flags & RUN_RECOVERY_PASS_nopersistent))) {
@@ -327,7 +368,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
(!in_recovery || r->curr_pass >= BCH_RECOVERY_PASS_set_may_go_rw)) {
prt_printf(out, "need recovery pass %s (%u), but already rw\n",
bch2_recovery_passes[pass], pass);
- ret = -BCH_ERR_cannot_rewind_recovery;
+ ret = bch_err_throw(c, cannot_rewind_recovery);
goto out;
}
@@ -347,7 +388,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
if (rewind) {
r->next_pass = pass;
r->passes_complete &= (1ULL << pass) >> 1;
- ret = -BCH_ERR_restart_recovery;
+ ret = bch_err_throw(c, restart_recovery);
}
} else {
prt_printf(out, "scheduling recovery pass %s (%u)%s\n",
@@ -382,9 +423,38 @@ int bch2_run_explicit_recovery_pass(struct bch_fs *c,
return ret;
}
+/*
+ * Returns 0 if @pass has run recently, otherwise one of
+ * -BCH_ERR_restart_recovery
+ * -BCH_ERR_recovery_pass_will_run
+ */
+int bch2_require_recovery_pass(struct bch_fs *c,
+ struct printbuf *out,
+ enum bch_recovery_pass pass)
+{
+ if (test_bit(BCH_FS_in_recovery, &c->flags) &&
+ c->recovery.passes_complete & BIT_ULL(pass))
+ return 0;
+
+ guard(mutex)(&c->sb_lock);
+
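+ /* Treat the pass as satisfied if it ran recently enough to be ratelimited: */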
+ if (bch2_recovery_pass_want_ratelimit(c, pass))
+ return 0;
+
+ enum bch_run_recovery_pass_flags flags = 0;
+ int ret = 0;
+
+ if (recovery_pass_needs_set(c, pass, &flags)) {
+ ret = __bch2_run_explicit_recovery_pass(c, out, pass, flags);
+ bch2_write_super(c);
+ }
+
+ return ret ?: bch_err_throw(c, recovery_pass_will_run);
+}
+
int bch2_run_print_explicit_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
- enum bch_run_recovery_pass_flags flags = RUN_RECOVERY_PASS_nopersistent;
+ enum bch_run_recovery_pass_flags flags = 0;
if (!recovery_pass_needs_set(c, pass, &flags))
return 0;