#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
+#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#define DROP_THIS_NODE 10
#define DROP_PREV_NODE 11
+static bool should_restart_for_topology_repair(struct bch_fs *c)
+{
+ return c->opts.fix_errors != FSCK_FIX_no &&
+ !(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
+}
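+
+/*
+ * Hedged sketch (not the upstream implementation; field names are
+ * assumptions) of what bch2_run_explicit_recovery_pass() is taken to do
+ * at the call sites below: record that the pass must run, and if
+ * recovery has already advanced past it, return an error that unwinds
+ * so the pass loop can restart from check_topology.
+ *
+ *	static int sketch_run_explicit_recovery_pass(struct bch_fs *c,
+ *					enum bch_recovery_pass pass)
+ *	{
+ *		c->recovery_passes_explicit |= BIT_ULL(pass);
+ *
+ *		if (c->curr_recovery_pass <= pass)
+ *			return 0;
+ *
+ *		// already past it: rewind and unwind to the pass loop
+ *		c->curr_recovery_pass = pass;
+ *		return -BCH_ERR_restart_recovery;
+ *	}
+ */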
+
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
preempt_disable();
" cur %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
buf1.buf, buf2.buf) &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+ should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = -BCH_ERR_need_topology_repair;
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
" expected %s",
bch2_btree_ids[b->c.btree_id], b->c.level,
buf1.buf, buf2.buf) &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+ should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = -BCH_ERR_need_topology_repair;
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
}
if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
break;
}
ret = PTR_ERR_OR_ZERO(cur);
if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
goto err;
}
return ret;
}
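/*
 * For reference, a plausible shape of the logging helpers the
 * conversions above rely on (the real macros live in bcachefs.h and may
 * differ in detail):
 *
 *	#define bch_err_fn(_c, _ret)					\
 *		bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret))
 *
 *	#define bch_err_msg(_c, _ret, _msg, ...)			\
 *		bch_err(_c, "%s(): error " _msg " %s", __func__,	\
 *			##__VA_ARGS__, bch2_err_str(_ret))
 *
 * They fold the old hand-rolled "%s: error ...: %s" format strings into
 * one place, which is why the call sites shrink to a single line.
 */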
-static int bch2_repair_topology(struct bch_fs *c)
+int bch2_check_topology(struct bch_fs *c)
{
struct btree_trans trans;
struct btree *b;
bch2_trans_init(&trans, c, 0, 0);
- for (i = 0; i < BTREE_ID_NR && !ret; i++) {
- b = c->btree_roots[i].b;
+ for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (!r->alive)
+ continue;
+
+ b = r->b;
if (btree_node_fake(b))
continue;
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
if (!new) {
- bch_err(c, "%s: error allocating new key", __func__);
ret = -BCH_ERR_ENOMEM_gc_repair_key;
+ bch_err_msg(c, ret, "allocating new key");
goto err;
}
fsck_err:
err:
if (ret)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
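/*
 * bch2_check_topology() above now iterates all live btree roots, not
 * just the fixed BTREE_ID_NR array. A minimal sketch of the accessors
 * it assumes ("btree_roots_known" and "btree_roots_extra" are assumed
 * field names; the real definitions may differ):
 *
 *	static inline unsigned sketch_btree_id_nr_alive(struct bch_fs *c)
 *	{
 *		return BTREE_ID_NR + c->btree_roots_extra.nr;
 *	}
 *
 *	static inline struct btree_root *
 *	sketch_btree_id_root(struct bch_fs *c, unsigned id)
 *	{
 *		return id < BTREE_ID_NR
 *			? &c->btree_roots_known[id]
 *			: &c->btree_roots_extra.data[id - BTREE_ID_NR];
 *	}
 */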
return ret;
mutex_lock(&c->btree_root_lock);
- b = c->btree_roots[btree_id].b;
+ b = bch2_btree_id_root(c, btree_id)->b;
if (!btree_node_fake(b)) {
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
false, &k, true);
- if (ret) {
- bch_err(c, "%s: error from bch2_gc_mark_key: %s",
- __func__, bch2_err_str(ret));
+ if (ret)
goto fsck_err;
- }
if (b->c.level) {
bch2_bkey_buf_reassemble(&cur, c, k);
b->c.level - 1,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
- ret = -BCH_ERR_need_topology_repair;
+ should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto fsck_err;
} else {
/* Continue marking when opted to not
 * fix the error: */
continue;
}
} else if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
break;
}
struct printbuf buf = PRINTBUF;
int ret = 0;
- b = c->btree_roots[btree_id].b;
+ b = bch2_btree_id_root(c, btree_id)->b;
if (btree_node_fake(b))
return 0;
six_unlock_read(&b->c.lock);
if (ret < 0)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
printbuf_exit(&buf);
return ret;
}
? bch2_gc_btree_init(&trans, ids[i], metadata_only)
: bch2_gc_btree(&trans, ids[i], initial, metadata_only);
+ for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
+ if (!bch2_btree_id_root(c, i)->alive)
+ continue;
+
+ ret = initial
+ ? bch2_gc_btree_init(&trans, i, metadata_only)
+ : bch2_gc_btree(&trans, i, initial, metadata_only);
+ }
+
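+/*
+ * Note: this second loop walks btree IDs >= BTREE_ID_NR, i.e. roots
+ * present on disk but not known to this kernel version; presumably they
+ * still have to be marked so GC keeps their buckets referenced.
+ */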
if (ret < 0)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
bch2_trans_exit(&trans);
return ret;
for_each_member_device(ca, c, dev) {
struct bch_dev_usage *dst = ca->usage_base;
struct bch_dev_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) ca->usage_gc,
+ bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
dev_usage_u64s());
copy_dev_field(buckets_ec, "buckets_ec");
unsigned nr = fs_usage_u64s(c);
struct bch_fs_usage *dst = c->usage_base;
struct bch_fs_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
+ bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
copy_fs_field(hidden, "hidden");
copy_fs_field(btree, "btree");
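/*
 * The two (u64 __percpu *) casts above replace plain (void *) casts so
 * sparse's address-space checking sees the right annotation;
 * bch2_acc_percpu_u64s() is assumed to take a u64 __percpu * and return
 * a flat u64 * summed across all CPUs.
 */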
if (ca)
percpu_ref_put(&ca->ref);
if (ret)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
percpu_up_write(&c->mark_lock);
printbuf_exit(&buf);
" should be %u",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
- struct bkey_i *new = bch2_bkey_make_mut(trans, iter, k, 0);
+ struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
bch2_mark_superblocks(c);
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
- (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb) &&
- !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
- c->opts.fix_errors != FSCK_OPT_NO)) {
- bch_info(c, "Starting topology repair pass");
- ret = bch2_repair_topology(c);
- if (ret)
- goto out;
- bch_info(c, "Topology repair pass done");
-
- set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags);
- }
-
ret = bch2_gc_btrees(c, initial, metadata_only);
- if (ret == -BCH_ERR_need_topology_repair &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) &&
- !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
- set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
- SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, true);
- ret = 0;
- }
-
- if (ret == -BCH_ERR_need_topology_repair)
- ret = -BCH_ERR_fsck_errors_not_fixed;
-
if (ret)
goto out;
* allocator thread - issue wakeup in case they blocked on gc_lock:
*/
closure_wake_up(&c->freelist_wait);
+
+ if (ret)
+ bch_err_fn(c, ret);
return ret;
}
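/*
 * With the block above removed, bch2_gc() no longer drives topology
 * repair itself via BCH_FS_TOPOLOGY_REPAIR_DONE / NEED_ANOTHER_GC /
 * SET_BCH_SB_HAS_TOPOLOGY_ERRORS; a topology error instead surfaces as
 * a request to run BCH_RECOVERY_PASS_check_topology, and the recovery
 * pass machinery is left to rerun GC afterwards.
 */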
percpu_up_read(&c->mark_lock);
return 0;
update:
- u = bch2_bkey_make_mut(trans, iter, k, 0);
+ u = bch2_bkey_make_mut(trans, iter, &k, 0);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
return ret;
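
/*
 * bch2_bkey_make_mut() now takes &k rather than k: the helper is
 * assumed to update the caller's bkey_s_c to point at the new mutable
 * copy, so code after the call sees the updated key. Assumed shape:
 *
 *	struct bkey_i *bch2_bkey_make_mut(struct btree_trans *,
 *					  struct btree_iter *,
 *					  struct bkey_s_c *,
 *					  unsigned flags);
 */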