#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
+#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "reflink.h"
#include "replicas.h"
#include "super-io.h"
+#include "trace.h"
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
-#include <trace/events/bcachefs.h>
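+
+/*
+ * Sentinel return values for the topology repair helpers below: they tell the
+ * caller which of the two nodes being compared should be deleted.
+ */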
#define DROP_THIS_NODE 10
#define DROP_PREV_NODE 11
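+
+/*
+ * Cast a read-only bkey to a mutable one for the trigger path; only OK here
+ * because gc's mark-only triggers never write through it -- hence "unsafe".
+ */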
+static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
+{
+ return (struct bkey_s) {{{
+ (struct bkey *) k.k,
+ (struct bch_val *) k.v
+ }}};
+}
+
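+/*
+ * Returns true if we should abort mark and sweep and rewind recovery to the
+ * topology repair pass: only when errors are being fixed and check_topology
+ * hasn't already completed.
+ */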
+static bool should_restart_for_topology_repair(struct bch_fs *c)
+{
+ return c->opts.fix_errors != FSCK_FIX_no &&
+ !(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
+}
+
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
preempt_disable();
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
- "btree node with incorrect min_key at btree %s level %u:\n"
- " prev %s\n"
- " cur %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
- buf1.buf, buf2.buf) &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ btree_node_topology_bad_min_key,
+ "btree node with incorrect min_key at btree %s level %u:\n"
+ " prev %s\n"
+ " cur %s",
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
+ buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = -BCH_ERR_need_topology_repair;
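+			/*
+			 * This should queue check_topology and, if recovery
+			 * has already run past that pass, return an error so
+			 * we unwind back to it:
+			 */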
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
- set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
}
}
}
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
bch2_bpos_to_text(&buf2, node_end);
- if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
+ if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
+ btree_node_topology_bad_max_key,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf) &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
+ should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = -BCH_ERR_need_topology_repair;
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
- set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
}
}
if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
cur->data->min_key), c,
+ btree_node_topology_overwritten_by_next_node,
"btree node overwritten by next node at btree %s level %u:\n"
" node %s\n"
" next %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf)) {
ret = DROP_PREV_NODE;
goto out;
if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
bpos_predecessor(cur->data->min_key)), c,
+ btree_node_topology_bad_max_key,
"btree node with incorrect max_key at btree %s level %u:\n"
" node %s\n"
" next %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf))
ret = set_node_max(c, prev,
bpos_predecessor(cur->data->min_key));
if (mustfix_fsck_err_on(bpos_ge(expected_start,
cur->data->max_key), c,
+ btree_node_topology_overwritten_by_prev_node,
"btree node overwritten by prev node at btree %s level %u:\n"
" prev %s\n"
" node %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf)) {
ret = DROP_THIS_NODE;
goto out;
}
if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
+ btree_node_topology_bad_min_key,
"btree node with incorrect min_key at btree %s level %u:\n"
" prev %s\n"
" node %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf))
ret = set_node_min(c, cur, expected_start);
}
bch2_bpos_to_text(&buf2, b->key.k.p);
if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
+ btree_node_topology_bad_max_key,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
- bch2_btree_ids[b->c.btree_id], b->c.level,
+ bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf)) {
ret = set_node_max(c, child, b->key.k.p);
if (ret)
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
if (mustfix_fsck_err_on(ret == -EIO, c,
+ btree_node_unreadable,
"Topology repair: unreadable btree node at btree %s level %u:\n"
" %s",
- bch2_btree_ids[b->c.btree_id],
+ bch2_btree_id_str(b->c.btree_id),
b->c.level - 1,
buf.buf)) {
bch2_btree_node_evict(trans, cur_k.k);
continue;
}
- if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
+ if (ret)
break;
- }
ret = btree_repair_node_boundaries(c, b, prev, cur);
false);
ret = PTR_ERR_OR_ZERO(cur);
- if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
+ if (ret)
goto err;
- }
ret = bch2_btree_repair_topology_recurse(trans, cur);
six_unlock_read(&cur->c.lock);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
if (mustfix_fsck_err_on(!have_child, c,
+ btree_node_topology_interior_node_empty,
"empty interior btree node at btree %s level %u\n"
" %s",
- bch2_btree_ids[b->c.btree_id],
+ bch2_btree_id_str(b->c.btree_id),
b->c.level, buf.buf))
ret = DROP_THIS_NODE;
err:
return ret;
}
-static int bch2_repair_topology(struct bch_fs *c)
+int bch2_check_topology(struct bch_fs *c)
{
- struct btree_trans trans;
+ struct btree_trans *trans = bch2_trans_get(c);
struct btree *b;
unsigned i;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-	for (i = 0; i < BTREE_ID_NR && !ret; i++) {
-		b = c->btree_roots[i].b;
+	for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+		struct btree_root *r = bch2_btree_id_root(c, i);
+
+		if (!r->alive)
+			continue;
+
+		b = r->b;
if (btree_node_fake(b))
continue;
- btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read);
- ret = bch2_btree_repair_topology_recurse(&trans, b);
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+ ret = bch2_btree_repair_topology_recurse(trans, b);
six_unlock_read(&b->c.lock);
if (ret == DROP_THIS_NODE) {
}
}
- bch2_trans_exit(&trans);
+ bch2_trans_put(trans);
return ret;
}
struct bkey_s_c *k)
{
struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(*k);
- const union bch_extent_entry *entry;
+ struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k);
+ const union bch_extent_entry *entry_c;
struct extent_ptr_decoded p = { 0 };
bool do_update = false;
struct printbuf buf = PRINTBUF;
* XXX
* use check_bucket_ref here
*/
- bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
+ bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);
-
- if (c->opts.reconstruct_alloc ||
- fsck_err_on(!g->gen_valid, c,
- "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);
+
+ if (!g->gen_valid &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, ptr_to_missing_alloc_key,
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
if (!p.ptr.cached) {
g->gen_valid = true;
g->gen = p.ptr.gen;
}
}
- if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, c,
- "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
+ if (gen_cmp(p.ptr.gen, g->gen) > 0 &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
if (!p.ptr.cached) {
g->gen_valid = true;
g->gen = p.ptr.gen;
g->data_type = 0;
g->dirty_sectors = 0;
g->cached_sectors = 0;
- set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ set_bit(BCH_FS_need_another_gc, &c->flags);
} else {
do_update = true;
}
}
- if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, c,
- "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
+ if (gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX &&
+ (c->opts.reconstruct_alloc ||
+	     fsck_err(c, ptr_too_stale,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
do_update = true;
- if (fsck_err_on(!p.ptr.cached &&
- gen_cmp(p.ptr.gen, g->gen) < 0, c,
- "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
+ if (!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0 &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, stale_dirty_ptr,
+ "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
do_update = true;
if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
if (fsck_err_on(bucket_data_type(g->data_type) &&
bucket_data_type(g->data_type) != data_type, c,
+ ptr_bucket_data_type_mismatch,
"bucket %u:%zu different types of data in same bucket: %s, %s\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (data_type == BCH_DATA_btree) {
g->data_type = data_type;
- set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ set_bit(BCH_FS_need_another_gc, &c->flags);
} else {
do_update = true;
}
struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
if (fsck_err_on(!m || !m->alive, c,
+ ptr_to_missing_stripe,
"pointer to nonexistent stripe %llu\n"
"while marking %s",
(u64) p.ec.idx,
do_update = true;
if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
+ ptr_to_incorrect_stripe,
"pointer does not match stripe %llu\n"
"while marking %s",
(u64) p.ec.idx,
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
if (!new) {
- bch_err(c, "%s: error allocating new key", __func__);
ret = -BCH_ERR_ENOMEM_gc_repair_key;
+ bch_err_msg(c, ret, "allocating new key");
goto err;
}
if (level)
bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new);
- if (c->opts.verbose) {
+ if (0) {
printbuf_reset(&buf);
bch2_bkey_val_to_text(&buf, c, *k);
bch_info(c, "updated %s", buf.buf);
struct bch_fs *c = trans->c;
struct bkey deleted = KEY(0, 0, 0);
struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
- unsigned flags =
- BTREE_TRIGGER_GC|
- (initial ? BTREE_TRIGGER_NOATOMIC : 0);
int ret = 0;
deleted.p = k->k->p;
goto err;
if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
+ bkey_version_in_future,
"key version number higher than recorded: %llu > %llu",
k->k->version.lo,
atomic64_read(&c->key_version)))
}
ret = commit_do(trans, NULL, NULL, 0,
- bch2_mark_key(trans, btree_id, level, old, *k, flags));
+ bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
fsck_err:
err:
- if (ret)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
return ret;
}
return ret;
mutex_lock(&c->btree_root_lock);
- b = c->btree_roots[btree_id].b;
+ b = bch2_btree_id_root(c, btree_id)->b;
if (!btree_node_fake(b)) {
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
false, &k, true);
- if (ret) {
- bch_err(c, "%s: error from bch2_gc_mark_key: %s",
- __func__, bch2_err_str(ret));
+ if (ret)
goto fsck_err;
- }
if (b->c.level) {
bch2_bkey_buf_reassemble(&cur, c, k);
FSCK_CAN_FIX|
FSCK_CAN_IGNORE|
FSCK_NO_RATELIMIT,
+ btree_node_read_error,
"Unreadable btree node at btree %s level %u:\n"
" %s",
- bch2_btree_ids[b->c.btree_id],
+ bch2_btree_id_str(b->c.btree_id),
b->c.level - 1,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
- ret = -BCH_ERR_need_topology_repair;
+ should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto fsck_err;
} else {
/* Continue marking when opted to not
* fix the error: */
ret = 0;
- set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
continue;
}
} else if (ret) {
- bch_err(c, "%s: error getting btree node: %s",
- __func__, bch2_err_str(ret));
+ bch_err_msg(c, ret, "getting btree node");
break;
}
struct printbuf buf = PRINTBUF;
int ret = 0;
- b = c->btree_roots[btree_id].b;
+ b = bch2_btree_id_root(c, btree_id)->b;
if (btree_node_fake(b))
return 0;
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->min_key);
if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
+ btree_root_bad_min_key,
"btree root with incorrect min_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = -BCH_ERR_fsck_repair_unimplemented;
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->max_key);
if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
+ btree_root_bad_max_key,
"btree root with incorrect max_key: %s", buf.buf)) {
bch_err(c, "repair unimplemented");
ret = -BCH_ERR_fsck_repair_unimplemented;
fsck_err:
six_unlock_read(&b->c.lock);
- if (ret < 0)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
printbuf_exit(&buf);
return ret;
}
static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
{
- struct btree_trans trans;
+ struct btree_trans *trans = bch2_trans_get(c);
enum btree_id ids[BTREE_ID_NR];
unsigned i;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-
- if (initial)
- trans.is_initial_gc = true;
-
for (i = 0; i < BTREE_ID_NR; i++)
ids[i] = i;
bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
for (i = 0; i < BTREE_ID_NR && !ret; i++)
ret = initial
- ? bch2_gc_btree_init(&trans, ids[i], metadata_only)
- : bch2_gc_btree(&trans, ids[i], initial, metadata_only);
+ ? bch2_gc_btree_init(trans, ids[i], metadata_only)
+ : bch2_gc_btree(trans, ids[i], initial, metadata_only);
+
-	if (ret < 0)
-		bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+	for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
+		if (!bch2_btree_id_root(c, i)->alive)
+			continue;
+
+		ret = initial
+			? bch2_gc_btree_init(trans, i, metadata_only)
+			: bch2_gc_btree(trans, i, initial, metadata_only);
+	}
- bch2_trans_exit(&trans);
+ bch2_trans_put(trans);
+ bch_err_fn(c, ret);
return ret;
}
static void bch2_mark_superblocks(struct bch_fs *c)
{
- struct bch_dev *ca;
- unsigned i;
-
mutex_lock(&c->sb_lock);
gc_pos_set(c, gc_phase(GC_PHASE_SB));
- for_each_online_member(ca, c, i)
+ for_each_online_member(c, ca)
bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
mutex_unlock(&c->sb_lock);
}
static void bch2_gc_free(struct bch_fs *c)
{
- struct bch_dev *ca;
- unsigned i;
-
genradix_free(&c->reflink_gc_table);
genradix_free(&c->gc_stripes);
- for_each_member_device(ca, c, i) {
+ for_each_member_device(c, ca) {
kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket));
bool verify = !metadata_only &&
!c->opts.reconstruct_alloc &&
(!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
- unsigned i, dev;
+ unsigned i;
int ret = 0;
percpu_down_write(&c->mark_lock);
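+
+/*
+ * If a counter gc recomputed disagrees with the live counter, complain with
+ * the given fsck error (when verifying), then overwrite the live value with
+ * what gc counted:
+ */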
-#define copy_field(_f, _msg, ...) \
+#define copy_field(_err, _f, _msg, ...) \
if (dst->_f != src->_f && \
(!verify || \
- fsck_err(c, _msg ": got %llu, should be %llu" \
+ fsck_err(c, _err, _msg ": got %llu, should be %llu" \
, ##__VA_ARGS__, dst->_f, src->_f))) \
dst->_f = src->_f
-#define copy_stripe_field(_f, _msg, ...) \
- if (dst->_f != src->_f && \
- (!verify || \
- fsck_err(c, "stripe %zu has wrong "_msg \
- ": got %u, should be %u", \
- iter.pos, ##__VA_ARGS__, \
- dst->_f, src->_f))) \
- dst->_f = src->_f
-#define copy_dev_field(_f, _msg, ...) \
- copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
-#define copy_fs_field(_f, _msg, ...) \
- copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
+#define copy_dev_field(_err, _f, _msg, ...) \
+ copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
+#define copy_fs_field(_err, _f, _msg, ...) \
+ copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
bch2_fs_usage_acc_to_base(c, i);
- for_each_member_device(ca, c, dev) {
+ __for_each_member_device(c, ca) {
struct bch_dev_usage *dst = ca->usage_base;
struct bch_dev_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) ca->usage_gc,
+ bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
dev_usage_u64s());
- copy_dev_field(buckets_ec, "buckets_ec");
-
for (i = 0; i < BCH_DATA_NR; i++) {
- copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]);
- copy_dev_field(d[i].sectors, "%s sectors", bch2_data_types[i]);
- copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+ copy_dev_field(dev_usage_buckets_wrong,
+ d[i].buckets, "%s buckets", bch2_data_types[i]);
+ copy_dev_field(dev_usage_sectors_wrong,
+ d[i].sectors, "%s sectors", bch2_data_types[i]);
+ copy_dev_field(dev_usage_fragmented_wrong,
+ d[i].fragmented, "%s fragmented", bch2_data_types[i]);
}
- };
+ }
{
unsigned nr = fs_usage_u64s(c);
struct bch_fs_usage *dst = c->usage_base;
struct bch_fs_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
+ bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
- copy_fs_field(hidden, "hidden");
- copy_fs_field(btree, "btree");
+ copy_fs_field(fs_usage_hidden_wrong,
+ hidden, "hidden");
+ copy_fs_field(fs_usage_btree_wrong,
+ btree, "btree");
if (!metadata_only) {
- copy_fs_field(data, "data");
- copy_fs_field(cached, "cached");
- copy_fs_field(reserved, "reserved");
- copy_fs_field(nr_inodes,"nr_inodes");
+ copy_fs_field(fs_usage_data_wrong,
+ data, "data");
+ copy_fs_field(fs_usage_cached_wrong,
+ cached, "cached");
+ copy_fs_field(fs_usage_reserved_wrong,
+ reserved, "reserved");
+ copy_fs_field(fs_usage_nr_inodes_wrong,
+ nr_inodes,"nr_inodes");
for (i = 0; i < BCH_REPLICAS_MAX; i++)
- copy_fs_field(persistent_reserved[i],
+ copy_fs_field(fs_usage_persistent_reserved_wrong,
+ persistent_reserved[i],
"persistent_reserved[%i]", i);
}
for (i = 0; i < c->replicas.nr; i++) {
- struct bch_replicas_entry *e =
+ struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(&c->replicas, i);
if (metadata_only &&
printbuf_reset(&buf);
bch2_replicas_entry_to_text(&buf, e);
- copy_fs_field(replicas[i], "%s", buf.buf);
+ copy_fs_field(fs_usage_replicas_wrong,
+ replicas[i], "%s", buf.buf);
}
}
fsck_err:
if (ca)
percpu_ref_put(&ca->ref);
- if (ret)
- bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+ bch_err_fn(c, ret);
percpu_up_write(&c->mark_lock);
printbuf_exit(&buf);
static int bch2_gc_start(struct bch_fs *c)
{
- struct bch_dev *ca = NULL;
- unsigned i;
-
BUG_ON(c->usage_gc);
c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
return -BCH_ERR_ENOMEM_gc_start;
}
- for_each_member_device(ca, c, i) {
+ for_each_member_device(c, ca) {
BUG_ON(ca->usage_gc);
ca->usage_gc = alloc_percpu(struct bch_dev_usage);
static int bch2_gc_reset(struct bch_fs *c)
{
- struct bch_dev *ca;
- unsigned i;
-
- for_each_member_device(ca, c, i) {
+ for_each_member_device(c, ca) {
free_percpu(ca->usage_gc);
ca->usage_gc = NULL;
}
enum bch_data_type type;
int ret;
- if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
- return 1;
-
old = bch2_alloc_to_v4(k, &old_convert);
new = *old;
if (c->opts.reconstruct_alloc ||
fsck_err_on(new.data_type != gc.data_type, c,
+ alloc_key_data_type_wrong,
"bucket %llu:%llu gen %u has wrong data_type"
": got %s, should be %s",
iter->pos.inode, iter->pos.offset,
bch2_data_types[gc.data_type]))
new.data_type = gc.data_type;
-#define copy_bucket_field(_f) \
+#define copy_bucket_field(_errtype, _f) \
if (c->opts.reconstruct_alloc || \
- fsck_err_on(new._f != gc._f, c, \
+ fsck_err_on(new._f != gc._f, c, _errtype, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
": got %u, should be %u", \
iter->pos.inode, iter->pos.offset, \
new._f, gc._f)) \
new._f = gc._f; \
- copy_bucket_field(gen);
- copy_bucket_field(dirty_sectors);
- copy_bucket_field(cached_sectors);
- copy_bucket_field(stripe_redundancy);
- copy_bucket_field(stripe);
+ copy_bucket_field(alloc_key_gen_wrong,
+ gen);
+ copy_bucket_field(alloc_key_dirty_sectors_wrong,
+ dirty_sectors);
+ copy_bucket_field(alloc_key_cached_sectors_wrong,
+ cached_sectors);
+ copy_bucket_field(alloc_key_stripe_wrong,
+ stripe);
+ copy_bucket_field(alloc_key_stripe_redundancy_wrong,
+ stripe_redundancy);
#undef copy_bucket_field
if (!bch2_alloc_v4_cmp(*old, new))
static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_dev *ca;
- unsigned i;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-
- for_each_member_device(ca, c, i) {
- ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
- POS(ca->dev_idx, ca->mi.first_bucket),
- BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
- NULL, NULL, BTREE_INSERT_LAZY_RW,
- bch2_alloc_write_key(&trans, &iter, k, metadata_only));
-
- if (ret < 0) {
- bch_err(c, "error writing alloc info: %s", bch2_err_str(ret));
+ for_each_member_device(c, ca) {
+ ret = bch2_trans_run(c,
+ for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, ca->mi.first_bucket),
+ POS(ca->dev_idx, ca->mi.nbuckets - 1),
+ BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
+ bch2_alloc_write_key(trans, &iter, k, metadata_only)));
+ if (ret) {
percpu_ref_put(&ca->ref);
break;
}
}
- bch2_trans_exit(&trans);
- return ret < 0 ? ret : 0;
+ bch_err_fn(c, ret);
+ return ret;
}
static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
{
- struct bch_dev *ca;
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bucket *g;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
- unsigned i;
- int ret;
-
- for_each_member_device(ca, c, i) {
+ for_each_member_device(c, ca) {
struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket),
GFP_KERNEL|__GFP_ZERO);
buckets->first_bucket = ca->mi.first_bucket;
buckets->nbuckets = ca->mi.nbuckets;
rcu_assign_pointer(ca->buckets_gc, buckets);
-	};
-
-	bch2_trans_init(&trans, c, 0, 0);
-
-	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
-			   BTREE_ITER_PREFETCH, k, ret) {
-		ca = bch_dev_bkey_exists(c, k.k->p.inode);
-		g = gc_bucket(ca, k.k->p.offset);
-
-		a = bch2_alloc_to_v4(k, &a_convert);
-
-		g->gen_valid	= 1;
-		g->gen		= a->gen;
-
-		if (metadata_only &&
-		    (a->data_type == BCH_DATA_user ||
-		     a->data_type == BCH_DATA_cached ||
-		     a->data_type == BCH_DATA_parity)) {
-			g->data_type		= a->data_type;
-			g->dirty_sectors	= a->dirty_sectors;
-			g->cached_sectors	= a->cached_sectors;
-			g->stripe		= a->stripe;
-			g->stripe_redundancy	= a->stripe_redundancy;
-		}
-	}
-	bch2_trans_iter_exit(&trans, &iter);
-
-	bch2_trans_exit(&trans);
-
-	if (ret)
-		bch_err(c, "error reading alloc info at gc start: %s", bch2_err_str(ret));
+	}
+
+	int ret = bch2_trans_run(c,
+		for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+				   BTREE_ITER_PREFETCH, k, ({
+			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bucket *g = gc_bucket(ca, k.k->p.offset);
+
+			struct bch_alloc_v4 a_convert;
+			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+			g->gen_valid	= 1;
+			g->gen		= a->gen;
+
+			if (metadata_only &&
+			    (a->data_type == BCH_DATA_user ||
+			     a->data_type == BCH_DATA_cached ||
+			     a->data_type == BCH_DATA_parity)) {
+				g->data_type		= a->data_type;
+				g->dirty_sectors	= a->dirty_sectors;
+				g->cached_sectors	= a->cached_sectors;
+				g->stripe		= a->stripe;
+				g->stripe_redundancy	= a->stripe_redundancy;
+			}
+			/*
+			 * the loop body is a statement-expression: it must
+			 * evaluate to an errcode, 0 on success
+			 */
+			0;
+		})));
+	bch_err_fn(c, ret);
return ret;
}
static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
{
- struct bch_dev *ca;
- unsigned i;
-
- for_each_member_device(ca, c, i) {
+ for_each_member_device(c, ca) {
struct bucket_array *buckets = gc_bucket_array(ca);
struct bucket *g;
g->dirty_sectors = 0;
g->cached_sectors = 0;
}
- };
+ }
}
static int bch2_gc_write_reflink_key(struct btree_trans *trans,
}
if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+ reflink_v_refcount_wrong,
"reflink key has wrong refcount:\n"
" %s\n"
" should be %u",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
- struct bkey_i *new = bch2_bkey_make_mut(trans, k);
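+		/*
+		 * bch2_bkey_make_mut() with an iter argument queues the
+		 * update in the transaction itself, so no explicit
+		 * bch2_trans_update() call is needed anymore:
+		 */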
+ struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
if (!r->refcount)
new->k.type = KEY_TYPE_deleted;
else
- *bkey_refcount(new) = cpu_to_le64(r->refcount);
-
- ret = bch2_trans_update(trans, iter, new, 0);
+ *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
}
fsck_err:
printbuf_exit(&buf);
static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
size_t idx = 0;
- int ret = 0;
if (metadata_only)
return 0;
- bch2_trans_init(&trans, c, 0, 0);
-
- ret = for_each_btree_key_commit(&trans, iter,
- BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_PREFETCH, k,
- NULL, NULL, BTREE_INSERT_NOFAIL,
- bch2_gc_write_reflink_key(&trans, &iter, k, &idx));
-
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_reflink, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
c->reflink_gc_nr = 0;
- bch2_trans_exit(&trans);
return ret;
}
static int bch2_gc_reflink_start(struct bch_fs *c,
bool metadata_only)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct reflink_gc *r;
- int ret = 0;
if (metadata_only)
return 0;
- bch2_trans_init(&trans, c, 0, 0);
c->reflink_gc_nr = 0;
-	for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
-			   BTREE_ITER_PREFETCH, k, ret) {
-		const __le64 *refcount = bkey_refcount_c(k);
-
-		if (!refcount)
-			continue;
-
-		r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
-				       GFP_KERNEL);
-		if (!r) {
-			ret = -BCH_ERR_ENOMEM_gc_reflink_start;
-			break;
-		}
-
-		r->offset	= k.k->p.offset;
-		r->size		= k.k->size;
-		r->refcount	= 0;
-	}
-	bch2_trans_iter_exit(&trans, &iter);
-
-	bch2_trans_exit(&trans);
+	int ret = bch2_trans_run(c,
+		for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
+				   BTREE_ITER_PREFETCH, k, ({
+			const __le64 *refcount = bkey_refcount_c(k);
+
+			if (!refcount)
+				continue;
+
+			struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
+							c->reflink_gc_nr++, GFP_KERNEL);
+			if (!r) {
+				ret = -BCH_ERR_ENOMEM_gc_reflink_start;
+				break;
+			}
+
+			r->offset	= k.k->p.offset;
+			r->size		= k.k->size;
+			r->refcount	= 0;
+			0;
+		})));
+	bch_err_fn(c, ret);
return ret;
}
if (bad)
bch2_bkey_val_to_text(&buf, c, k);
- if (fsck_err_on(bad, c, "%s", buf.buf)) {
+ if (fsck_err_on(bad, c, stripe_sector_count_wrong,
+ "%s", buf.buf)) {
struct bkey_i_stripe *new;
new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
if (metadata_only)
return 0;
- bch2_trans_init(&trans, c, 0, 0);
-
- ret = for_each_btree_key_commit(&trans, iter,
- BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_PREFETCH, k,
- NULL, NULL, BTREE_INSERT_NOFAIL,
- bch2_gc_write_stripes_key(&trans, &iter, k));
-
- bch2_trans_exit(&trans);
- return ret;
+ return bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_stripes, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ bch2_gc_write_stripes_key(trans, &iter, k)));
}
static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
/**
* bch2_gc - walk _all_ references to buckets, and recompute them:
*
+ * @c: filesystem object
+ * @initial: are we in recovery?
+ * @metadata_only: are we just checking metadata references, or everything?
+ *
+ * Returns: 0 on success, or standard errcode on failure
+ *
* Order matters here:
* - Concurrent GC relies on the fact that we have a total ordering for
* everything that GC walks - see gc_will_visit_node(),
bch2_mark_superblocks(c);
- if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb) &&
- !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags) &&
- c->opts.fix_errors != FSCK_OPT_NO) {
- bch_info(c, "Starting topology repair pass");
- ret = bch2_repair_topology(c);
- if (ret)
- goto out;
- bch_info(c, "Topology repair pass done");
-
- set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags);
- }
-
ret = bch2_gc_btrees(c, initial, metadata_only);
- if (ret == -BCH_ERR_need_topology_repair &&
- !test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) &&
- !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
- set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
- SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, true);
- ret = 0;
- }
-
- if (ret == -BCH_ERR_need_topology_repair)
- ret = -BCH_ERR_fsck_errors_not_fixed;
-
if (ret)
goto out;
#endif
c->gc_count++;
- if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
+ if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
(!iter && bch2_test_restart_gc)) {
if (iter++ > 2) {
bch_info(c, "Unable to fix bucket gens, looping");
* XXX: make sure gens we fixed got saved
*/
bch_info(c, "Second GC pass needed, restarting:");
- clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ clear_bit(BCH_FS_need_another_gc, &c->flags);
__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
bch2_gc_stripes_reset(c, metadata_only);
* allocator thread - issue wakeup in case they blocked on gc_lock:
*/
closure_wake_up(&c->freelist_wait);
+ bch_err_fn(c, ret);
return ret;
}
{
struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
struct bkey_i *u;
int ret;
percpu_up_read(&c->mark_lock);
return 0;
update:
- u = bch2_bkey_make_mut(trans, k);
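+	/* make_mut with an iter queues the update; no bch2_trans_update() call */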
+ u = bch2_bkey_make_mut(trans, iter, &k, 0);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
return ret;
bch2_extent_normalize(c, bkey_i_to_s(u));
- return bch2_trans_update(trans, iter, u, 0);
+ return 0;
}
static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
int bch2_gc_gens(struct bch_fs *c)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_dev *ca;
u64 b, start_time = local_clock();
- unsigned i;
int ret;
/*
trace_and_count(c, gc_gens_start, c);
down_read(&c->gc_lock);
- bch2_trans_init(&trans, c, 0, 0);
- for_each_member_device(ca, c, i) {
- struct bucket_gens *gens;
+ for_each_member_device(c, ca) {
+ struct bucket_gens *gens = bucket_gens(ca);
BUG_ON(ca->oldest_gen);
- ca->oldest_gen = kvmalloc(ca->mi.nbuckets, GFP_KERNEL);
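+		/*
+		 * Size off the gens array we copy from below -- it may not
+		 * match ca->mi.nbuckets (e.g. mid-resize):
+		 */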
+ ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
if (!ca->oldest_gen) {
percpu_ref_put(&ca->ref);
ret = -BCH_ERR_ENOMEM_gc_gens;
goto err;
}
- gens = bucket_gens(ca);
-
for (b = gens->first_bucket;
b < gens->nbuckets; b++)
ca->oldest_gen[b] = gens->b[b];
}
- for (i = 0; i < BTREE_ID_NR; i++)
+ for (unsigned i = 0; i < BTREE_ID_NR; i++)
if (btree_type_has_ptrs(i)) {
- struct btree_iter iter;
- struct bkey_s_c k;
-
c->gc_gens_btree = i;
c->gc_gens_pos = POS_MIN;
- ret = for_each_btree_key_commit(&trans, iter, i,
- POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
- k,
- NULL, NULL,
- BTREE_INSERT_NOFAIL,
- gc_btree_gens_key(&trans, &iter, k));
- if (ret && !bch2_err_matches(ret, EROFS))
- bch_err(c, "error recalculating oldest_gen: %s", bch2_err_str(ret));
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, i,
+ POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ k,
+ NULL, NULL,
+ BCH_TRANS_COMMIT_no_enospc,
+ gc_btree_gens_key(trans, &iter, k)));
if (ret)
goto err;
}
- ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
- POS_MIN,
- BTREE_ITER_PREFETCH,
- k,
- NULL, NULL,
- BTREE_INSERT_NOFAIL,
- bch2_alloc_write_oldest_gen(&trans, &iter, k));
- if (ret && !bch2_err_matches(ret, EROFS))
- bch_err(c, "error writing oldest_gen: %s", bch2_err_str(ret));
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+ POS_MIN,
+ BTREE_ITER_PREFETCH,
+ k,
+ NULL, NULL,
+ BCH_TRANS_COMMIT_no_enospc,
+ bch2_alloc_write_oldest_gen(trans, &iter, k)));
if (ret)
goto err;
bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
trace_and_count(c, gc_gens_end, c);
err:
- for_each_member_device(ca, c, i) {
+ for_each_member_device(c, ca) {
kvfree(ca->oldest_gen);
ca->oldest_gen = NULL;
}
- bch2_trans_exit(&trans);
up_read(&c->gc_lock);
mutex_unlock(&c->gc_gens_lock);
+ if (!bch2_err_matches(ret, EROFS))
+ bch_err_fn(c, ret);
return ret;
}
struct io_clock *clock = &c->io_clock[WRITE];
unsigned long last = atomic64_read(&clock->now);
unsigned last_kick = atomic_read(&c->kick_gc);
- int ret;
set_freezable();
#if 0
ret = bch2_gc(c, false, false);
#else
- ret = bch2_gc_gens(c);
+ bch2_gc_gens(c);
#endif
- if (ret < 0)
- bch_err(c, "btree gc failed: %s", bch2_err_str(ret));
-
debug_check_no_locks_held();
}
p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
if (IS_ERR(p)) {
- bch_err(c, "error creating gc thread: %s", bch2_err_str(PTR_ERR(p)));
+ bch_err_fn(c, PTR_ERR(p));
return PTR_ERR(p);
}