X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fmove.c;h=39a14e3216807d222fc11856fa285448f4a24ca3;hb=6a34032417d9bb90ead6f3b7bf891347bc4a1ed3;hp=7dac9264304e4188f43d591790900e4fa3bed518;hpb=46a6b9210c927ab46fd1227cb6f641be0b4a7505;p=bcachefs-tools-debian

diff --git a/libbcachefs/move.c b/libbcachefs/move.c
index 7dac926..39a14e3 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -14,17 +14,50 @@
 #include "errcode.h"
 #include "error.h"
 #include "inode.h"
-#include "io.h"
+#include "io_read.h"
+#include "io_write.h"
 #include "journal_reclaim.h"
+#include "keylist.h"
 #include "move.h"
 #include "replicas.h"
 #include "super-io.h"
-#include "keylist.h"
+#include "trace.h"
 
 #include <linux/ioprio.h>
 #include <linux/kthread.h>
 
-#include <trace/events/bcachefs.h>
+static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
+{
+	if (trace_move_extent_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		trace_move_extent(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
+
+static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
+{
+	if (trace_move_extent_read_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		trace_move_extent_read(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
+
+static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c k)
+{
+	if (trace_move_extent_alloc_mem_fail_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		trace_move_extent_alloc_mem_fail(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
 
 static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
 {
@@ -41,28 +74,36 @@ static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
 }
 
 struct moving_io {
-	struct list_head	list;
-	struct closure		cl;
-	bool			read_completed;
+	struct list_head		read_list;
+	struct list_head		io_list;
+	struct move_bucket_in_flight	*b;
+	struct closure			cl;
+	bool				read_completed;
 
-	unsigned		read_sectors;
-	unsigned		write_sectors;
+	unsigned			read_sectors;
+	unsigned			write_sectors;
 
-	struct bch_read_bio	rbio;
+	struct bch_read_bio		rbio;
 
-	struct data_update	write;
+	struct data_update		write;
 	/* Must be last since it is variable size */
-	struct bio_vec		bi_inline_vecs[0];
+	struct bio_vec			bi_inline_vecs[0];
 };
 
 static void move_free(struct moving_io *io)
 {
 	struct moving_context *ctxt = io->write.ctxt;
-	struct bch_fs *c = ctxt->c;
+
+	if (io->b)
+		atomic_dec(&io->b->count);
 
 	bch2_data_update_exit(&io->write);
+
+	mutex_lock(&ctxt->lock);
+	list_del(&io->io_list);
 	wake_up(&ctxt->wait);
-	bch2_write_ref_put(c, BCH_WRITE_REF_move);
+	mutex_unlock(&ctxt->lock);
+
 	kfree(io);
 }
 
@@ -97,7 +138,7 @@ static void move_write(struct moving_io *io)
 struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
 {
 	struct moving_io *io =
-		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);
+		list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);
 
 	return io && io->read_completed ? io : NULL;
 }
@@ -124,7 +165,7 @@ void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
 		bch2_trans_unlock(trans);
 
 	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
-		list_del(&io->list);
+		list_del(&io->read_list);
 		move_write(io);
 	}
 }
@@ -141,6 +182,8 @@ static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
 
 void bch2_moving_ctxt_exit(struct moving_context *ctxt)
 {
+	struct bch_fs *c = ctxt->c;
+
 	move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
 	closure_sync(&ctxt->cl);
 
@@ -150,12 +193,15 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt)
 	EBUG_ON(atomic_read(&ctxt->read_ios));
 
 	if (ctxt->stats) {
-		progress_list_del(ctxt->c, ctxt->stats);
-
-		trace_move_data(ctxt->c,
+		progress_list_del(c, ctxt->stats);
+		trace_move_data(c,
 				atomic64_read(&ctxt->stats->sectors_moved),
 				atomic64_read(&ctxt->stats->keys_moved));
 	}
+
+	mutex_lock(&c->moving_context_lock);
+	list_del(&ctxt->list);
+	mutex_unlock(&c->moving_context_lock);
 }
 
 void bch2_moving_ctxt_init(struct moving_context *ctxt,
@@ -168,15 +214,23 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
 	memset(ctxt, 0, sizeof(*ctxt));
 
 	ctxt->c		= c;
+	ctxt->fn	= (void *) _RET_IP_;
 	ctxt->rate	= rate;
 	ctxt->stats	= stats;
 	ctxt->wp	= wp;
 	ctxt->wait_on_copygc = wait_on_copygc;
 
 	closure_init_stack(&ctxt->cl);
+
+	mutex_init(&ctxt->lock);
 	INIT_LIST_HEAD(&ctxt->reads);
+	INIT_LIST_HEAD(&ctxt->ios);
 	init_waitqueue_head(&ctxt->wait);
 
+	mutex_lock(&c->moving_context_lock);
+	list_add(&ctxt->list, &c->moving_context_list);
+	mutex_unlock(&c->moving_context_lock);
+
 	if (stats) {
 		progress_list_add(c, stats);
 		stats->data_type = BCH_DATA_user;
@@ -198,7 +252,7 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans,
 	struct bkey_i *n;
 	int ret;
 
-	n = bch2_bkey_make_mut(trans, k);
+	n = bch2_bkey_make_mut_noupdate(trans, k);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
 		return ret;
@@ -227,13 +281,15 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans,
 	if (bkey_deleted(&n->k))
 		n->k.size = 0;
 
-	return bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+	return bch2_trans_relock(trans) ?:
+		bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
 		bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
 }
 
 static int bch2_move_extent(struct btree_trans *trans,
 			    struct btree_iter *iter,
 			    struct moving_context *ctxt,
+			    struct move_bucket_in_flight *bucket_in_flight,
 			    struct bch_io_opts io_opts,
 			    enum btree_id btree_id,
 			    struct bkey_s_c k,
@@ -247,6 +303,8 @@ static int bch2_move_extent(struct btree_trans *trans,
 	unsigned sectors = k.k->size, pages;
 	int ret = -ENOMEM;
 
+	trace_move_extent2(c, k);
+
 	bch2_data_update_opts_normalize(k, &data_opts);
 
 	if (!data_opts.rewrite_ptrs &&
@@ -256,9 +314,6 @@ static int bch2_move_extent(struct btree_trans *trans,
 		return 0;
 	}
 
-	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_move))
-		return -BCH_ERR_erofs_no_writes;
-
 	/*
 	 * Before memory allocations & taking nocow locks in
 	 * bch2_data_update_init():
@@ -275,6 +330,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (!io)
 		goto err;
 
+	INIT_LIST_HEAD(&io->io_list);
 	io->write.ctxt		= ctxt;
 	io->read_sectors	= k.k->size;
 	io->write_sectors	= k.k->size;
@@ -294,7 +350,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 	io->rbio.bio.bi_iter.bi_size	= sectors << 9;
 
-	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
+	io->rbio.bio.bi_opf		= REQ_OP_READ;
 	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
 	io->rbio.bio.bi_end_io		= move_read_endio;
 
@@ -303,12 +359,6 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (ret && ret != -BCH_ERR_unwritten_extent_update)
 		goto err_free_pages;
 
-	io->write.ctxt = ctxt;
-	io->write.op.end_io = move_write_done;
-
-	atomic64_inc(&ctxt->stats->keys_moved);
-	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
 	if (ret == -BCH_ERR_unwritten_extent_update) {
 		bch2_update_unwritten_extent(trans, &io->write);
 		move_free(io);
@@ -317,13 +367,30 @@ static int bch2_move_extent(struct btree_trans *trans,
 
 	BUG_ON(ret);
 
+	io->write.ctxt = ctxt;
+	io->write.op.end_io = move_write_done;
+
+	if (ctxt->stats) {
+		atomic64_inc(&ctxt->stats->keys_moved);
+		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+	}
+
+	if (bucket_in_flight) {
+		io->b = bucket_in_flight;
+		atomic_inc(&io->b->count);
+	}
+
 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
 	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
-	trace_move_extent_read(k.k);
+	trace_move_extent_read2(c, k);
 
+	mutex_lock(&ctxt->lock);
 	atomic_add(io->read_sectors, &ctxt->read_sectors);
 	atomic_inc(&ctxt->read_ios);
-	list_add_tail(&io->list, &ctxt->reads);
+
+	list_add_tail(&io->read_list, &ctxt->reads);
+	list_add_tail(&io->io_list, &ctxt->ios);
+	mutex_unlock(&ctxt->lock);
 
 	/*
 	 * dropped by move_read_endio() - guards against use after free of
@@ -341,8 +408,8 @@ err_free_pages:
 err_free:
 	kfree(io);
 err:
-	bch2_write_ref_put(c, BCH_WRITE_REF_move);
-	trace_and_count(c, move_extent_alloc_mem_fail, k.k);
+	this_cpu_inc(c->counters[BCH_COUNTER_move_extent_alloc_mem_fail]);
+	trace_move_extent_alloc_mem_fail2(c, k);
 	return ret;
 }
 
@@ -361,7 +428,7 @@ static int lookup_inode(struct btree_trans *trans, struct bpos pos,
 		goto err;
 
 	if (!k.k || !bkey_eq(k.k->p, pos)) {
-		ret = -ENOENT;
+		ret = -BCH_ERR_ENOENT_inode;
 		goto err;
 	}
 
@@ -458,7 +525,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 	struct bch_fs *c = ctxt->c;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
 	struct bkey_buf sk;
-	struct btree_trans trans;
+	struct btree_trans *trans = bch2_trans_get(c);
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct data_update_opts data_opts;
@@ -466,21 +533,22 @@ static int __bch2_move_data(struct moving_context *ctxt,
 	int ret = 0, ret2;
 
 	bch2_bkey_buf_init(&sk);
-	bch2_trans_init(&trans, c, 0, 0);
 
-	ctxt->stats->data_type	= BCH_DATA_user;
-	ctxt->stats->btree_id	= btree_id;
-	ctxt->stats->pos	= start;
+	if (ctxt->stats) {
+		ctxt->stats->data_type	= BCH_DATA_user;
+		ctxt->stats->btree_id	= btree_id;
+		ctxt->stats->pos	= start;
+	}
 
-	bch2_trans_iter_init(&trans, &iter, btree_id, start,
+	bch2_trans_iter_init(trans, &iter, btree_id, start,
 			     BTREE_ITER_PREFETCH|
 			     BTREE_ITER_ALL_SNAPSHOTS);
 
 	if (ctxt->rate)
 		bch2_ratelimit_reset(ctxt->rate);
 
-	while (!move_ratelimit(&trans, ctxt)) {
-		bch2_trans_begin(&trans);
+	while (!move_ratelimit(trans, ctxt)) {
+		bch2_trans_begin(trans);
 
 		k = bch2_btree_iter_peek(&iter);
 		if (!k.k)
@@ -495,12 +563,13 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
-		ctxt->stats->pos = iter.pos;
+		if (ctxt->stats)
+			ctxt->stats->pos = iter.pos;
 
 		if (!bkey_extent_is_direct_data(k.k))
 			goto next_nondata;
 
-		ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum);
+		ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
 		if (ret)
 			continue;
 
@@ -514,17 +583,16 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		 */
 		bch2_bkey_buf_reassemble(&sk, c, k);
 		k = bkey_i_to_s_c(sk.k);
-		bch2_trans_unlock(&trans);
 
-		ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
-					btree_id, k, data_opts);
+		ret2 = bch2_move_extent(trans, &iter, ctxt, NULL,
+					io_opts, btree_id, k, data_opts);
 		if (ret2) {
 			if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
 				continue;
 
 			if (ret2 == -ENOMEM) {
 				/* memory allocation failure, wait for some IO to finish */
-				bch2_move_ctxt_wait_for_io(ctxt, &trans);
+				bch2_move_ctxt_wait_for_io(ctxt, trans);
 				continue;
 			}
 
@@ -535,13 +603,14 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (ctxt->rate)
 			bch2_ratelimit_increment(ctxt->rate, k.k->size);
next:
-		atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+		if (ctxt->stats)
+			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
 		bch2_btree_iter_advance(&iter);
 	}
 
-	bch2_trans_iter_exit(&trans, &iter);
-	bch2_trans_exit(&trans);
+	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_put(trans);
 	bch2_bkey_buf_exit(&sk, c);
 
 	return ret;
@@ -558,12 +627,12 @@ int bch2_move_data(struct bch_fs *c,
 {
 	struct moving_context ctxt;
 	enum btree_id id;
-	int ret;
+	int ret = 0;
 
 	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
 
 	for (id = start_btree_id;
-	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
+	     id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
 	     id++) {
 		stats->btree_id = id;
 
@@ -571,6 +640,9 @@ int bch2_move_data(struct bch_fs *c,
 		    id != BTREE_ID_reflink)
 			continue;
 
+		if (!bch2_btree_id_root(c, id)->b)
+			continue;
+
 		ret = __bch2_move_data(&ctxt,
 				       id == start_btree_id ? start_pos : POS_MIN,
 				       id == end_btree_id   ? end_pos   : POS_MAX,
@@ -584,77 +656,9 @@ int bch2_move_data(struct bch_fs *c,
 	return ret;
 }
 
-static noinline void verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
-{
-	struct bch_fs *c = trans->c;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
-	struct bch_backpointer bp;
-	u64 bp_offset = 0;
-	int ret;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-			     bucket, BTREE_ITER_CACHED);
-again:
-	ret = lockrestart_do(trans,
-			bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
-
-	if (!ret && k.k->type == KEY_TYPE_alloc_v4) {
-		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
-
-		if (a.v->gen == gen &&
-		    a.v->dirty_sectors) {
-			if (a.v->data_type == BCH_DATA_btree) {
-				bch2_trans_unlock(trans);
-				if (bch2_btree_interior_updates_flush(c))
-					goto again;
-				goto failed_to_evacuate;
-			}
-		}
-	}
-
-	bch2_trans_iter_exit(trans, &iter);
-	return;
-failed_to_evacuate:
-	bch2_trans_iter_exit(trans, &iter);
-
-	prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
-	bch2_bkey_val_to_text(&buf, c, k);
-
-	while (1) {
-		bch2_trans_begin(trans);
-
-		ret = bch2_get_next_backpointer(trans, bucket, gen,
-						&bp_offset, &bp,
-						BTREE_ITER_CACHED);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (bp_offset == U64_MAX)
-			break;
-
-		k = bch2_backpointer_get_key(trans, &iter,
-					     bucket, bp_offset, bp);
-		ret = bkey_err(k);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (!k.k)
-			continue;
-		prt_newline(&buf);
-		bch2_bkey_val_to_text(&buf, c, k);
-		bch2_trans_iter_exit(trans, &iter);
-	}
-
-	bch2_print_string_as_lines(KERN_ERR, buf.buf);
-	printbuf_exit(&buf);
-}
-
 int __bch2_evacuate_bucket(struct btree_trans *trans,
 			   struct moving_context *ctxt,
+			   struct move_bucket_in_flight *bucket_in_flight,
 			   struct bpos bucket, int gen,
 			   struct data_update_opts _data_opts)
 {
@@ -669,11 +673,19 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 	struct data_update_opts data_opts;
 	unsigned dirty_sectors, bucket_size;
 	u64 fragmentation;
-	u64 bp_offset = 0, cur_inum = U64_MAX;
+	u64 cur_inum = U64_MAX;
+	struct bpos bp_pos = POS_MIN;
 	int ret = 0;
 
+	trace_bucket_evacuate(c, &bucket);
+
 	bch2_bkey_buf_init(&sk);
 
+	/*
+	 * We're not run in a context that handles transaction restarts:
+	 */
+	bch2_trans_begin(trans);
+
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
 			     bucket, BTREE_ITER_CACHED);
 	ret = lockrestart_do(trans,
@@ -681,7 +693,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 	bch2_trans_iter_exit(trans, &iter);
 
 	if (ret) {
-		bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
+		bch_err_msg(c, ret, "looking up alloc key");
 		goto err;
 	}
 
@@ -692,7 +704,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 
 	ret = bch2_btree_write_buffer_flush(trans);
 	if (ret) {
-		bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
+		bch_err_msg(c, ret, "flushing btree write buffer");
 		goto err;
 	}
 
@@ -700,22 +712,20 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 		bch2_trans_begin(trans);
 
 		ret = bch2_get_next_backpointer(trans, bucket, gen,
-						&bp_offset, &bp,
+						&bp_pos, &bp,
 						BTREE_ITER_CACHED);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			continue;
 		if (ret)
 			goto err;
-		if (bp_offset == U64_MAX)
+		if (bkey_eq(bp_pos, POS_MAX))
 			break;
 
 		if (!bp.level) {
 			const struct bch_extent_ptr *ptr;
-			struct bkey_s_c k;
 			unsigned i = 0;
 
-			k = bch2_backpointer_get_key(trans, &iter,
-						     bucket, bp_offset, bp);
+			k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
 			ret = bkey_err(k);
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
@@ -738,13 +748,19 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			data_opts.rewrite_ptrs = 0;
 
 			bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
-				if (ptr->dev == bucket.inode)
+				if (ptr->dev == bucket.inode) {
 					data_opts.rewrite_ptrs |= 1U << i;
+					if (ptr->cached) {
+						bch2_trans_iter_exit(trans, &iter);
+						goto next;
+					}
+				}
 				i++;
 			}
 
-			ret = bch2_move_extent(trans, &iter, ctxt, io_opts,
-					       bp.btree_id, k, data_opts);
+			ret = bch2_move_extent(trans, &iter, ctxt,
+					       bucket_in_flight,
+					       io_opts, bp.btree_id, k, data_opts);
 			bch2_trans_iter_exit(trans, &iter);
 
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -759,12 +775,12 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate, k.k->size);
-			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+			if (ctxt->stats)
+				atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 		} else {
 			struct btree *b;
 
-			b = bch2_backpointer_get_node(trans, &iter,
-						      bucket, bp_offset, bp);
+			b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp);
 			ret = PTR_ERR_OR_ZERO(b);
 			if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
 				continue;
@@ -786,22 +802,16 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate, c->opts.btree_node_size >> 9);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+			if (ctxt->stats) {
+				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+			}
 		}
next:
-		bp_offset++;
+		bp_pos = bpos_nosnap_successor(bp_pos);
 	}
 
 	trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
-
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && gen >= 0) {
-		bch2_trans_unlock(trans);
-		move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
-		closure_sync(&ctxt->cl);
-		if (!ctxt->write_error)
-			verify_bucket_evacuated(trans, bucket, gen);
-	}
err:
 	bch2_bkey_buf_exit(&sk, c);
 	return ret;
 }
@@ -815,15 +825,14 @@ int bch2_evacuate_bucket(struct bch_fs *c,
 			 struct write_point_specifier wp,
 			 bool wait_on_copygc)
 {
-	struct btree_trans trans;
+	struct btree_trans *trans = bch2_trans_get(c);
 	struct moving_context ctxt;
 	int ret;
 
-	bch2_trans_init(&trans, c, 0, 0);
 	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
-	ret = __bch2_evacuate_bucket(&trans, &ctxt, bucket, gen, data_opts);
+	ret = __bch2_evacuate_bucket(trans, &ctxt, NULL, bucket, gen, data_opts);
 	bch2_moving_ctxt_exit(&ctxt);
-	bch2_trans_exit(&trans);
+	bch2_trans_put(trans);
 
 	return ret;
 }
@@ -840,28 +849,30 @@ static int bch2_move_btree(struct bch_fs *c,
 {
 	bool kthread = (current->flags & PF_KTHREAD) != 0;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
-	struct btree_trans trans;
+	struct btree_trans *trans = bch2_trans_get(c);
 	struct btree_iter iter;
 	struct btree *b;
 	enum btree_id id;
 	struct data_update_opts data_opts;
 	int ret = 0;
 
-	bch2_trans_init(&trans, c, 0, 0);
 	progress_list_add(c, stats);
 
 	stats->data_type = BCH_DATA_btree;
 
 	for (id = start_btree_id;
-	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
+	     id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
 	     id++) {
 		stats->btree_id = id;
 
-		bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0,
+		if (!bch2_btree_id_root(c, id)->b)
+			continue;
+
+		bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
 					  BTREE_ITER_PREFETCH);
retry:
 		ret = 0;
-		while (bch2_trans_begin(&trans),
+		while (bch2_trans_begin(trans),
 		       (b = bch2_btree_iter_peek_node(&iter)) &&
 		       !(ret = PTR_ERR_OR_ZERO(b))) {
 			if (kthread && kthread_should_stop())
@@ -876,7 +887,7 @@ retry:
 			if (!pred(c, arg, b, &io_opts, &data_opts))
 				goto next;
 
-			ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret;
+			ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret;
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
 			if (ret)
@@ -887,16 +898,16 @@ next:
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 
-		bch2_trans_iter_exit(&trans, &iter);
+		bch2_trans_iter_exit(trans, &iter);
 
 		if (kthread && kthread_should_stop())
 			break;
 	}
 
-	bch2_trans_exit(&trans);
+	bch2_trans_put(trans);
 
 	if (ret)
-		bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
+		bch_err_fn(c, ret);
 
 	bch2_btree_interior_updates_flush(c);
 
@@ -1021,6 +1032,8 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
 		mutex_unlock(&c->sb_lock);
 	}
 
+	if (ret)
+		bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -1086,3 +1099,61 @@ int bch2_data_job(struct bch_fs *c,
 
 	return ret;
 }
+
+static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
+{
+	struct bch_move_stats *stats = ctxt->stats;
+	struct moving_io *io;
+
+	prt_printf(out, "%s (%ps):", stats->name, ctxt->fn);
+	prt_newline(out);
+
+	prt_printf(out, " data type %s btree_id %s position: ",
+		   bch2_data_types[stats->data_type],
+		   bch2_btree_ids[stats->btree_id]);
+	bch2_bpos_to_text(out, stats->pos);
+	prt_newline(out);
+	printbuf_indent_add(out, 2);
+
+	prt_printf(out, "reads: ios %u/%u sectors %u/%u",
+		   atomic_read(&ctxt->read_ios),
+		   c->opts.move_ios_in_flight,
+		   atomic_read(&ctxt->read_sectors),
+		   c->opts.move_bytes_in_flight >> 9);
+	prt_newline(out);
+
+	prt_printf(out, "writes: ios %u/%u sectors %u/%u",
+		   atomic_read(&ctxt->write_ios),
+		   c->opts.move_ios_in_flight,
+		   atomic_read(&ctxt->write_sectors),
+		   c->opts.move_bytes_in_flight >> 9);
+	prt_newline(out);
+
+	printbuf_indent_add(out, 2);
+
+	mutex_lock(&ctxt->lock);
+	list_for_each_entry(io, &ctxt->ios, io_list)
+		bch2_write_op_to_text(out, &io->write.op);
+	mutex_unlock(&ctxt->lock);
+
+	printbuf_indent_sub(out, 4);
+}
+
+void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	struct moving_context *ctxt;
+
+	mutex_lock(&c->moving_context_lock);
+	list_for_each_entry(ctxt, &c->moving_context_list, list)
+		bch2_moving_ctxt_to_text(out, c, ctxt);
+	mutex_unlock(&c->moving_context_lock);
+}
+
+void bch2_fs_move_init(struct bch_fs *c)
+{
+	INIT_LIST_HEAD(&c->moving_context_list);
+	mutex_init(&c->moving_context_lock);
+
+	INIT_LIST_HEAD(&c->data_progress_list);
+	mutex_init(&c->data_progress_lock);
+}
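The core structural change above is a registry: bch2_moving_ctxt_init() now adds each context to c->moving_context_list under c->moving_context_lock, bch2_moving_ctxt_exit() removes it, and every moving_io additionally sits on ctxt->ios, so bch2_fs_moving_ctxts_to_text() can walk all in-flight moves for reporting. A minimal userspace sketch of that registration pattern follows; the names and types here are illustrative stand-ins, not the bcachefs API.

#include <pthread.h>
#include <stdio.h>

struct ctx {				/* stand-in for struct moving_context */
	struct ctx *next;
	const char *name;
};

static struct ctx *ctx_list;		/* c->moving_context_list analogue */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static void ctx_init(struct ctx *c, const char *name)
{
	c->name = name;

	/* register under the lock, as bch2_moving_ctxt_init() does */
	pthread_mutex_lock(&ctx_lock);
	c->next = ctx_list;
	ctx_list = c;
	pthread_mutex_unlock(&ctx_lock);
}

static void ctx_exit(struct ctx *c)
{
	/* unregister under the same lock, as bch2_moving_ctxt_exit() does */
	pthread_mutex_lock(&ctx_lock);
	for (struct ctx **p = &ctx_list; *p; p = &(*p)->next)
		if (*p == c) {
			*p = c->next;
			break;
		}
	pthread_mutex_unlock(&ctx_lock);
}

/* bch2_fs_moving_ctxts_to_text() analogue: a consistent snapshot of
 * everything currently registered, taken under the list lock. */
static void dump_ctxs(void)
{
	pthread_mutex_lock(&ctx_lock);
	for (struct ctx *c = ctx_list; c; c = c->next)
		printf("%s\n", c->name);
	pthread_mutex_unlock(&ctx_lock);
}

This is also why move_free() now takes ctxt->lock before list_del(&io->io_list): the dumper may be walking ctxt->ios concurrently.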
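bch2_move_extent() also gains a move_bucket_in_flight argument: when non-NULL, queuing the read pins it via atomic_inc(&io->b->count), and move_free() drops the pin, so the caller (copygc) can tell when a bucket still has IO outstanding. A hedged stand-alone sketch of that pin/unpin protocol, with illustrative types:

#include <stdatomic.h>

struct bucket_in_flight {		/* stand-in for struct move_bucket_in_flight */
	atomic_int count;
};

/* taken in bch2_move_extent() when bucket_in_flight != NULL */
static void io_pin_bucket(struct bucket_in_flight *b)
{
	if (b)
		atomic_fetch_add(&b->count, 1);
}

/* dropped in move_free() once the write completes or the io is torn down */
static void io_unpin_bucket(struct bucket_in_flight *b)
{
	if (b)
		atomic_fetch_sub(&b->count, 1);
}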
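lookup_inode() now fails with -BCH_ERR_ENOENT_inode instead of bare -ENOENT: bcachefs private error codes identify the exact failure site while bch2_err_matches() still lets callers test against the generic errno class. A simplified userspace sketch of that matching idea (the real mapping lives in errcode.h and is more elaborate than this):

#include <errno.h>
#include <stdbool.h>

/* private codes live outside the normal errno range; values illustrative */
enum {
	ERR_ENOENT_inode = 2048,
	ERR_ENOENT_dirent,
};

/* collapse a private code to its generic errno class */
static int err_class(int err)
{
	switch (err) {
	case ERR_ENOENT_inode:
	case ERR_ENOENT_dirent:
		return ENOENT;
	default:
		return err;
	}
}

/* matches either the exact private code or its errno ancestor */
static bool err_matches(int err, int class)
{
	return err == class || err_class(err) == class;
}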
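Finally, __bch2_evacuate_bucket() no longer walks backpointers by a u64 bucket offset terminated by U64_MAX: bch2_get_next_backpointer() now yields a btree position in bp_pos, POS_MAX is the end sentinel, and the loop advances with bpos_nosnap_successor() (the real struct bpos also carries a snapshot field, which the nosnap successor leaves alone). A small sketch of successor-style iteration over a composite key, with illustrative types:

#include <stdbool.h>
#include <stdint.h>

struct pos {				/* illustrative two-field key */
	uint64_t inode;
	uint64_t offset;
};

#define POS_END ((struct pos) { UINT64_MAX, UINT64_MAX })

static bool pos_eq(struct pos a, struct pos b)
{
	return a.inode == b.inode && a.offset == b.offset;
}

/* successor in (inode, offset) order: bump offset, carry into inode */
static struct pos pos_successor(struct pos p)
{
	if (p.offset != UINT64_MAX) {
		p.offset++;
	} else {
		p.offset = 0;
		p.inode++;		/* POS_END itself has no successor */
	}
	return p;
}

/*
 * Loop shape after the change, mirroring the patched evacuate loop:
 *
 *	while (get_next(&p) == 0 && !pos_eq(p, POS_END)) {
 *		...process the entry at p...
 *		p = pos_successor(p);
 *	}
 */

Advancing by successor rather than a per-bucket counter means the iteration key is the same kind of position the backpointers btree is indexed by, so no translation between offset and position is needed on each step.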