X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fmove.c;h=2ec30a3fd19377937768172386b49cf2ef093610;hb=1f78fed4693a5361f56508daac59bebd5b556379;hp=11e25a31ed7f329d500df0a01d8502289d4a2c09;hpb=934a84dfaf719af82dadbbe0e2480baff03c905b;p=bcachefs-tools-debian

diff --git a/libbcachefs/move.c b/libbcachefs/move.c
index 11e25a3..2ec30a3 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "alloc_background.h"
 #include "alloc_foreground.h"
 #include "backpointers.h"
 #include "bkey_buf.h"
 #include "btree_gc.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
+#include "btree_write_buffer.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "errcode.h"
@@ -14,15 +16,47 @@
 #include "inode.h"
 #include "io.h"
 #include "journal_reclaim.h"
+#include "keylist.h"
 #include "move.h"
 #include "replicas.h"
 #include "super-io.h"
-#include "keylist.h"
+#include "trace.h"
 
 #include <linux/ioprio.h>
 #include <linux/kthread.h>
 
-#include <trace/events/bcachefs.h>
+static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
+{
+	if (trace_move_extent_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		trace_move_extent(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
+
+static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
+{
+	if (trace_move_extent_read_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		trace_move_extent_read(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
+
+static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c k)
+{
+	if (trace_move_extent_alloc_mem_fail_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		trace_move_extent_alloc_mem_fail(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
 
 static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
 {
@@ -39,28 +73,36 @@
 }
 
 struct moving_io {
-	struct list_head	list;
-	struct closure		cl;
-	bool			read_completed;
+	struct list_head	read_list;
+	struct list_head	io_list;
+	struct move_bucket_in_flight *b;
+	struct closure		cl;
+	bool			read_completed;
 
-	unsigned		read_sectors;
-	unsigned		write_sectors;
+	unsigned		read_sectors;
+	unsigned		write_sectors;
 
-	struct bch_read_bio	rbio;
+	struct bch_read_bio	rbio;
 
-	struct data_update	write;
+	struct data_update	write;
 	/* Must be last since it is variable size */
-	struct bio_vec		bi_inline_vecs[0];
+	struct bio_vec		bi_inline_vecs[0];
 };
 
 static void move_free(struct moving_io *io)
 {
 	struct moving_context *ctxt = io->write.ctxt;
-	struct bch_fs *c = ctxt->c;
+
+	if (io->b)
+		atomic_dec(&io->b->count);
 
 	bch2_data_update_exit(&io->write);
+
+	mutex_lock(&ctxt->lock);
+	list_del(&io->io_list);
 	wake_up(&ctxt->wait);
-	percpu_ref_put(&c->writes);
+	mutex_unlock(&ctxt->lock);
+
 	kfree(io);
 }
 
@@ -73,6 +115,7 @@ static void move_write_done(struct bch_write_op *op)
 		ctxt->write_error = true;
 
 	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
+	atomic_dec(&io->write.ctxt->write_ios);
 	move_free(io);
 	closure_put(&ctxt->cl);
 }
@@ -86,14 +129,15 @@ static void move_write(struct moving_io *io)
 	closure_get(&io->write.ctxt->cl);
 	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
+	atomic_inc(&io->write.ctxt->write_ios);
 
 	bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
 }
 
-static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
+struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
 {
 	struct moving_io *io =
-		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);
+		list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);
 
 	return io && io->read_completed ? io : NULL;
 }
@@ -104,35 +148,27 @@ static void move_read_endio(struct bio *bio)
 	struct moving_context *ctxt = io->write.ctxt;
 
 	atomic_sub(io->read_sectors, &ctxt->read_sectors);
+	atomic_dec(&ctxt->read_ios);
 	io->read_completed = true;
 
 	wake_up(&ctxt->wait);
 	closure_put(&ctxt->cl);
 }
 
-static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
+					struct btree_trans *trans)
 {
 	struct moving_io *io;
 
 	if (trans)
 		bch2_trans_unlock(trans);
 
-	while ((io = next_pending_write(ctxt))) {
-		list_del(&io->list);
+	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
+		list_del(&io->read_list);
 		move_write(io);
 	}
 }
 
-#define move_ctxt_wait_event(_ctxt, _trans, _cond)		\
-do {								\
-	do_pending_writes(_ctxt, _trans);			\
-								\
-	if (_cond)						\
-		break;						\
-	__wait_event((_ctxt)->wait,				\
-		     next_pending_write(_ctxt) || (_cond));	\
-} while (1)
-
 static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
 				       struct btree_trans *trans)
 {
@@ -145,17 +181,26 @@ static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
 
 void bch2_moving_ctxt_exit(struct moving_context *ctxt)
 {
+	struct bch_fs *c = ctxt->c;
+
 	move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
 	closure_sync(&ctxt->cl);
 
+	EBUG_ON(atomic_read(&ctxt->write_sectors));
+	EBUG_ON(atomic_read(&ctxt->write_ios));
+	EBUG_ON(atomic_read(&ctxt->read_sectors));
+	EBUG_ON(atomic_read(&ctxt->read_ios));
+
 	if (ctxt->stats) {
-		progress_list_del(ctxt->c, ctxt->stats);
-
-		trace_move_data(ctxt->c,
+		progress_list_del(c, ctxt->stats);
+		trace_move_data(c,
 				atomic64_read(&ctxt->stats->sectors_moved),
 				atomic64_read(&ctxt->stats->keys_moved));
 	}
+
+	mutex_lock(&c->moving_context_lock);
+	list_del(&ctxt->list);
+	mutex_unlock(&c->moving_context_lock);
 }
 
 void bch2_moving_ctxt_init(struct moving_context *ctxt,
@@ -168,15 +213,23 @@
 	memset(ctxt, 0, sizeof(*ctxt));
 
 	ctxt->c		= c;
+	ctxt->fn	= (void *) _RET_IP_;
 	ctxt->rate	= rate;
 	ctxt->stats	= stats;
 	ctxt->wp	= wp;
 	ctxt->wait_on_copygc = wait_on_copygc;
 
 	closure_init_stack(&ctxt->cl);
+
+	mutex_init(&ctxt->lock);
 	INIT_LIST_HEAD(&ctxt->reads);
+	INIT_LIST_HEAD(&ctxt->ios);
 	init_waitqueue_head(&ctxt->wait);
 
+	mutex_lock(&c->moving_context_lock);
+	list_add(&ctxt->list, &c->moving_context_list);
+	mutex_unlock(&c->moving_context_lock);
+
 	if (stats) {
 		progress_list_add(c, stats);
 		stats->data_type = BCH_DATA_user;
@@ -198,13 +251,11 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans,
 	struct bkey_i *n;
 	int ret;
 
-	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	n = bch2_bkey_make_mut_noupdate(trans, k);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
 		return ret;
 
-	bkey_reassemble(n, k);
-
 	while (data_opts.kill_ptrs) {
 		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
 		struct bch_extent_ptr *ptr;
@@ -229,13 +280,15 @@
 	if (bkey_deleted(&n->k))
 		n->k.size = 0;
 
-	return bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+	return bch2_trans_relock(trans) ?:
+		bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
 		bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
 }
 
 static int bch2_move_extent(struct btree_trans *trans,
 			    struct btree_iter *iter,
 			    struct moving_context *ctxt,
+			    struct move_bucket_in_flight *bucket_in_flight,
 			    struct bch_io_opts io_opts,
 			    enum btree_id btree_id,
 			    struct bkey_s_c k,
@@ -249,6 +302,8 @@ static int bch2_move_extent(struct btree_trans *trans,
 	unsigned sectors = k.k->size, pages;
 	int ret = -ENOMEM;
 
+	trace_move_extent2(c, k);
+
 	bch2_data_update_opts_normalize(k, &data_opts);
 
 	if (!data_opts.rewrite_ptrs &&
@@ -258,9 +313,6 @@
 		return 0;
 	}
 
-	if (!percpu_ref_tryget_live(&c->writes))
-		return -EROFS;
-
 	/*
 	 * Before memory allocations & taking nocow locks in
 	 * bch2_data_update_init():
@@ -277,6 +329,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (!io)
 		goto err;
 
+	INIT_LIST_HEAD(&io->io_list);
 	io->write.ctxt		= ctxt;
 	io->read_sectors	= k.k->size;
 	io->write_sectors	= k.k->size;
@@ -296,21 +349,15 @@ static int bch2_move_extent(struct btree_trans *trans,
 	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
 	io->rbio.bio.bi_iter.bi_size	= sectors << 9;
-	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
+	io->rbio.bio.bi_opf		= REQ_OP_READ;
 	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
 	io->rbio.bio.bi_end_io		= move_read_endio;
 
-	ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts,
-				    data_opts, btree_id, k);
+	ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
+				    io_opts, data_opts, btree_id, k);
 	if (ret && ret != -BCH_ERR_unwritten_extent_update)
 		goto err_free_pages;
 
-	io->write.ctxt = ctxt;
-	io->write.op.end_io = move_write_done;
-
-	atomic64_inc(&ctxt->stats->keys_moved);
-	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
 	if (ret == -BCH_ERR_unwritten_extent_update) {
 		bch2_update_unwritten_extent(trans, &io->write);
 		move_free(io);
@@ -319,12 +366,30 @@ static int bch2_move_extent(struct btree_trans *trans,
 
 	BUG_ON(ret);
 
+	io->write.ctxt = ctxt;
+	io->write.op.end_io = move_write_done;
+
+	if (ctxt->stats) {
+		atomic64_inc(&ctxt->stats->keys_moved);
+		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+	}
+
+	if (bucket_in_flight) {
+		io->b = bucket_in_flight;
+		atomic_inc(&io->b->count);
+	}
+
 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
 	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
-	trace_move_extent_read(k.k);
+	trace_move_extent_read2(c, k);
+
+	mutex_lock(&ctxt->lock);
 	atomic_add(io->read_sectors, &ctxt->read_sectors);
-	list_add_tail(&io->list, &ctxt->reads);
+	atomic_inc(&ctxt->read_ios);
+
+	list_add_tail(&io->read_list, &ctxt->reads);
+	list_add_tail(&io->io_list, &ctxt->ios);
+	mutex_unlock(&ctxt->lock);
 
 	/*
 	 * dropped by move_read_endio() - guards against use after free of
@@ -342,8 +407,8 @@ err_free_pages:
 err_free:
 	kfree(io);
 err:
-	percpu_ref_put(&c->writes);
-	trace_and_count(c, move_extent_alloc_mem_fail, k.k);
+	this_cpu_inc(c->counters[BCH_COUNTER_move_extent_alloc_mem_fail]);
+	trace_move_extent_alloc_mem_fail2(c, k);
 	return ret;
 }
@@ -413,13 +478,15 @@ static int move_ratelimit(struct btree_trans *trans,
 		}
 	} while (delay);
 
+	/*
+	 * XXX: these limits really ought to be per device, SSDs and hard drives
+	 * will want different limits
+	 */
 	move_ctxt_wait_event(ctxt, trans,
-		atomic_read(&ctxt->write_sectors) <
-		c->opts.move_bytes_in_flight >> 9);
-
-	move_ctxt_wait_event(ctxt, trans,
-		atomic_read(&ctxt->read_sectors) <
-		c->opts.move_bytes_in_flight >> 9);
+		atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
+		atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
+		atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
+		atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight);
 
 	return 0;
 }
@@ -467,9 +534,11 @@ static int __bch2_move_data(struct moving_context *ctxt,
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
 
-	ctxt->stats->data_type	= BCH_DATA_user;
-	ctxt->stats->btree_id	= btree_id;
-	ctxt->stats->pos	= start;
+	if (ctxt->stats) {
+		ctxt->stats->data_type	= BCH_DATA_user;
+		ctxt->stats->btree_id	= btree_id;
+		ctxt->stats->pos	= start;
+	}
 
 	bch2_trans_iter_init(&trans, &iter, btree_id, start,
 			     BTREE_ITER_PREFETCH|
@@ -494,7 +563,8 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
-		ctxt->stats->pos = iter.pos;
+		if (ctxt->stats)
+			ctxt->stats->pos = iter.pos;
 
 		if (!bkey_extent_is_direct_data(k.k))
 			goto next_nondata;
@@ -513,10 +583,9 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		 */
 		bch2_bkey_buf_reassemble(&sk, c, k);
 		k = bkey_i_to_s_c(sk.k);
-		bch2_trans_unlock(&trans);
 
-		ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
-					btree_id, k, data_opts);
+		ret2 = bch2_move_extent(&trans, &iter, ctxt, NULL,
+					io_opts, btree_id, k, data_opts);
 		if (ret2) {
 			if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
 				continue;
@@ -534,7 +603,8 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (ctxt->rate)
 			bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
-		atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+		if (ctxt->stats)
+			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 next_nondata:
 		bch2_btree_iter_advance(&iter);
 	}
@@ -583,104 +653,69 @@ int bch2_move_data(struct bch_fs *c,
 	return ret;
 }
 
-static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
-{
-	struct bch_fs *c = trans->c;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
-	struct bch_backpointer bp;
-	u64 bp_offset = 0;
-	int ret;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-			     bucket, BTREE_ITER_CACHED);
-again:
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-
-	if (!ret && k.k->type == KEY_TYPE_alloc_v4) {
-		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
-
-		if (a.v->gen == gen &&
-		    a.v->dirty_sectors) {
-			if (a.v->data_type == BCH_DATA_btree) {
-				bch2_trans_unlock(trans);
-				if (bch2_btree_interior_updates_flush(c))
-					goto again;
-				goto failed_to_evacuate;
-			}
-		}
-	}
-
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
-failed_to_evacuate:
-	bch2_trans_iter_exit(trans, &iter);
-
-	prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
-	bch2_bkey_val_to_text(&buf, c, k);
-
-	while (1) {
-		bch2_trans_begin(trans);
-
-		ret = bch2_get_next_backpointer(trans, bucket, gen,
-						&bp_offset, &bp,
-						BTREE_ITER_CACHED);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (bp_offset == U64_MAX)
-			break;
-
-		k = bch2_backpointer_get_key(trans, &iter,
-					     bucket, bp_offset, bp);
-		ret = bkey_err(k);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (!k.k)
-			continue;
-		prt_newline(&buf);
-		bch2_bkey_val_to_text(&buf, c, k);
-		bch2_trans_iter_exit(trans, &iter);
-	}
-
-	bch2_print_string_as_lines(KERN_ERR, buf.buf);
-	printbuf_exit(&buf);
-	return 0;
-}
-
-int __bch2_evacuate_bucket(struct moving_context *ctxt,
+int __bch2_evacuate_bucket(struct btree_trans *trans,
+			   struct moving_context *ctxt,
+			   struct move_bucket_in_flight *bucket_in_flight,
 			   struct bpos bucket, int gen,
 			   struct data_update_opts _data_opts)
 {
 	struct bch_fs *c = ctxt->c;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
-	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_buf sk;
 	struct bch_backpointer bp;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a;
+	struct bkey_s_c k;
 	struct data_update_opts data_opts;
-	u64 bp_offset = 0, cur_inum = U64_MAX;
+	unsigned dirty_sectors, bucket_size;
+	u64 fragmentation;
+	u64 cur_inum = U64_MAX;
+	struct bpos bp_pos = POS_MIN;
 	int ret = 0;
 
+	trace_bucket_evacuate(c, &bucket);
+
 	bch2_bkey_buf_init(&sk);
-	bch2_trans_init(&trans, c, 0, 0);
 
-	while (!(ret = move_ratelimit(&trans, ctxt))) {
-		bch2_trans_begin(&trans);
+	/*
+	 * We're not run in a context that handles transaction restarts:
+	 */
+	bch2_trans_begin(trans);
+
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+			     bucket, BTREE_ITER_CACHED);
+	ret = lockrestart_do(trans,
+			bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+	bch2_trans_iter_exit(trans, &iter);
 
-		ret = bch2_get_next_backpointer(&trans, bucket, gen,
-						&bp_offset, &bp,
+	if (ret) {
+		bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
+		goto err;
+	}
+
+	a = bch2_alloc_to_v4(k, &a_convert);
+	dirty_sectors = a->dirty_sectors;
+	bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
+	fragmentation = a->fragmentation_lru;
+
+	ret = bch2_btree_write_buffer_flush(trans);
+	if (ret) {
+		bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
+		goto err;
+	}
+
+	while (!(ret = move_ratelimit(trans, ctxt))) {
+		bch2_trans_begin(trans);
+
+		ret = bch2_get_next_backpointer(trans, bucket, gen,
+						&bp_pos, &bp,
 						BTREE_ITER_CACHED);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			continue;
 		if (ret)
 			goto err;
-		if (bp_offset == U64_MAX)
+		if (bkey_eq(bp_pos, POS_MAX))
 			break;
 
 		if (!bp.level) {
@@ -688,22 +723,21 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 			struct bkey_s_c k;
 			unsigned i = 0;
 
-			k = bch2_backpointer_get_key(&trans, &iter,
-						     bucket, bp_offset, bp);
+			k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
 			ret = bkey_err(k);
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
 			if (ret)
 				goto err;
 			if (!k.k)
-				continue;
+				goto next;
 
 			bch2_bkey_buf_reassemble(&sk, c, k);
 			k = bkey_i_to_s_c(sk.k);
 
-			ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum);
+			ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
 			if (ret) {
-				bch2_trans_iter_exit(&trans, &iter);
+				bch2_trans_iter_exit(trans, &iter);
 				continue;
 			}
@@ -712,20 +746,26 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 			data_opts.rewrite_ptrs = 0;
 
 			bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
-				if (ptr->dev == bucket.inode)
+				if (ptr->dev == bucket.inode) {
 					data_opts.rewrite_ptrs |= 1U << i;
+					if (ptr->cached) {
+						bch2_trans_iter_exit(trans, &iter);
+						goto next;
+					}
+				}
 				i++;
 			}
 
-			ret = bch2_move_extent(&trans, &iter, ctxt, io_opts,
-					       bp.btree_id, k, data_opts);
-			bch2_trans_iter_exit(&trans, &iter);
+			ret = bch2_move_extent(trans, &iter, ctxt,
+					       bucket_in_flight,
+					       io_opts, bp.btree_id, k, data_opts);
+			bch2_trans_iter_exit(trans, &iter);
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
 			if (ret == -ENOMEM) {
 				/* memory allocation failure, wait for some IO to finish */
-				bch2_move_ctxt_wait_for_io(ctxt, &trans);
+				bch2_move_ctxt_wait_for_io(ctxt, trans);
 				continue;
 			}
 			if (ret)
@@ -733,12 +773,12 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate, k.k->size);
 
-			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+			if (ctxt->stats)
+				atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 		} else {
 			struct btree *b;
 
-			b = bch2_backpointer_get_node(&trans, &iter,
-						      bucket, bp_offset, bp);
+			b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp);
 			ret = PTR_ERR_OR_ZERO(b);
 			if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
 				continue;
@@ -747,10 +787,10 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 			if (ret)
 				goto err;
 			if (!b)
-				continue;
+				goto next;
 
-			ret = bch2_btree_node_rewrite(&trans, &iter, b, 0);
-			bch2_trans_iter_exit(&trans, &iter);
+			ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
+			bch2_trans_iter_exit(trans, &iter);
 
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
@@ -760,22 +800,17 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate, c->opts.btree_node_size >> 9);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+			if (ctxt->stats) {
+				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+			}
 		}
-
-		bp_offset++;
+next:
+		bp_pos = bpos_nosnap_successor(bp_pos);
 	}
 
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && gen >= 0) {
-		bch2_trans_unlock(&trans);
-		move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
-		closure_sync(&ctxt->cl);
-		if (!ctxt->write_error)
-			lockrestart_do(&trans, verify_bucket_evacuated(&trans, bucket, gen));
-	}
+	trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
 err:
-	bch2_trans_exit(&trans);
 	bch2_bkey_buf_exit(&sk, c);
 	return ret;
 }
@@ -788,12 +823,15 @@ int bch2_evacuate_bucket(struct bch_fs *c,
 			 struct write_point_specifier wp,
 			 bool wait_on_copygc)
 {
+	struct btree_trans trans;
 	struct moving_context ctxt;
 	int ret;
 
+	bch2_trans_init(&trans, c, 0, 0);
 	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
-	ret = __bch2_evacuate_bucket(&ctxt, bucket, gen, data_opts);
+	ret = __bch2_evacuate_bucket(&trans, &ctxt, NULL, bucket, gen, data_opts);
 	bch2_moving_ctxt_exit(&ctxt);
+	bch2_trans_exit(&trans);
 
 	return ret;
 }
@@ -1056,3 +1094,67 @@ int bch2_data_job(struct bch_fs *c,
 
 	return ret;
 }
+
+void bch2_data_jobs_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	struct bch_move_stats *stats;
+
+	mutex_lock(&c->data_progress_lock);
+	list_for_each_entry(stats, &c->data_progress_list, list) {
+		prt_printf(out, "%s: data type %s btree_id %s position: ",
+			   stats->name,
+			   bch2_data_types[stats->data_type],
+			   bch2_btree_ids[stats->btree_id]);
+		bch2_bpos_to_text(out, stats->pos);
+		prt_printf(out, "%s", "\n");
+	}
+	mutex_unlock(&c->data_progress_lock);
+}
+
+static void bch2_moving_ctxt_to_text(struct printbuf *out, struct moving_context *ctxt)
+{
+	struct moving_io *io;
+
+	prt_printf(out, "%ps:", ctxt->fn);
+	prt_newline(out);
+	printbuf_indent_add(out, 2);
+
+	prt_printf(out, "reads: %u sectors %u",
+		   atomic_read(&ctxt->read_ios),
+		   atomic_read(&ctxt->read_sectors));
+	prt_newline(out);
+
+	prt_printf(out, "writes: %u sectors %u",
+		   atomic_read(&ctxt->write_ios),
+		   atomic_read(&ctxt->write_sectors));
+	prt_newline(out);
+
+	printbuf_indent_add(out, 2);
+
+	mutex_lock(&ctxt->lock);
+	list_for_each_entry(io, &ctxt->ios, io_list) {
+		bch2_write_op_to_text(out, &io->write.op);
+	}
+	mutex_unlock(&ctxt->lock);
+
+	printbuf_indent_sub(out, 4);
+}
+
+void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	struct moving_context *ctxt;
+
+	mutex_lock(&c->moving_context_lock);
+	list_for_each_entry(ctxt, &c->moving_context_list, list)
+		bch2_moving_ctxt_to_text(out, ctxt);
+	mutex_unlock(&c->moving_context_lock);
+}
+
+void bch2_fs_move_init(struct bch_fs *c)
+{
+	INIT_LIST_HEAD(&c->moving_context_list);
+	mutex_init(&c->moving_context_lock);
+
+	INIT_LIST_HEAD(&c->data_progress_list);
+	mutex_init(&c->data_progress_lock);
+}