X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fmove.c;h=39a14e3216807d222fc11856fa285448f4a24ca3;hb=a053ebfb8c89e023a44c365e369f4053cfc53376;hp=55fdacad9b9818e8cadf2173f8b57de0fdab69d9;hpb=e0a51ccce8533a91c7cc0cd0adc5662697c9bcfa;p=bcachefs-tools-debian diff --git a/libbcachefs/move.c b/libbcachefs/move.c index 55fdaca..39a14e3 100644 --- a/libbcachefs/move.c +++ b/libbcachefs/move.c @@ -1,28 +1,63 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "alloc_background.h" #include "alloc_foreground.h" #include "backpointers.h" #include "bkey_buf.h" #include "btree_gc.h" #include "btree_update.h" #include "btree_update_interior.h" +#include "btree_write_buffer.h" #include "disk_groups.h" #include "ec.h" #include "errcode.h" #include "error.h" #include "inode.h" -#include "io.h" +#include "io_read.h" +#include "io_write.h" #include "journal_reclaim.h" +#include "keylist.h" #include "move.h" #include "replicas.h" #include "super-io.h" -#include "keylist.h" +#include "trace.h" #include #include -#include +static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k) +{ + if (trace_move_extent_enabled()) { + struct printbuf buf = PRINTBUF; + + bch2_bkey_val_to_text(&buf, c, k); + trace_move_extent(c, buf.buf); + printbuf_exit(&buf); + } +} + +static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k) +{ + if (trace_move_extent_read_enabled()) { + struct printbuf buf = PRINTBUF; + + bch2_bkey_val_to_text(&buf, c, k); + trace_move_extent_read(c, buf.buf); + printbuf_exit(&buf); + } +} + +static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c k) +{ + if (trace_move_extent_alloc_mem_fail_enabled()) { + struct printbuf buf = PRINTBUF; + + bch2_bkey_val_to_text(&buf, c, k); + trace_move_extent_alloc_mem_fail(c, buf.buf); + printbuf_exit(&buf); + } +} static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats) { @@ -39,63 +74,71 @@ static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats) } struct moving_io { - struct list_head list; - struct closure cl; - bool read_completed; + struct list_head read_list; + struct list_head io_list; + struct move_bucket_in_flight *b; + struct closure cl; + bool read_completed; - unsigned read_sectors; - unsigned write_sectors; + unsigned read_sectors; + unsigned write_sectors; - struct bch_read_bio rbio; + struct bch_read_bio rbio; - struct data_update write; + struct data_update write; /* Must be last since it is variable size */ - struct bio_vec bi_inline_vecs[0]; + struct bio_vec bi_inline_vecs[0]; }; -static void move_free(struct closure *cl) +static void move_free(struct moving_io *io) { - struct moving_io *io = container_of(cl, struct moving_io, cl); struct moving_context *ctxt = io->write.ctxt; - struct bch_fs *c = ctxt->c; + + if (io->b) + atomic_dec(&io->b->count); bch2_data_update_exit(&io->write); + + mutex_lock(&ctxt->lock); + list_del(&io->io_list); wake_up(&ctxt->wait); - percpu_ref_put(&c->writes); + mutex_unlock(&ctxt->lock); + kfree(io); } -static void move_write_done(struct closure *cl) +static void move_write_done(struct bch_write_op *op) { - struct moving_io *io = container_of(cl, struct moving_io, cl); + struct moving_io *io = container_of(op, struct moving_io, write.op); struct moving_context *ctxt = io->write.ctxt; if (io->write.op.error) ctxt->write_error = true; atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors); - closure_return_with_destructor(cl, move_free); + atomic_dec(&io->write.ctxt->write_ios); + 
move_free(io); + closure_put(&ctxt->cl); } -static void move_write(struct closure *cl) +static void move_write(struct moving_io *io) { - struct moving_io *io = container_of(cl, struct moving_io, cl); - if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) { - closure_return_with_destructor(cl, move_free); + move_free(io); return; } + closure_get(&io->write.ctxt->cl); atomic_add(io->write_sectors, &io->write.ctxt->write_sectors); + atomic_inc(&io->write.ctxt->write_ios); - bch2_data_update_read_done(&io->write, io->rbio.pick.crc, cl); - continue_at(cl, move_write_done, NULL); + bch2_data_update_read_done(&io->write, io->rbio.pick.crc); } -static inline struct moving_io *next_pending_write(struct moving_context *ctxt) +struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt) { struct moving_io *io = - list_first_entry_or_null(&ctxt->reads, struct moving_io, list); + list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list); return io && io->read_completed ? io : NULL; } @@ -106,35 +149,27 @@ static void move_read_endio(struct bio *bio) struct moving_context *ctxt = io->write.ctxt; atomic_sub(io->read_sectors, &ctxt->read_sectors); + atomic_dec(&ctxt->read_ios); io->read_completed = true; wake_up(&ctxt->wait); closure_put(&ctxt->cl); } -static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans) +void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt, + struct btree_trans *trans) { struct moving_io *io; if (trans) bch2_trans_unlock(trans); - while ((io = next_pending_write(ctxt))) { - list_del(&io->list); - closure_call(&io->cl, move_write, NULL, &ctxt->cl); + while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) { + list_del(&io->read_list); + move_write(io); } } -#define move_ctxt_wait_event(_ctxt, _trans, _cond) \ -do { \ - do_pending_writes(_ctxt, _trans); \ - \ - if (_cond) \ - break; \ - __wait_event((_ctxt)->wait, \ - next_pending_write(_ctxt) || (_cond)); \ -} while (1) - static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt, struct btree_trans *trans) { @@ -147,17 +182,26 @@ static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt, void bch2_moving_ctxt_exit(struct moving_context *ctxt) { + struct bch_fs *c = ctxt->c; + move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads)); closure_sync(&ctxt->cl); + EBUG_ON(atomic_read(&ctxt->write_sectors)); + EBUG_ON(atomic_read(&ctxt->write_ios)); + EBUG_ON(atomic_read(&ctxt->read_sectors)); + EBUG_ON(atomic_read(&ctxt->read_ios)); if (ctxt->stats) { - progress_list_del(ctxt->c, ctxt->stats); - - trace_move_data(ctxt->c, + progress_list_del(c, ctxt->stats); + trace_move_data(c, atomic64_read(&ctxt->stats->sectors_moved), atomic64_read(&ctxt->stats->keys_moved)); } + + mutex_lock(&c->moving_context_lock); + list_del(&ctxt->list); + mutex_unlock(&c->moving_context_lock); } void bch2_moving_ctxt_init(struct moving_context *ctxt, @@ -170,22 +214,30 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt, memset(ctxt, 0, sizeof(*ctxt)); ctxt->c = c; + ctxt->fn = (void *) _RET_IP_; ctxt->rate = rate; ctxt->stats = stats; ctxt->wp = wp; ctxt->wait_on_copygc = wait_on_copygc; closure_init_stack(&ctxt->cl); + + mutex_init(&ctxt->lock); INIT_LIST_HEAD(&ctxt->reads); + INIT_LIST_HEAD(&ctxt->ios); init_waitqueue_head(&ctxt->wait); + mutex_lock(&c->moving_context_lock); + list_add(&ctxt->list, &c->moving_context_list); + mutex_unlock(&c->moving_context_lock); + if (stats) { progress_list_add(c, stats); stats->data_type = BCH_DATA_user; } } -void 
bch_move_stats_init(struct bch_move_stats *stats, char *name) +void bch2_move_stats_init(struct bch_move_stats *stats, char *name) { memset(stats, 0, sizeof(*stats)); scnprintf(stats->name, sizeof(stats->name), "%s", name); @@ -200,13 +252,11 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans, struct bkey_i *n; int ret; - n = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); + n = bch2_bkey_make_mut_noupdate(trans, k); ret = PTR_ERR_OR_ZERO(n); if (ret) return ret; - bkey_reassemble(n, k); - while (data_opts.kill_ptrs) { unsigned i = 0, drop = __fls(data_opts.kill_ptrs); struct bch_extent_ptr *ptr; @@ -231,13 +281,15 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans, if (bkey_deleted(&n->k)) n->k.size = 0; - return bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: + return bch2_trans_relock(trans) ?: + bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL); } static int bch2_move_extent(struct btree_trans *trans, struct btree_iter *iter, struct moving_context *ctxt, + struct move_bucket_in_flight *bucket_in_flight, struct bch_io_opts io_opts, enum btree_id btree_id, struct bkey_s_c k, @@ -251,6 +303,8 @@ static int bch2_move_extent(struct btree_trans *trans, unsigned sectors = k.k->size, pages; int ret = -ENOMEM; + trace_move_extent2(c, k); + bch2_data_update_opts_normalize(k, &data_opts); if (!data_opts.rewrite_ptrs && @@ -260,8 +314,11 @@ static int bch2_move_extent(struct btree_trans *trans, return 0; } - if (!percpu_ref_tryget_live(&c->writes)) - return -EROFS; + /* + * Before memory allocations & taking nocow locks in + * bch2_data_update_init(): + */ + bch2_trans_unlock(trans); /* write path might have to decompress data: */ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) @@ -273,6 +330,7 @@ static int bch2_move_extent(struct btree_trans *trans, if (!io) goto err; + INIT_LIST_HEAD(&io->io_list); io->write.ctxt = ctxt; io->read_sectors = k.k->size; io->write_sectors = k.k->size; @@ -292,25 +350,47 @@ static int bch2_move_extent(struct btree_trans *trans, bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); io->rbio.bio.bi_iter.bi_size = sectors << 9; - bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0); + io->rbio.bio.bi_opf = REQ_OP_READ; io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k); io->rbio.bio.bi_end_io = move_read_endio; - ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts, - data_opts, btree_id, k); - if (ret) + ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp, + io_opts, data_opts, btree_id, k); + if (ret && ret != -BCH_ERR_unwritten_extent_update) goto err_free_pages; + if (ret == -BCH_ERR_unwritten_extent_update) { + bch2_update_unwritten_extent(trans, &io->write); + move_free(io); + return 0; + } + + BUG_ON(ret); + io->write.ctxt = ctxt; + io->write.op.end_io = move_write_done; + + if (ctxt->stats) { + atomic64_inc(&ctxt->stats->keys_moved); + atomic64_add(k.k->size, &ctxt->stats->sectors_moved); + } + + if (bucket_in_flight) { + io->b = bucket_in_flight; + atomic_inc(&io->b->count); + } - atomic64_inc(&ctxt->stats->keys_moved); - atomic64_add(k.k->size, &ctxt->stats->sectors_moved); this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size); this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size); - trace_move_extent_read(k.k); + trace_move_extent_read2(c, k); + mutex_lock(&ctxt->lock); atomic_add(io->read_sectors, &ctxt->read_sectors); - list_add_tail(&io->list, &ctxt->reads); + 
atomic_inc(&ctxt->read_ios); + + list_add_tail(&io->read_list, &ctxt->reads); + list_add_tail(&io->io_list, &ctxt->ios); + mutex_unlock(&ctxt->lock); /* * dropped by move_read_endio() - guards against use after free of @@ -328,8 +408,8 @@ err_free_pages: err_free: kfree(io); err: - percpu_ref_put(&c->writes); - trace_and_count(c, move_extent_alloc_mem_fail, k.k); + this_cpu_inc(c->counters[BCH_COUNTER_move_extent_alloc_mem_fail]); + trace_move_extent_alloc_mem_fail2(c, k); return ret; } @@ -347,8 +427,8 @@ static int lookup_inode(struct btree_trans *trans, struct bpos pos, if (ret) goto err; - if (!k.k || bkey_cmp(k.k->p, pos)) { - ret = -ENOENT; + if (!k.k || !bkey_eq(k.k->p, pos)) { + ret = -BCH_ERR_ENOENT_inode; goto err; } @@ -399,13 +479,15 @@ static int move_ratelimit(struct btree_trans *trans, } } while (delay); + /* + * XXX: these limits really ought to be per device, SSDs and hard drives + * will want different limits + */ move_ctxt_wait_event(ctxt, trans, - atomic_read(&ctxt->write_sectors) < - c->opts.move_bytes_in_flight >> 9); - - move_ctxt_wait_event(ctxt, trans, - atomic_read(&ctxt->read_sectors) < - c->opts.move_bytes_in_flight >> 9); + atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 && + atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 && + atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight && + atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight); return 0; } @@ -420,8 +502,6 @@ static int move_get_io_opts(struct btree_trans *trans, if (*cur_inum == k.k->p.inode) return 0; - *io_opts = bch2_opts_to_inode_opts(trans->c->opts); - ret = lookup_inode(trans, SPOS(0, k.k->p.inode, k.k->p.snapshot), &inode); @@ -429,8 +509,9 @@ static int move_get_io_opts(struct btree_trans *trans, return ret; if (!ret) - bch2_io_opts_apply(io_opts, bch2_inode_opts_get(&inode)); - + bch2_inode_opts_get(io_opts, trans->c, &inode); + else + *io_opts = bch2_opts_to_inode_opts(trans->c->opts); *cur_inum = k.k->p.inode; return 0; } @@ -444,7 +525,7 @@ static int __bch2_move_data(struct moving_context *ctxt, struct bch_fs *c = ctxt->c; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); struct bkey_buf sk; - struct btree_trans trans; + struct btree_trans *trans = bch2_trans_get(c); struct btree_iter iter; struct bkey_s_c k; struct data_update_opts data_opts; @@ -452,21 +533,22 @@ static int __bch2_move_data(struct moving_context *ctxt, int ret = 0, ret2; bch2_bkey_buf_init(&sk); - bch2_trans_init(&trans, c, 0, 0); - ctxt->stats->data_type = BCH_DATA_user; - ctxt->stats->btree_id = btree_id; - ctxt->stats->pos = start; + if (ctxt->stats) { + ctxt->stats->data_type = BCH_DATA_user; + ctxt->stats->btree_id = btree_id; + ctxt->stats->pos = start; + } - bch2_trans_iter_init(&trans, &iter, btree_id, start, + bch2_trans_iter_init(trans, &iter, btree_id, start, BTREE_ITER_PREFETCH| BTREE_ITER_ALL_SNAPSHOTS); if (ctxt->rate) bch2_ratelimit_reset(ctxt->rate); - while (!move_ratelimit(&trans, ctxt)) { - bch2_trans_begin(&trans); + while (!move_ratelimit(trans, ctxt)) { + bch2_trans_begin(trans); k = bch2_btree_iter_peek(&iter); if (!k.k) @@ -478,15 +560,16 @@ static int __bch2_move_data(struct moving_context *ctxt, if (ret) break; - if (bkey_cmp(bkey_start_pos(k.k), end) >= 0) + if (bkey_ge(bkey_start_pos(k.k), end)) break; - ctxt->stats->pos = iter.pos; + if (ctxt->stats) + ctxt->stats->pos = iter.pos; if (!bkey_extent_is_direct_data(k.k)) goto next_nondata; - ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum); + ret = 
move_get_io_opts(trans, &io_opts, k, &cur_inum); if (ret) continue; @@ -497,19 +580,19 @@ static int __bch2_move_data(struct moving_context *ctxt, /* * The iterator gets unlocked by __bch2_read_extent - need to * save a copy of @k elsewhere: - */ + */ bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); - ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts, - btree_id, k, data_opts); + ret2 = bch2_move_extent(trans, &iter, ctxt, NULL, + io_opts, btree_id, k, data_opts); if (ret2) { if (bch2_err_matches(ret2, BCH_ERR_transaction_restart)) continue; if (ret2 == -ENOMEM) { /* memory allocation failure, wait for some IO to finish */ - bch2_move_ctxt_wait_for_io(ctxt, &trans); + bch2_move_ctxt_wait_for_io(ctxt, trans); continue; } @@ -520,13 +603,14 @@ static int __bch2_move_data(struct moving_context *ctxt, if (ctxt->rate) bch2_ratelimit_increment(ctxt->rate, k.k->size); next: - atomic64_add(k.k->size, &ctxt->stats->sectors_seen); + if (ctxt->stats) + atomic64_add(k.k->size, &ctxt->stats->sectors_seen); next_nondata: bch2_btree_iter_advance(&iter); } - bch2_trans_iter_exit(&trans, &iter); - bch2_trans_exit(&trans); + bch2_trans_iter_exit(trans, &iter); + bch2_trans_put(trans); bch2_bkey_buf_exit(&sk, c); return ret; @@ -543,12 +627,12 @@ int bch2_move_data(struct bch_fs *c, { struct moving_context ctxt; enum btree_id id; - int ret; + int ret = 0; bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc); for (id = start_btree_id; - id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1); + id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1); id++) { stats->btree_id = id; @@ -556,6 +640,9 @@ int bch2_move_data(struct bch_fs *c, id != BTREE_ID_reflink) continue; + if (!bch2_btree_id_root(c, id)->b) + continue; + ret = __bch2_move_data(&ctxt, id == start_btree_id ? start_pos : POS_MIN, id == end_btree_id ? 
end_pos : POS_MAX, @@ -569,95 +656,90 @@ int bch2_move_data(struct bch_fs *c, return ret; } -static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen) -{ - struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k; - int ret; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, - bucket, BTREE_ITER_CACHED); -again: - k = bch2_btree_iter_peek_slot(&iter); - ret = bkey_err(k); - - if (!ret && k.k->type == KEY_TYPE_alloc_v4) { - struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k); - - if (a.v->gen == gen && - a.v->dirty_sectors) { - struct printbuf buf = PRINTBUF; - - if (a.v->data_type == BCH_DATA_btree) { - bch2_trans_unlock(trans); - if (bch2_btree_interior_updates_flush(c)) - goto again; - } - - prt_str(&buf, "failed to evacuate bucket "); - bch2_bkey_val_to_text(&buf, c, k); - - bch_err(c, "%s", buf.buf); - printbuf_exit(&buf); - } - } - - bch2_trans_iter_exit(trans, &iter); - return ret; -} - -int __bch2_evacuate_bucket(struct moving_context *ctxt, +int __bch2_evacuate_bucket(struct btree_trans *trans, + struct moving_context *ctxt, + struct move_bucket_in_flight *bucket_in_flight, struct bpos bucket, int gen, struct data_update_opts _data_opts) { struct bch_fs *c = ctxt->c; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); - struct btree_trans trans; struct btree_iter iter; struct bkey_buf sk; struct bch_backpointer bp; + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a; + struct bkey_s_c k; struct data_update_opts data_opts; - u64 bp_offset = 0, cur_inum = U64_MAX; + unsigned dirty_sectors, bucket_size; + u64 fragmentation; + u64 cur_inum = U64_MAX; + struct bpos bp_pos = POS_MIN; int ret = 0; + trace_bucket_evacuate(c, &bucket); + bch2_bkey_buf_init(&sk); - bch2_trans_init(&trans, c, 0, 0); - while (!(ret = move_ratelimit(&trans, ctxt))) { - bch2_trans_begin(&trans); + /* + * We're not run in a context that handles transaction restarts: + */ + bch2_trans_begin(trans); + + bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, + bucket, BTREE_ITER_CACHED); + ret = lockrestart_do(trans, + bkey_err(k = bch2_btree_iter_peek_slot(&iter))); + bch2_trans_iter_exit(trans, &iter); + + if (ret) { + bch_err_msg(c, ret, "looking up alloc key"); + goto err; + } - ret = bch2_get_next_backpointer(&trans, bucket, gen, - &bp_offset, &bp, + a = bch2_alloc_to_v4(k, &a_convert); + dirty_sectors = a->dirty_sectors; + bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size; + fragmentation = a->fragmentation_lru; + + ret = bch2_btree_write_buffer_flush(trans); + if (ret) { + bch_err_msg(c, ret, "flushing btree write buffer"); + goto err; + } + + while (!(ret = move_ratelimit(trans, ctxt))) { + bch2_trans_begin(trans); + + ret = bch2_get_next_backpointer(trans, bucket, gen, + &bp_pos, &bp, BTREE_ITER_CACHED); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) goto err; - if (bp_offset == U64_MAX) + if (bkey_eq(bp_pos, POS_MAX)) break; if (!bp.level) { const struct bch_extent_ptr *ptr; - struct bkey_s_c k; unsigned i = 0; - k = bch2_backpointer_get_key(&trans, &iter, - bucket, bp_offset, bp); + k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0); ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) goto err; if (!k.k) - continue; + goto next; bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); - ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum); + ret = move_get_io_opts(trans, &io_opts, k, &cur_inum); if (ret) { - 
bch2_trans_iter_exit(&trans, &iter); + bch2_trans_iter_exit(trans, &iter); continue; } @@ -666,20 +748,26 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt, data_opts.rewrite_ptrs = 0; bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) { - if (ptr->dev == bucket.inode) + if (ptr->dev == bucket.inode) { data_opts.rewrite_ptrs |= 1U << i; + if (ptr->cached) { + bch2_trans_iter_exit(trans, &iter); + goto next; + } + } i++; } - ret = bch2_move_extent(&trans, &iter, ctxt, io_opts, - bp.btree_id, k, data_opts); - bch2_trans_iter_exit(&trans, &iter); + ret = bch2_move_extent(trans, &iter, ctxt, + bucket_in_flight, + io_opts, bp.btree_id, k, data_opts); + bch2_trans_iter_exit(trans, &iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret == -ENOMEM) { /* memory allocation failure, wait for some IO to finish */ - bch2_move_ctxt_wait_for_io(ctxt, &trans); + bch2_move_ctxt_wait_for_io(ctxt, trans); continue; } if (ret) @@ -687,12 +775,12 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt, if (ctxt->rate) bch2_ratelimit_increment(ctxt->rate, k.k->size); - atomic64_add(k.k->size, &ctxt->stats->sectors_seen); + if (ctxt->stats) + atomic64_add(k.k->size, &ctxt->stats->sectors_seen); } else { struct btree *b; - b = bch2_backpointer_get_node(&trans, &iter, - bucket, bp_offset, bp); + b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp); ret = PTR_ERR_OR_ZERO(b); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) continue; @@ -701,10 +789,10 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt, if (ret) goto err; if (!b) - continue; + goto next; - ret = bch2_btree_node_rewrite(&trans, &iter, b, 0); - bch2_trans_iter_exit(&trans, &iter); + ret = bch2_btree_node_rewrite(trans, &iter, b, 0); + bch2_trans_iter_exit(trans, &iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -714,22 +802,17 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt, if (ctxt->rate) bch2_ratelimit_increment(ctxt->rate, c->opts.btree_node_size >> 9); - atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen); - atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved); + if (ctxt->stats) { + atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen); + atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved); + } } - - bp_offset++; +next: + bp_pos = bpos_nosnap_successor(bp_pos); } - if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && gen >= 0) { - bch2_trans_unlock(&trans); - move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads)); - closure_sync(&ctxt->cl); - if (!ctxt->write_error) - lockrestart_do(&trans, verify_bucket_evacuated(&trans, bucket, gen)); - } + trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret); err: - bch2_trans_exit(&trans); bch2_bkey_buf_exit(&sk, c); return ret; } @@ -742,12 +825,14 @@ int bch2_evacuate_bucket(struct bch_fs *c, struct write_point_specifier wp, bool wait_on_copygc) { + struct btree_trans *trans = bch2_trans_get(c); struct moving_context ctxt; int ret; bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc); - ret = __bch2_evacuate_bucket(&ctxt, bucket, gen, data_opts); + ret = __bch2_evacuate_bucket(trans, &ctxt, NULL, bucket, gen, data_opts); bch2_moving_ctxt_exit(&ctxt); + bch2_trans_put(trans); return ret; } @@ -764,28 +849,30 @@ static int bch2_move_btree(struct bch_fs *c, { bool kthread = (current->flags & PF_KTHREAD) != 0; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); - struct btree_trans trans; + struct 
btree_trans *trans = bch2_trans_get(c); struct btree_iter iter; struct btree *b; enum btree_id id; struct data_update_opts data_opts; int ret = 0; - bch2_trans_init(&trans, c, 0, 0); progress_list_add(c, stats); stats->data_type = BCH_DATA_btree; for (id = start_btree_id; - id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1); + id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1); id++) { stats->btree_id = id; - bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0, + if (!bch2_btree_id_root(c, id)->b) + continue; + + bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0, BTREE_ITER_PREFETCH); retry: ret = 0; - while (bch2_trans_begin(&trans), + while (bch2_trans_begin(trans), (b = bch2_btree_iter_peek_node(&iter)) && !(ret = PTR_ERR_OR_ZERO(b))) { if (kthread && kthread_should_stop()) @@ -800,7 +887,7 @@ retry: if (!pred(c, arg, b, &io_opts, &data_opts)) goto next; - ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret; + ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret; if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) @@ -811,16 +898,16 @@ next: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; - bch2_trans_iter_exit(&trans, &iter); + bch2_trans_iter_exit(trans, &iter); if (kthread && kthread_should_stop()) break; } - bch2_trans_exit(&trans); + bch2_trans_put(trans); if (ret) - bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret)); + bch_err_fn(c, ret); bch2_btree_interior_updates_flush(c); @@ -868,7 +955,7 @@ static bool migrate_pred(struct bch_fs *c, void *arg, i++; } - return data_opts->rewrite_ptrs != 0;; + return data_opts->rewrite_ptrs != 0; } static bool rereplicate_btree_pred(struct bch_fs *c, void *arg, @@ -945,6 +1032,8 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats) mutex_unlock(&c->sb_lock); } + if (ret) + bch_err_fn(c, ret); return ret; } @@ -956,7 +1045,7 @@ int bch2_data_job(struct bch_fs *c, switch (op.op) { case BCH_DATA_OP_REREPLICATE: - bch_move_stats_init(stats, "rereplicate"); + bch2_move_stats_init(stats, "rereplicate"); stats->data_type = BCH_DATA_journal; ret = bch2_journal_flush_device_pins(&c->journal, -1); @@ -980,7 +1069,7 @@ int bch2_data_job(struct bch_fs *c, if (op.migrate.dev >= c->sb.nr_devices) return -EINVAL; - bch_move_stats_init(stats, "migrate"); + bch2_move_stats_init(stats, "migrate"); stats->data_type = BCH_DATA_journal; ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev); @@ -1001,7 +1090,7 @@ int bch2_data_job(struct bch_fs *c, ret = bch2_replicas_gc2(c) ?: ret; break; case BCH_DATA_OP_REWRITE_OLD_NODES: - bch_move_stats_init(stats, "rewrite_old_nodes"); + bch2_move_stats_init(stats, "rewrite_old_nodes"); ret = bch2_scan_old_btree_nodes(c, stats); break; default: @@ -1010,3 +1099,61 @@ int bch2_data_job(struct bch_fs *c, return ret; } + +static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt) +{ + struct bch_move_stats *stats = ctxt->stats; + struct moving_io *io; + + prt_printf(out, "%s (%ps):", stats->name, ctxt->fn); + prt_newline(out); + + prt_printf(out, " data type %s btree_id %s position: ", + bch2_data_types[stats->data_type], + bch2_btree_ids[stats->btree_id]); + bch2_bpos_to_text(out, stats->pos); + prt_newline(out); + printbuf_indent_add(out, 2); + + prt_printf(out, "reads: ios %u/%u sectors %u/%u", + atomic_read(&ctxt->read_ios), + c->opts.move_ios_in_flight, + atomic_read(&ctxt->read_sectors), + c->opts.move_bytes_in_flight >> 9); + prt_newline(out); + + 
prt_printf(out, "writes: ios %u/%u sectors %u/%u", + atomic_read(&ctxt->write_ios), + c->opts.move_ios_in_flight, + atomic_read(&ctxt->write_sectors), + c->opts.move_bytes_in_flight >> 9); + prt_newline(out); + + printbuf_indent_add(out, 2); + + mutex_lock(&ctxt->lock); + list_for_each_entry(io, &ctxt->ios, io_list) + bch2_write_op_to_text(out, &io->write.op); + mutex_unlock(&ctxt->lock); + + printbuf_indent_sub(out, 4); +} + +void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct moving_context *ctxt; + + mutex_lock(&c->moving_context_lock); + list_for_each_entry(ctxt, &c->moving_context_list, list) + bch2_moving_ctxt_to_text(out, c, ctxt); + mutex_unlock(&c->moving_context_lock); +} + +void bch2_fs_move_init(struct bch_fs *c) +{ + INIT_LIST_HEAD(&c->moving_context_list); + mutex_init(&c->moving_context_lock); + + INIT_LIST_HEAD(&c->data_progress_list); + mutex_init(&c->data_progress_lock); +}