X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fmove.c;h=bf68ea49447b95055a4f6a1e6e7c6a7e373aebc5;hb=HEAD;hp=4ef7595fa6a2d5f43c098b40d10a5ca73ae029cf;hpb=30cca2e94d0dfa8c3151daf1393f402d32bb9407;p=bcachefs-tools-debian

diff --git a/libbcachefs/move.c b/libbcachefs/move.c
index 4ef7595..bf68ea4 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -6,63 +6,123 @@
 #include "backpointers.h"
 #include "bkey_buf.h"
 #include "btree_gc.h"
+#include "btree_io.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "btree_write_buffer.h"
+#include "compress.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "errcode.h"
 #include "error.h"
 #include "inode.h"
-#include "io.h"
+#include "io_read.h"
+#include "io_write.h"
 #include "journal_reclaim.h"
+#include "keylist.h"
 #include "move.h"
 #include "replicas.h"
+#include "snapshot.h"
 #include "super-io.h"
-#include "keylist.h"
+#include "trace.h"
 
 #include <linux/ioprio.h>
 #include <linux/kthread.h>
-#include <trace/events/bcachefs.h>
 
+const char * const bch2_data_ops_strs[] = {
+#define x(t, n, ...) [n] = #t,
+	BCH_DATA_OPS()
+#undef x
+	NULL
+};
 
-static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
+static void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
+					  struct bch_io_opts *io_opts,
+					  struct data_update_opts *data_opts)
 {
-	mutex_lock(&c->data_progress_lock);
-	list_add(&stats->list, &c->data_progress_list);
-	mutex_unlock(&c->data_progress_lock);
+	printbuf_tabstop_push(out, 20);
+	prt_str(out, "rewrite ptrs:");
+	prt_tab(out);
+	bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
+	prt_newline(out);
+
+	prt_str(out, "kill ptrs: ");
+	prt_tab(out);
+	bch2_prt_u64_base2(out, data_opts->kill_ptrs);
+	prt_newline(out);
+
+	prt_str(out, "target: ");
+	prt_tab(out);
+	bch2_target_to_text(out, c, data_opts->target);
+	prt_newline(out);
+
+	prt_str(out, "compression: ");
+	prt_tab(out);
+	bch2_compression_opt_to_text(out, background_compression(*io_opts));
+	prt_newline(out);
+
+	prt_str(out, "extra replicas: ");
+	prt_tab(out);
+	prt_u64(out, data_opts->extra_replicas);
 }
 
-static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
+static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
+			       struct bch_io_opts *io_opts,
+			       struct data_update_opts *data_opts)
 {
-	mutex_lock(&c->data_progress_lock);
-	list_del(&stats->list);
-	mutex_unlock(&c->data_progress_lock);
+	if (trace_move_extent_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		prt_newline(&buf);
+		bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
+		trace_move_extent(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
+
+static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
+{
+	if (trace_move_extent_read_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		trace_move_extent_read(c, buf.buf);
+		printbuf_exit(&buf);
+	}
 }
 
 struct moving_io {
-	struct list_head	list;
-	struct closure		cl;
-	bool			read_completed;
+	struct list_head	read_list;
+	struct list_head	io_list;
+	struct move_bucket_in_flight *b;
+	struct closure		cl;
+	bool			read_completed;
 
-	unsigned		read_sectors;
-	unsigned		write_sectors;
+	unsigned		read_sectors;
+	unsigned		write_sectors;
 
-	struct bch_read_bio	rbio;
+	struct bch_read_bio	rbio;
 
-	struct data_update	write;
+	struct data_update	write;
 	/* Must be last since it is variable size */
-	struct bio_vec		bi_inline_vecs[0];
+	struct bio_vec		bi_inline_vecs[];
 };
 
 static void move_free(struct moving_io *io)
 {
 	struct moving_context *ctxt = io->write.ctxt;
-	struct bch_fs *c = ctxt->c;
+
+	if (io->b)
+		atomic_dec(&io->b->count);
 
 	bch2_data_update_exit(&io->write);
+
+	mutex_lock(&ctxt->lock);
+	list_del(&io->io_list);
 	wake_up(&ctxt->wait);
-	bch2_write_ref_put(c, BCH_WRITE_REF_move);
+	mutex_unlock(&ctxt->lock);
+
 	kfree(io);
 }
 
@@ -87,6 +147,15 @@ static void move_write(struct moving_io *io)
 		return;
 	}
 
+	if (trace_move_extent_write_enabled()) {
+		struct bch_fs *c = io->write.op.c;
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
+		trace_move_extent_write(c, buf.buf);
+		printbuf_exit(&buf);
+	}
+
 	closure_get(&io->write.ctxt->cl);
 	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
 	atomic_inc(&io->write.ctxt->write_ios);
@@ -97,7 +166,7 @@ static void move_write(struct moving_io *io)
 struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
 {
 	struct moving_io *io =
-		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);
+		list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);
 
 	return io && io->read_completed ? io : NULL;
 }
@@ -115,47 +184,50 @@ static void move_read_endio(struct bio *bio)
 	closure_put(&ctxt->cl);
 }
 
-void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
-					struct btree_trans *trans)
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
 {
 	struct moving_io *io;
 
-	if (trans)
-		bch2_trans_unlock(trans);
-
 	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
-		list_del(&io->list);
+		bch2_trans_unlock_long(ctxt->trans);
+		list_del(&io->read_list);
 		move_write(io);
 	}
 }
 
-static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
-				       struct btree_trans *trans)
+void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
 {
 	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
 
-	move_ctxt_wait_event(ctxt, trans,
+	move_ctxt_wait_event(ctxt,
 		!atomic_read(&ctxt->write_sectors) ||
 		atomic_read(&ctxt->write_sectors) != sectors_pending);
 }
 
-void bch2_moving_ctxt_exit(struct moving_context *ctxt)
+void bch2_moving_ctxt_flush_all(struct moving_context *ctxt)
 {
-	move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+	move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+	bch2_trans_unlock_long(ctxt->trans);
 	closure_sync(&ctxt->cl);
+}
+
+void bch2_moving_ctxt_exit(struct moving_context *ctxt)
+{
+	struct bch_fs *c = ctxt->trans->c;
+
+	bch2_moving_ctxt_flush_all(ctxt);
 
 	EBUG_ON(atomic_read(&ctxt->write_sectors));
 	EBUG_ON(atomic_read(&ctxt->write_ios));
 	EBUG_ON(atomic_read(&ctxt->read_sectors));
 	EBUG_ON(atomic_read(&ctxt->read_ios));
 
-	if (ctxt->stats) {
-		progress_list_del(ctxt->c, ctxt->stats);
+	mutex_lock(&c->moving_context_lock);
+	list_del(&ctxt->list);
+	mutex_unlock(&c->moving_context_lock);
 
-		trace_move_data(ctxt->c,
-				atomic64_read(&ctxt->stats->sectors_moved),
-				atomic64_read(&ctxt->stats->keys_moved));
-	}
+	bch2_trans_put(ctxt->trans);
+	memset(ctxt, 0, sizeof(*ctxt));
 }
 
 void bch2_moving_ctxt_init(struct moving_context *ctxt,
@@ -167,79 +239,45 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
 {
 	memset(ctxt, 0, sizeof(*ctxt));
 
-	ctxt->c		= c;
+	ctxt->trans	= bch2_trans_get(c);
+	ctxt->fn	= (void *) _RET_IP_;
 	ctxt->rate	= rate;
 	ctxt->stats	= stats;
 	ctxt->wp	= wp;
 	ctxt->wait_on_copygc = wait_on_copygc;
 
 	closure_init_stack(&ctxt->cl);
+
+	mutex_init(&ctxt->lock);
 	INIT_LIST_HEAD(&ctxt->reads);
+	INIT_LIST_HEAD(&ctxt->ios);
 	init_waitqueue_head(&ctxt->wait);
 
-	if (stats) {
-		progress_list_add(c, stats);
-		stats->data_type = BCH_DATA_user;
-	}
+	mutex_lock(&c->moving_context_lock);
+	list_add(&ctxt->list, &c->moving_context_list);
+	mutex_unlock(&c->moving_context_lock);
 }
 
-void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
+void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
 {
-	memset(stats, 0, sizeof(*stats));
-	scnprintf(stats->name, sizeof(stats->name), "%s", name);
+	trace_move_data(c, stats);
 }
 
-static int bch2_extent_drop_ptrs(struct btree_trans *trans,
-				 struct btree_iter *iter,
-				 struct bkey_s_c k,
-				 struct data_update_opts data_opts)
+void bch2_move_stats_init(struct bch_move_stats *stats, const char *name)
 {
-	struct bch_fs *c = trans->c;
-	struct bkey_i *n;
-	int ret;
-
-	n = bch2_bkey_make_mut(trans, k);
-	ret = PTR_ERR_OR_ZERO(n);
-	if (ret)
-		return ret;
-
-	while (data_opts.kill_ptrs) {
-		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
-		struct bch_extent_ptr *ptr;
-
-		bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
-		data_opts.kill_ptrs ^= 1U << drop;
-	}
-
-	/*
-	 * If the new extent no longer has any pointers, bch2_extent_normalize()
-	 * will do the appropriate thing with it (turning it into a
-	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
-	 */
-	bch2_extent_normalize(c, bkey_i_to_s(n));
-
-	/*
-	 * Since we're not inserting through an extent iterator
-	 * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
-	 * we aren't using the extent overwrite path to delete, we're
-	 * just using the normal key deletion path:
-	 */
-	if (bkey_deleted(&n->k))
-		n->k.size = 0;
-
-	return bch2_trans_relock(trans) ?:
-		bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
-		bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+	memset(stats, 0, sizeof(*stats));
+	stats->data_type = BCH_DATA_user;
+	scnprintf(stats->name, sizeof(stats->name), "%s", name);
 }
 
-static int bch2_move_extent(struct btree_trans *trans,
-			    struct btree_iter *iter,
-			    struct moving_context *ctxt,
-			    struct bch_io_opts io_opts,
-			    enum btree_id btree_id,
-			    struct bkey_s_c k,
-			    struct data_update_opts data_opts)
+int bch2_move_extent(struct moving_context *ctxt,
+		     struct move_bucket_in_flight *bucket_in_flight,
+		     struct btree_iter *iter,
+		     struct bkey_s_c k,
+		     struct bch_io_opts io_opts,
+		     struct data_update_opts data_opts)
 {
+	struct btree_trans *trans = ctxt->trans;
 	struct bch_fs *c = trans->c;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	struct moving_io *io;
@@ -248,6 +286,11 @@ static int bch2_move_extent(struct btree_trans *trans,
 	unsigned sectors = k.k->size, pages;
 	int ret = -ENOMEM;
 
+	trace_move_extent2(c, k, &io_opts, &data_opts);
+
+	if (ctxt->stats)
+		ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
+
 	bch2_data_update_opts_normalize(k, &data_opts);
 
 	if (!data_opts.rewrite_ptrs &&
@@ -257,9 +300,6 @@ static int bch2_move_extent(struct btree_trans *trans,
 		return 0;
 	}
 
-	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_move))
-		return -BCH_ERR_erofs_no_writes;
-
 	/*
 	 * Before memory allocations & taking nocow locks in
 	 * bch2_data_update_init():
@@ -276,6 +316,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (!io)
 		goto err;
 
+	INIT_LIST_HEAD(&io->io_list);
 	io->write.ctxt		= ctxt;
 	io->read_sectors	= k.k->size;
 	io->write_sectors	= k.k->size;
@@ -295,36 +336,41 @@ static int bch2_move_extent(struct btree_trans *trans,
 	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 	io->rbio.bio.bi_iter.bi_size	= sectors << 9;
 
-	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
+	io->rbio.bio.bi_opf		= REQ_OP_READ;
 	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
 	io->rbio.bio.bi_end_io		= move_read_endio;
 
-	ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
-				    io_opts, data_opts, btree_id, k);
-	if (ret && ret != -BCH_ERR_unwritten_extent_update)
+	ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
+				    io_opts, data_opts, iter->btree_id, k);
+	if (ret)
 		goto err_free_pages;
 
-	io->write.ctxt = ctxt;
 	io->write.op.end_io = move_write_done;
 
-	atomic64_inc(&ctxt->stats->keys_moved);
-	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+	if (ctxt->rate)
+		bch2_ratelimit_increment(ctxt->rate, k.k->size);
 
-	if (ret == -BCH_ERR_unwritten_extent_update) {
-		bch2_update_unwritten_extent(trans, &io->write);
-		move_free(io);
-		return 0;
+	if (ctxt->stats) {
+		atomic64_inc(&ctxt->stats->keys_moved);
+		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
 	}
 
-	BUG_ON(ret);
+	if (bucket_in_flight) {
+		io->b = bucket_in_flight;
+		atomic_inc(&io->b->count);
+	}
 
 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
 	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
-	trace_move_extent_read(k.k);
+	trace_move_extent_read2(c, k);
 
+	mutex_lock(&ctxt->lock);
 	atomic_add(io->read_sectors, &ctxt->read_sectors);
 	atomic_inc(&ctxt->read_ios);
-	list_add_tail(&io->list, &ctxt->reads);
+
+	list_add_tail(&io->read_list, &ctxt->reads);
+	list_add_tail(&io->io_list, &ctxt->ios);
+	mutex_unlock(&ctxt->lock);
 
 	/*
 	 * dropped by move_read_endio() - guards against use after free of
@@ -333,7 +379,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	closure_get(&ctxt->cl);
 
 	bch2_read_extent(trans, &io->rbio, bkey_start_pos(k.k),
-			 btree_id, k, 0,
+			 iter->btree_id, k, 0,
 			 BCH_READ_NODECODE|
 			 BCH_READ_LAST_FRAGMENT);
 	return 0;
@@ -342,73 +388,129 @@ err_free_pages:
 err_free:
 	kfree(io);
 err:
-	bch2_write_ref_put(c, BCH_WRITE_REF_move);
-	trace_and_count(c, move_extent_alloc_mem_fail, k.k);
+	if (ret == -BCH_ERR_data_update_done)
+		return 0;
+
+	if (bch2_err_matches(ret, EROFS) ||
+	    bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		return ret;
+
+	count_event(c, move_extent_start_fail);
+
+	if (trace_move_extent_start_fail_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, c, k);
+		prt_str(&buf, ": ");
+		prt_str(&buf, bch2_err_str(ret));
+		trace_move_extent_start_fail(c, buf.buf);
+		printbuf_exit(&buf);
+	}
 	return ret;
 }
 
-static int lookup_inode(struct btree_trans *trans, struct bpos pos,
-			struct bch_inode_unpacked *inode)
+struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
+					  struct per_snapshot_io_opts *io_opts,
+					  struct bkey_s_c extent_k)
+{
+	struct bch_fs *c = trans->c;
+	u32 restart_count = trans->restart_count;
+	int ret = 0;
+
+	if (io_opts->cur_inum != extent_k.k->p.inode) {
+		io_opts->d.nr = 0;
+
+		ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
+					 BTREE_ITER_ALL_SNAPSHOTS, k, ({
+			if (k.k->p.offset != extent_k.k->p.inode)
+				break;
+
+			if (!bkey_is_inode(k.k))
+				continue;
+
+			struct bch_inode_unpacked inode;
+			BUG_ON(bch2_inode_unpack(k, &inode));
+
+			struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot };
+			bch2_inode_opts_get(&e.io_opts, trans->c, &inode);
+
+			darray_push(&io_opts->d, e);
+		}));
+		io_opts->cur_inum = extent_k.k->p.inode;
+	}
+
+	ret = ret ?: trans_was_restarted(trans, restart_count);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (extent_k.k->p.snapshot)
+		darray_for_each(io_opts->d, i)
+			if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot))
+				return &i->io_opts;
+
+	return &io_opts->fs_io_opts;
+}
+
+int bch2_move_get_io_opts_one(struct btree_trans *trans,
+			      struct bch_io_opts *io_opts,
+			      struct bkey_s_c extent_k)
 {
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
-			     BTREE_ITER_ALL_SNAPSHOTS);
-	k = bch2_btree_iter_peek(&iter);
+	/* reflink btree? */
+	if (!extent_k.k->p.inode) {
+		*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
+		return 0;
+	}
+
+	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+			       SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
+			       BTREE_ITER_CACHED);
 	ret = bkey_err(k);
-	if (ret)
-		goto err;
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		return ret;
 
-	if (!k.k || !bkey_eq(k.k->p, pos)) {
-		ret = -ENOENT;
-		goto err;
+	if (!ret && bkey_is_inode(k.k)) {
+		struct bch_inode_unpacked inode;
+		bch2_inode_unpack(k, &inode);
+		bch2_inode_opts_get(io_opts, trans->c, &inode);
+	} else {
+		*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
 	}
 
-	ret = bkey_is_inode(k.k) ? 0 : -EIO;
-	if (ret)
-		goto err;
-
-	ret = bch2_inode_unpack(k, inode);
-	if (ret)
-		goto err;
-err:
 	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+	return 0;
 }
 
-static int move_ratelimit(struct btree_trans *trans,
-			  struct moving_context *ctxt)
+int bch2_move_ratelimit(struct moving_context *ctxt)
 {
-	struct bch_fs *c = trans->c;
+	struct bch_fs *c = ctxt->trans->c;
+	bool is_kthread = current->flags & PF_KTHREAD;
 	u64 delay;
 
-	if (ctxt->wait_on_copygc) {
-		bch2_trans_unlock(trans);
+	if (ctxt->wait_on_copygc && c->copygc_running) {
+		bch2_moving_ctxt_flush_all(ctxt);
 		wait_event_killable(c->copygc_running_wq,
 				    !c->copygc_running ||
-				    kthread_should_stop());
+				    (is_kthread && kthread_should_stop()));
 	}
 
 	do {
 		delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
 
-		if (delay) {
-			bch2_trans_unlock(trans);
-			set_current_state(TASK_INTERRUPTIBLE);
-		}
-
-		if ((current->flags & PF_KTHREAD) && kthread_should_stop()) {
-			__set_current_state(TASK_RUNNING);
+		if (is_kthread && kthread_should_stop())
 			return 1;
-		}
 
 		if (delay)
-			schedule_timeout(delay);
+			move_ctxt_wait_event_timeout(ctxt,
+					freezing(current) ||
+					(is_kthread && kthread_should_stop()),
+					delay);
 
 		if (unlikely(freezing(current))) {
-			move_ctxt_wait_event(ctxt, trans, list_empty(&ctxt->reads));
+			bch2_moving_ctxt_flush_all(ctxt);
 			try_to_freeze();
 		}
 	} while (delay);
@@ -417,7 +519,7 @@ static int move_ratelimit(struct btree_trans *trans,
 	 * XXX: these limits really ought to be per device, SSDs and hard drives
 	 * will want different limits
 	 */
-	move_ctxt_wait_event(ctxt, trans,
+	move_ctxt_wait_event(ctxt,
 		atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
 		atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
 		atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
@@ -426,62 +528,39 @@ static int move_ratelimit(struct btree_trans *trans,
 	return 0;
 }
 
-static int move_get_io_opts(struct btree_trans *trans,
-			    struct bch_io_opts *io_opts,
-			    struct bkey_s_c k, u64 *cur_inum)
-{
-	struct bch_inode_unpacked inode;
-	int ret;
-
-	if (*cur_inum == k.k->p.inode)
-		return 0;
-
-	ret = lookup_inode(trans,
-			   SPOS(0, k.k->p.inode, k.k->p.snapshot),
-			   &inode);
-	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-		return ret;
-
-	if (!ret)
-		bch2_inode_opts_get(io_opts, trans->c, &inode);
-	else
-		*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
-	*cur_inum = k.k->p.inode;
-	return 0;
-}
-
-static int __bch2_move_data(struct moving_context *ctxt,
-			    struct bpos start,
-			    struct bpos end,
-			    move_pred_fn pred, void *arg,
-			    enum btree_id btree_id)
+static int bch2_move_data_btree(struct moving_context *ctxt,
+				struct bpos start,
+				struct bpos end,
+				move_pred_fn pred, void *arg,
+				enum btree_id btree_id)
 {
-	struct bch_fs *c = ctxt->c;
-	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+	struct btree_trans *trans = ctxt->trans;
+	struct bch_fs *c = trans->c;
+	struct per_snapshot_io_opts snapshot_io_opts;
+	struct bch_io_opts *io_opts;
 	struct bkey_buf sk;
-	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct data_update_opts data_opts;
-	u64 cur_inum = U64_MAX;
 	int ret = 0, ret2;
 
+	per_snapshot_io_opts_init(&snapshot_io_opts, c);
 	bch2_bkey_buf_init(&sk);
-	bch2_trans_init(&trans, c, 0, 0);
 
-	ctxt->stats->data_type	= BCH_DATA_user;
-	ctxt->stats->btree_id	= btree_id;
-	ctxt->stats->pos	= start;
+	if (ctxt->stats) {
+		ctxt->stats->data_type	= BCH_DATA_user;
+		ctxt->stats->pos	= BBPOS(btree_id, start);
+	}
 
-	bch2_trans_iter_init(&trans, &iter, btree_id, start,
+	bch2_trans_iter_init(trans, &iter, btree_id, start,
 			     BTREE_ITER_PREFETCH|
 			     BTREE_ITER_ALL_SNAPSHOTS);
 
 	if (ctxt->rate)
 		bch2_ratelimit_reset(ctxt->rate);
 
-	while (!move_ratelimit(&trans, ctxt)) {
-		bch2_trans_begin(&trans);
+	while (!bch2_move_ratelimit(ctxt)) {
+		bch2_trans_begin(trans);
 
 		k = bch2_btree_iter_peek(&iter);
 		if (!k.k)
@@ -496,17 +575,19 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
-		ctxt->stats->pos = iter.pos;
+		if (ctxt->stats)
+			ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);
 
 		if (!bkey_extent_is_direct_data(k.k))
 			goto next_nondata;
 
-		ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum);
+		io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, k);
+		ret = PTR_ERR_OR_ZERO(io_opts);
 		if (ret)
 			continue;
 
 		memset(&data_opts, 0, sizeof(data_opts));
-		if (!pred(c, arg, k, &io_opts, &data_opts))
+		if (!pred(c, arg, k, io_opts, &data_opts))
 			goto next;
 
 		/*
@@ -515,151 +596,92 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		 */
 		bch2_bkey_buf_reassemble(&sk, c, k);
 		k = bkey_i_to_s_c(sk.k);
-		bch2_trans_unlock(&trans);
 
-		ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
-					btree_id, k, data_opts);
+		ret2 = bch2_move_extent(ctxt, NULL, &iter, k, *io_opts, data_opts);
 		if (ret2) {
 			if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
 				continue;
 
 			if (ret2 == -ENOMEM) {
 				/* memory allocation failure, wait for some IO to finish */
-				bch2_move_ctxt_wait_for_io(ctxt, &trans);
+				bch2_move_ctxt_wait_for_io(ctxt);
 				continue;
 			}
 
 			/* XXX signal failure */
 			goto next;
 		}
-
-		if (ctxt->rate)
-			bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
-		atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+		if (ctxt->stats)
+			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
 		bch2_btree_iter_advance(&iter);
 	}
 
-	bch2_trans_iter_exit(&trans, &iter);
-	bch2_trans_exit(&trans);
+	bch2_trans_iter_exit(trans, &iter);
 	bch2_bkey_buf_exit(&sk, c);
+	per_snapshot_io_opts_exit(&snapshot_io_opts);
 
 	return ret;
 }
 
-int bch2_move_data(struct bch_fs *c,
-		   enum btree_id start_btree_id, struct bpos start_pos,
-		   enum btree_id end_btree_id, struct bpos end_pos,
-		   struct bch_ratelimit *rate,
-		   struct bch_move_stats *stats,
-		   struct write_point_specifier wp,
-		   bool wait_on_copygc,
-		   move_pred_fn pred, void *arg)
+int __bch2_move_data(struct moving_context *ctxt,
+		     struct bbpos start,
+		     struct bbpos end,
+		     move_pred_fn pred, void *arg)
 {
-	struct moving_context ctxt;
+	struct bch_fs *c = ctxt->trans->c;
 	enum btree_id id;
-	int ret;
-
-	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+	int ret = 0;
 
-	for (id = start_btree_id;
-	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
+	for (id = start.btree;
+	     id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
 	     id++) {
-		stats->btree_id = id;
+		ctxt->stats->pos = BBPOS(id, POS_MIN);
 
-		if (id != BTREE_ID_extents &&
-		    id != BTREE_ID_reflink)
+		if (!btree_type_has_ptrs(id) ||
+		    !bch2_btree_id_root(c, id)->b)
 			continue;
 
-		ret = __bch2_move_data(&ctxt,
-				       id == start_btree_id ? start_pos : POS_MIN,
-				       id == end_btree_id   ? end_pos   : POS_MAX,
+		ret = bch2_move_data_btree(ctxt,
+					   id == start.btree ? start.pos : POS_MIN,
+					   id == end.btree   ? end.pos   : POS_MAX,
 				       pred, arg, id);
 		if (ret)
 			break;
 	}
 
-	bch2_moving_ctxt_exit(&ctxt);
-
 	return ret;
 }
 
-static noinline void verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
+int bch2_move_data(struct bch_fs *c,
+		   struct bbpos start,
+		   struct bbpos end,
+		   struct bch_ratelimit *rate,
+		   struct bch_move_stats *stats,
+		   struct write_point_specifier wp,
+		   bool wait_on_copygc,
+		   move_pred_fn pred, void *arg)
 {
-	struct bch_fs *c = trans->c;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
-	struct bch_backpointer bp;
-	u64 bp_offset = 0;
-	int ret;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-			     bucket, BTREE_ITER_CACHED);
-again:
-	ret = lockrestart_do(trans,
-			bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
-
-	if (!ret && k.k->type == KEY_TYPE_alloc_v4) {
-		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
-
-		if (a.v->gen == gen &&
-		    a.v->dirty_sectors) {
-			if (a.v->data_type == BCH_DATA_btree) {
-				bch2_trans_unlock(trans);
-				if (bch2_btree_interior_updates_flush(c))
-					goto again;
-				goto failed_to_evacuate;
-			}
-		}
-	}
-
-	bch2_trans_iter_exit(trans, &iter);
-	return;
-failed_to_evacuate:
-	bch2_trans_iter_exit(trans, &iter);
-
-	prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
-	bch2_bkey_val_to_text(&buf, c, k);
-
-	while (1) {
-		bch2_trans_begin(trans);
-
-		ret = bch2_get_next_backpointer(trans, bucket, gen,
-						&bp_offset, &bp,
-						BTREE_ITER_CACHED);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (bp_offset == U64_MAX)
-			break;
+	struct moving_context ctxt;
+	int ret;
 
-		k = bch2_backpointer_get_key(trans, &iter,
-					     bucket, bp_offset, bp);
-		ret = bkey_err(k);
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			continue;
-		if (ret)
-			break;
-		if (!k.k)
-			continue;
-		prt_newline(&buf);
-		bch2_bkey_val_to_text(&buf, c, k);
-		bch2_trans_iter_exit(trans, &iter);
-	}
+	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+	ret = __bch2_move_data(&ctxt, start, end, pred, arg);
+	bch2_moving_ctxt_exit(&ctxt);
 
-	bch2_print_string_as_lines(KERN_ERR, buf.buf);
-	printbuf_exit(&buf);
+	return ret;
 }
 
-int __bch2_evacuate_bucket(struct btree_trans *trans,
-			   struct moving_context *ctxt,
+int bch2_evacuate_bucket(struct moving_context *ctxt,
+			 struct move_bucket_in_flight *bucket_in_flight,
 			 struct bpos bucket, int gen,
 			 struct data_update_opts _data_opts)
 {
-	struct bch_fs *c = ctxt->c;
+	struct btree_trans *trans = ctxt->trans;
+	struct bch_fs *c = trans->c;
+	bool is_kthread = current->flags & PF_KTHREAD;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
 	struct btree_iter iter;
 	struct bkey_buf sk;
@@ -670,53 +692,56 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 	struct data_update_opts data_opts;
 	unsigned dirty_sectors, bucket_size;
 	u64 fragmentation;
-	u64 bp_offset = 0, cur_inum = U64_MAX;
+	struct bpos bp_pos = POS_MIN;
 	int ret = 0;
 
+	trace_bucket_evacuate(c, &bucket);
+
 	bch2_bkey_buf_init(&sk);
 
+	/*
+	 * We're not run in a context that handles transaction restarts:
+	 */
+	bch2_trans_begin(trans);
+
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
 			     bucket, BTREE_ITER_CACHED);
 	ret = lockrestart_do(trans,
 			bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
 	bch2_trans_iter_exit(trans, &iter);
 
-	if (ret) {
-		bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
+	bch_err_msg(c, ret, "looking up alloc key");
+	if (ret)
 		goto err;
-	}
 
 	a = bch2_alloc_to_v4(k, &a_convert);
-	dirty_sectors = a->dirty_sectors;
+	dirty_sectors = bch2_bucket_sectors_dirty(*a);
 	bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
 	fragmentation = a->fragmentation_lru;
 
-	ret = bch2_btree_write_buffer_flush(trans);
-	if (ret) {
-		bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
+	ret = bch2_btree_write_buffer_tryflush(trans);
+	bch_err_msg(c, ret, "flushing btree write buffer");
+	if (ret)
 		goto err;
-	}
 
-	while (!(ret = move_ratelimit(trans, ctxt))) {
+	while (!(ret = bch2_move_ratelimit(ctxt))) {
+		if (is_kthread && kthread_should_stop())
+			break;
+
 		bch2_trans_begin(trans);
 
 		ret = bch2_get_next_backpointer(trans, bucket, gen,
-						&bp_offset, &bp,
+						&bp_pos, &bp,
 						BTREE_ITER_CACHED);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			continue;
 		if (ret)
 			goto err;
-		if (bp_offset == U64_MAX)
+		if (bkey_eq(bp_pos, POS_MAX))
 			break;
 
 		if (!bp.level) {
-			const struct bch_extent_ptr *ptr;
-			struct bkey_s_c k;
-			unsigned i = 0;
-
-			k = bch2_backpointer_get_key(trans, &iter,
-						     bucket, bp_offset, bp);
+			k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
 			ret = bkey_err(k);
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
@@ -728,7 +753,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			bch2_bkey_buf_reassemble(&sk, c, k);
 			k = bkey_i_to_s_c(sk.k);
 
-			ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
+			ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
 			if (ret) {
 				bch2_trans_iter_exit(trans, &iter);
 				continue;
@@ -738,34 +763,38 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			data_opts.target	= io_opts.background_target;
 			data_opts.rewrite_ptrs = 0;
 
+			unsigned i = 0;
 			bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
-				if (ptr->dev == bucket.inode)
+				if (ptr->dev == bucket.inode) {
 					data_opts.rewrite_ptrs |= 1U << i;
+					if (ptr->cached) {
+						bch2_trans_iter_exit(trans, &iter);
+						goto next;
+					}
+				}
 				i++;
 			}
 
-			ret = bch2_move_extent(trans, &iter, ctxt, io_opts,
-					       bp.btree_id, k, data_opts);
+			ret = bch2_move_extent(ctxt, bucket_in_flight,
+					       &iter, k, io_opts, data_opts);
 			bch2_trans_iter_exit(trans, &iter);
 
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
 			if (ret == -ENOMEM) {
 				/* memory allocation failure, wait for some IO to finish */
-				bch2_move_ctxt_wait_for_io(ctxt, trans);
+				bch2_move_ctxt_wait_for_io(ctxt);
 				continue;
 			}
 			if (ret)
 				goto err;
 
-			if (ctxt->rate)
-				bch2_ratelimit_increment(ctxt->rate, k.k->size);
-			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+			if (ctxt->stats)
+				atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 		} else {
 			struct btree *b;
 
-			b = bch2_backpointer_get_node(trans, &iter,
-						      bucket, bp_offset, bp);
+			b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp);
 			ret = PTR_ERR_OR_ZERO(b);
 			if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
 				continue;
@@ -776,6 +805,8 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			if (!b)
 				goto next;
 
+			unsigned sectors = btree_ptr_sectors_written(&b->key);
+
 			ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
 			bch2_trans_iter_exit(trans, &iter);
 
@@ -785,99 +816,77 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 				goto err;
 
 			if (ctxt->rate)
-				bch2_ratelimit_increment(ctxt->rate,
-							 c->opts.btree_node_size >> 9);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
-			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+				bch2_ratelimit_increment(ctxt->rate, sectors);
+			if (ctxt->stats) {
+				atomic64_add(sectors, &ctxt->stats->sectors_seen);
+				atomic64_add(sectors, &ctxt->stats->sectors_moved);
+			}
 		}
 next:
-		bp_offset++;
+		bp_pos = bpos_nosnap_successor(bp_pos);
 	}
 
 	trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
-
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && gen >= 0) {
-		bch2_trans_unlock(trans);
-		move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
-		closure_sync(&ctxt->cl);
-		if (!ctxt->write_error)
-			verify_bucket_evacuated(trans, bucket, gen);
-	}
 err:
 	bch2_bkey_buf_exit(&sk, c);
 	return ret;
 }
 
-int bch2_evacuate_bucket(struct bch_fs *c,
-			 struct bpos bucket, int gen,
-			 struct data_update_opts data_opts,
-			 struct bch_ratelimit *rate,
-			 struct bch_move_stats *stats,
-			 struct write_point_specifier wp,
-			 bool wait_on_copygc)
-{
-	struct btree_trans trans;
-	struct moving_context ctxt;
-	int ret;
-
-	bch2_trans_init(&trans, c, 0, 0);
-	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
-	ret = __bch2_evacuate_bucket(&trans, &ctxt, bucket, gen, data_opts);
-	bch2_moving_ctxt_exit(&ctxt);
-	bch2_trans_exit(&trans);
-
-	return ret;
-}
-
 typedef bool (*move_btree_pred)(struct bch_fs *, void *,
 				struct btree *, struct bch_io_opts *,
 				struct data_update_opts *);
 
 static int bch2_move_btree(struct bch_fs *c,
-			   enum btree_id start_btree_id, struct bpos start_pos,
-			   enum btree_id end_btree_id, struct bpos end_pos,
+			   struct bbpos start,
+			   struct bbpos end,
 			   move_btree_pred pred, void *arg,
 			   struct bch_move_stats *stats)
 {
 	bool kthread = (current->flags & PF_KTHREAD) != 0;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
-	struct btree_trans trans;
+	struct moving_context ctxt;
+	struct btree_trans *trans;
 	struct btree_iter iter;
 	struct btree *b;
-	enum btree_id id;
+	enum btree_id btree;
 	struct data_update_opts data_opts;
 	int ret = 0;
 
-	bch2_trans_init(&trans, c, 0, 0);
-	progress_list_add(c, stats);
+	bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
+			      writepoint_ptr(&c->btree_write_point),
+			      true);
+	trans = ctxt.trans;
 
 	stats->data_type = BCH_DATA_btree;
 
-	for (id = start_btree_id;
-	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
-	     id++) {
-		stats->btree_id = id;
+	for (btree = start.btree;
+	     btree <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
+	     btree ++) {
+		stats->pos = BBPOS(btree, POS_MIN);
+
+		if (!bch2_btree_id_root(c, btree)->b)
+			continue;
 
-		bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0,
+		bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, 0,
 					  BTREE_ITER_PREFETCH);
retry:
 		ret = 0;
-		while (bch2_trans_begin(&trans),
+		while (bch2_trans_begin(trans),
 		       (b = bch2_btree_iter_peek_node(&iter)) &&
 		       !(ret = PTR_ERR_OR_ZERO(b))) {
 			if (kthread && kthread_should_stop())
 				break;
 
-			if ((cmp_int(id, end_btree_id) ?:
-			     bpos_cmp(b->key.k.p, end_pos)) > 0)
+			if ((cmp_int(btree, end.btree) ?:
+			     bpos_cmp(b->key.k.p, end.pos)) > 0)
 				break;
 
-			stats->pos = iter.pos;
+			stats->pos = BBPOS(iter.btree_id, iter.pos);
 
 			if (!pred(c, arg, b, &io_opts, &data_opts))
 				goto next;
-			ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret;
+			ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret;
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 				continue;
 			if (ret)
 				break;
@@ -888,20 +897,16 @@ next:
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto retry;
 
-		bch2_trans_iter_exit(&trans, &iter);
+		bch2_trans_iter_exit(trans, &iter);
 
 		if (kthread && kthread_should_stop())
 			break;
 	}
 
-	bch2_trans_exit(&trans);
-
-	if (ret)
-		bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
-
+	bch_err_fn(c, ret);
+	bch2_moving_ctxt_exit(&ctxt);
 	bch2_btree_interior_updates_flush(c);
-	progress_list_del(c, stats);
 	return ret;
 }
 
@@ -930,7 +935,6 @@ static bool migrate_pred(struct bch_fs *c, void *arg,
 			 struct data_update_opts *data_opts)
 {
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const struct bch_extent_ptr *ptr;
 	struct bch_ioctl_data *op = arg;
 	unsigned i = 0;
 
@@ -1010,8 +1014,8 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
 	int ret;
 
 	ret = bch2_move_btree(c,
-			      0, POS_MIN,
-			      BTREE_ID_NR, SPOS_MAX,
+			      BBPOS_MIN,
+			      BBPOS_MAX,
 			      rewrite_old_nodes_pred, c, stats);
 	if (!ret) {
 		mutex_lock(&c->sb_lock);
@@ -1022,30 +1026,65 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
 		mutex_unlock(&c->sb_lock);
 	}
 
+	bch_err_fn(c, ret);
 	return ret;
 }
 
+static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
+				     struct bkey_s_c k,
+				     struct bch_io_opts *io_opts,
+				     struct data_update_opts *data_opts)
+{
+	unsigned durability = bch2_bkey_durability(c, k);
+	unsigned replicas = bkey_is_btree_ptr(k.k)
+		? c->opts.metadata_replicas
+		: io_opts->data_replicas;
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
+	unsigned i = 0;
+
+	bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
+		unsigned d = bch2_extent_ptr_durability(c, &p);
+
+		if (d && durability - d >= replicas) {
+			data_opts->kill_ptrs |= BIT(i);
+			durability -= d;
+		}
+
+		i++;
+	}
+
+	return data_opts->kill_ptrs != 0;
+}
+
+static bool drop_extra_replicas_btree_pred(struct bch_fs *c, void *arg,
+					   struct btree *b,
+					   struct bch_io_opts *io_opts,
+					   struct data_update_opts *data_opts)
+{
+	return drop_extra_replicas_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
+}
+
 int bch2_data_job(struct bch_fs *c,
 		  struct bch_move_stats *stats,
 		  struct bch_ioctl_data op)
 {
+	struct bbpos start	= BBPOS(op.start_btree, op.start_pos);
+	struct bbpos end	= BBPOS(op.end_btree, op.end_pos);
 	int ret = 0;
 
+	if (op.op >= BCH_DATA_OP_NR)
+		return -EINVAL;
+
+	bch2_move_stats_init(stats, bch2_data_ops_strs[op.op]);
+
 	switch (op.op) {
-	case BCH_DATA_OP_REREPLICATE:
-		bch2_move_stats_init(stats, "rereplicate");
+	case BCH_DATA_OP_rereplicate:
 		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, -1);
-
-		ret = bch2_move_btree(c,
-				      op.start_btree, op.start_pos,
-				      op.end_btree, op.end_pos,
+		ret = bch2_move_btree(c, start, end,
 				      rereplicate_btree_pred, c, stats) ?: ret;
-		ret = bch2_replicas_gc2(c) ?: ret;
-
-		ret = bch2_move_data(c,
-				     op.start_btree, op.start_pos,
-				     op.end_btree, op.end_pos,
+		ret = bch2_move_data(c, start, end,
 				     NULL,
 				     stats,
 				     writepoint_hashed((unsigned long) current),
@@ -1053,23 +1092,15 @@ int bch2_data_job(struct bch_fs *c,
 				     rereplicate_pred, c) ?: ret;
 		ret = bch2_replicas_gc2(c) ?: ret;
 		break;
-	case BCH_DATA_OP_MIGRATE:
+	case BCH_DATA_OP_migrate:
 		if (op.migrate.dev >= c->sb.nr_devices)
 			return -EINVAL;
 
-		bch2_move_stats_init(stats, "migrate");
 		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
-
-		ret = bch2_move_btree(c,
-				      op.start_btree, op.start_pos,
-				      op.end_btree, op.end_pos,
+		ret = bch2_move_btree(c, start, end,
 				      migrate_btree_pred, &op, stats) ?: ret;
-		ret = bch2_replicas_gc2(c) ?: ret;
-
-		ret = bch2_move_data(c,
-				     op.start_btree, op.start_pos,
-				     op.end_btree, op.end_pos,
+		ret = bch2_move_data(c, start, end,
 				     NULL,
 				     stats,
 				     writepoint_hashed((unsigned long) current),
@@ -1077,13 +1108,101 @@ int bch2_data_job(struct bch_fs *c,
 				     migrate_pred, &op) ?: ret;
 		ret = bch2_replicas_gc2(c) ?: ret;
 		break;
-	case BCH_DATA_OP_REWRITE_OLD_NODES:
-		bch2_move_stats_init(stats, "rewrite_old_nodes");
+	case BCH_DATA_OP_rewrite_old_nodes:
 		ret = bch2_scan_old_btree_nodes(c, stats);
 		break;
+	case BCH_DATA_OP_drop_extra_replicas:
+		ret = bch2_move_btree(c, start, end,
+				      drop_extra_replicas_btree_pred, c, stats) ?: ret;
+		ret = bch2_move_data(c, start, end, NULL, stats,
+				     writepoint_hashed((unsigned long) current),
+				     true,
+				     drop_extra_replicas_pred, c) ?: ret;
+		ret = bch2_replicas_gc2(c) ?: ret;
+		break;
 	default:
 		ret = -EINVAL;
 	}
 
+	bch2_move_stats_exit(stats, c);
 	return ret;
 }
+
+void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
+{
+	prt_printf(out, "%s: data type==", stats->name);
+	bch2_prt_data_type(out, stats->data_type);
+	prt_str(out, " pos=");
+	bch2_bbpos_to_text(out, stats->pos);
+	prt_newline(out);
+	printbuf_indent_add(out, 2);
+
+	prt_str(out, "keys moved: ");
+	prt_u64(out, atomic64_read(&stats->keys_moved));
+	prt_newline(out);
+
+	prt_str(out, "keys raced: ");
+	prt_u64(out, atomic64_read(&stats->keys_raced));
+	prt_newline(out);
+
+	prt_str(out, "bytes seen: ");
+	prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
+	prt_newline(out);
+
+	prt_str(out, "bytes moved: ");
+	prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
+	prt_newline(out);
+
+	prt_str(out, "bytes raced: ");
+	prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
+	prt_newline(out);
+
+	printbuf_indent_sub(out, 2);
+}
+
+static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
+{
+	struct moving_io *io;
+
+	bch2_move_stats_to_text(out, ctxt->stats);
+	printbuf_indent_add(out, 2);
+
+	prt_printf(out, "reads: ios %u/%u sectors %u/%u",
+		   atomic_read(&ctxt->read_ios),
+		   c->opts.move_ios_in_flight,
+		   atomic_read(&ctxt->read_sectors),
+		   c->opts.move_bytes_in_flight >> 9);
+	prt_newline(out);
+
+	prt_printf(out, "writes: ios %u/%u sectors %u/%u",
+		   atomic_read(&ctxt->write_ios),
+		   c->opts.move_ios_in_flight,
+		   atomic_read(&ctxt->write_sectors),
+		   c->opts.move_bytes_in_flight >> 9);
+	prt_newline(out);
+
+	printbuf_indent_add(out, 2);
+
+	mutex_lock(&ctxt->lock);
+	list_for_each_entry(io, &ctxt->ios, io_list)
+		bch2_write_op_to_text(out, &io->write.op);
+	mutex_unlock(&ctxt->lock);
+
+	printbuf_indent_sub(out, 4);
+}
+
+void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	struct moving_context *ctxt;
+
+	mutex_lock(&c->moving_context_lock);
+	list_for_each_entry(ctxt, &c->moving_context_list, list)
+		bch2_moving_ctxt_to_text(out, c, ctxt);
+	mutex_unlock(&c->moving_context_lock);
+}
+
+void bch2_fs_move_init(struct bch_fs *c)
+{
+	INIT_LIST_HEAD(&c->moving_context_list);
+	mutex_init(&c->moving_context_lock);
+}