+ atomic64_add(k.k->size * bch2_bkey_nr_dirty_ptrs(k),
+ &stats->sectors_seen);
+next_nondata:
+ bch2_btree_iter_next(iter);
+ bch2_trans_cond_resched(&trans);
+ }
+out:
+ ret = bch2_trans_exit(&trans) ?: ret;
+
+ return ret;
+}
+
+/*
+ * bch2_move_data - move/rewrite user data in [start, end), as directed by @pred
+ *
+ * Runs the per-btree worker over BTREE_ID_EXTENTS and then, only if that pass
+ * returned 0 (the ?: short-circuits on the first nonzero error), over
+ * BTREE_ID_REFLINK.  The per-key decision (skip / add replicas / rewrite) is
+ * delegated to @pred(@arg); counters accumulate into @stats.
+ *
+ * Returns: 0 on success, otherwise the first nonzero error from
+ * __bch2_move_data().
+ */
+int bch2_move_data(struct bch_fs *c,
+ struct bch_ratelimit *rate,
+ struct write_point_specifier wp,
+ struct bpos start,
+ struct bpos end,
+ move_pred_fn pred, void *arg,
+ struct bch_move_stats *stats)
+{
+ /* One moving_context is shared by both btree passes below. */
+ struct moving_context ctxt = { .stats = stats };
+ int ret;
+
+ closure_init_stack(&ctxt.cl);
+ INIT_LIST_HEAD(&ctxt.reads);
+ init_waitqueue_head(&ctxt.wait);
+
+ stats->data_type = BCH_DATA_USER;
+
+ ret = __bch2_move_data(c, &ctxt, rate, wp, start, end,
+ pred, arg, stats, BTREE_ID_EXTENTS) ?:
+ __bch2_move_data(c, &ctxt, rate, wp, start, end,
+ pred, arg, stats, BTREE_ID_REFLINK);
+
+ /*
+ * Teardown: wait for all outstanding reads to drain, then sync the
+ * closure (presumably covering in-flight writes -- the EBUG_ON below
+ * checks no write sectors remain; confirm against moving_context).
+ */
+ move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
+ closure_sync(&ctxt.cl);
+
+ EBUG_ON(atomic_read(&ctxt.write_sectors));
+
+ trace_move_data(c,
+ atomic64_read(&stats->sectors_moved),
+ atomic64_read(&stats->keys_moved));
+
+ return ret;
+}
+
+/*
+ * bch2_move_btree - walk every node of every btree, rewriting nodes that
+ * @pred selects.
+ *
+ * DATA_SKIP jumps to the next node; DATA_ADD_REPLICAS and DATA_REWRITE are
+ * both serviced by bch2_btree_node_rewrite(); DATA_SCRUB (and any unknown
+ * command) is a BUG() -- scrub is not implemented for btree nodes here.
+ *
+ * Returns: 0 on success, otherwise a nonzero error; see the ?: accumulation
+ * notes below for which error survives when several occur.
+ */
+static int bch2_move_btree(struct bch_fs *c,
+ move_pred_fn pred,
+ void *arg,
+ struct bch_move_stats *stats)
+{
+ struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct btree *b;
+ unsigned id;
+ struct data_opts data_opts;
+ enum data_cmd cmd;
+ int ret = 0;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ stats->data_type = BCH_DATA_BTREE;
+
+ for (id = 0; id < BTREE_ID_NR; id++) {
+ stats->btree_id = id;
+
+ for_each_btree_node(&trans, iter, id, POS_MIN,
+ BTREE_ITER_PREFETCH, b) {
+ /* Record current position, presumably for progress reporting. */
+ stats->pos = iter->pos;
+
+ switch ((cmd = pred(c, arg,
+ bkey_i_to_s_c(&b->key),
+ &io_opts, &data_opts))) {
+ case DATA_SKIP:
+ goto next;
+ case DATA_SCRUB:
+ BUG();
+ case DATA_ADD_REPLICAS:
+ case DATA_REWRITE:
+ break;
+ default:
+ BUG();
+ }
+
+ /*
+ * ?: keeps the rewrite error if nonzero, otherwise
+ * preserves any earlier error -- a later success never
+ * clears a previously recorded failure.
+ */
+ ret = bch2_btree_node_rewrite(c, iter,
+ b->data->keys.seq, 0) ?: ret;
+next:
+ bch2_trans_cond_resched(&trans);
+ }
+
+ /* Free this btree's iterator; again, don't lose an earlier error. */
+ ret = bch2_trans_iter_free(&trans, iter) ?: ret;
+ }
+
+ bch2_trans_exit(&trans);
+
+ return ret;
+}
+
+#if 0
+/*
+ * Placeholder predicate that requests DATA_SCRUB for every key.  Compiled
+ * out: DATA_SCRUB is unhandled (BUG()) in the move paths above, so this is
+ * kept only as a sketch for a future scrub implementation.
+ */
+static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_opts *data_opts)
+{
+ return DATA_SCRUB;
+}
+#endif
+
+static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_opts *data_opts)
+{
+ unsigned nr_good = bch2_bkey_durability(c, k);
+ unsigned replicas = 0;
+
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr:
+ replicas = c->opts.metadata_replicas;
+ break;
+ case KEY_TYPE_extent:
+ replicas = io_opts->data_replicas;
+ break;