#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
+#include "ec.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "move.h"
#include "replicas.h"
+#include "subvolume.h"
#include "super-io.h"
#include "keylist.h"
wait_queue_head_t wait;
};
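+/*
+ * When a key in snapshot S is moved from @old_pos to @new_pos, keys in
+ * descendent snapshots only shadowed it at @old_pos: without whiteouts at
+ * @new_pos, the moved key would become newly visible in those snapshots.
+ * Scan for such descendents and insert a whiteout at @new_pos in each.
+ */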
+static int insert_snapshot_whiteouts(struct btree_trans *trans,
+ enum btree_id id,
+ struct bpos old_pos,
+ struct bpos new_pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter, update_iter;
+ struct bkey_s_c k;
+ struct snapshots_seen s;
+ int ret;
+
+ if (!btree_type_has_snapshots(id))
+ return 0;
+
+ snapshots_seen_init(&s);
+
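+ /* nothing to whiteout if the key's position is unchanged: */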
+ if (!bkey_cmp(old_pos, new_pos))
+ return 0;
+
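+ /* ...or if the snapshot it was written in has no children: */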
+ if (!snapshot_t(c, old_pos.snapshot)->children[0])
+ return 0;
+
+ bch2_trans_iter_init(trans, &iter, id, old_pos,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS);
+ while (1) {
+next:
+ k = bch2_btree_iter_prev(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
+
+ if (bkey_cmp(old_pos, k.k->p))
+ break;
+
+ if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
+ struct bkey_i *update;
+ u32 *i;
+
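+ /*
+ * Skip if a whiteout we already inserted in an ancestor of
+ * this snapshot shadows @new_pos here too:
+ */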
+ darray_for_each(s.ids, i)
+ if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, *i))
+ goto next;
+
+ update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
+
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ break;
+
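+ /* the whiteout: a deleted key at @new_pos in this key's snapshot */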
+ bkey_init(&update->k);
+ update->k.p = new_pos;
+ update->k.p.snapshot = k.k->p.snapshot;
+
+ bch2_trans_iter_init(trans, &update_iter, id, update->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&update_iter) ?:
+ bch2_trans_update(trans, &update_iter, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch2_trans_iter_exit(trans, &update_iter);
+ if (ret)
+ break;
+
+ ret = snapshots_seen_add(c, &s, k.k->p.snapshot);
+ if (ret)
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+ darray_exit(s.ids);
+
+ return ret;
+}
+
static int bch2_migrate_index_update(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_iter iter;
struct migrate_write *m =
container_of(op, struct migrate_write, op);
+ struct open_bucket *ec_ob = ec_open_bucket(c, &op->open_buckets);
struct keylist *keys = &op->insert_keys;
struct bkey_buf _new, _insert;
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
- iter = bch2_trans_get_iter(&trans, m->btree_id,
- bkey_start_pos(&bch2_keylist_front(keys)->k),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ bch2_trans_iter_init(&trans, &iter, m->btree_id,
+ bkey_start_pos(&bch2_keylist_front(keys)->k),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
while (1) {
struct bkey_s_c k;
struct bkey_i_extent *new;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
+ struct bpos next_pos;
bool did_work = false;
- bool extending = false, should_check_enospc;
+ bool should_check_enospc;
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
- bch2_trans_reset(&trans, 0);
+ bch2_trans_begin(&trans);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
new = bkey_i_to_extent(_new.k);
- bch2_cut_front(iter->pos, &new->k_i);
+ bch2_cut_front(iter.pos, &new->k_i);
- bch2_cut_front(iter->pos, insert);
+ bch2_cut_front(iter.pos, insert);
bch2_cut_back(new->k.p, insert);
bch2_cut_back(insert->k.p, &new->k_i);
extent_for_each_ptr(extent_i_to_s(new), new_ptr)
new_ptr->cached = true;
- bch2_bkey_drop_ptr(bkey_i_to_s(insert), old_ptr);
+ __bch2_bkey_drop_ptr(bkey_i_to_s(insert), old_ptr);
}
extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
op->opts.background_target,
op->opts.data_replicas);
- ret = bch2_sum_sector_overwrites(&trans, iter, insert,
- &extending,
+ ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
&should_check_enospc,
&i_sectors_delta,
&disk_sectors_delta);
goto out;
}
- ret = bch2_trans_update(&trans, iter, insert, 0) ?:
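+ /* note where the update ends, so the iterator can be advanced past it: */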
+ next_pos = insert->k.p;
+
+ ret = insert_snapshot_whiteouts(&trans, m->btree_id,
+ k.k->p, insert->k.p) ?:
+ bch2_trans_update(&trans, &iter, insert,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
bch2_trans_commit(&trans, &op->res,
op_journal_seq(op),
BTREE_INSERT_NOFAIL|
m->data_opts.btree_insert_flags);
-err:
- if (!ret)
+ if (!ret) {
+ bch2_btree_iter_set_pos(&iter, next_pos);
atomic_long_inc(&c->extent_migrate_done);
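+ /*
+ * If the data was written to an erasure coded bucket, record a
+ * backpointer from the bucket to the key we just inserted:
+ */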
+ if (ec_ob)
+ bch2_ob_add_backpointer(c, ec_ob, &insert->k);
+ }
+err:
if (ret == -EINTR)
ret = 0;
if (ret)
break;
next:
- while (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) >= 0) {
+ while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
bch2_keylist_pop_front(keys);
if (bch2_keylist_empty(keys))
goto out;
continue;
nomatch:
if (m->ctxt) {
- BUG_ON(k.k->p.offset <= iter->pos.offset);
+ BUG_ON(k.k->p.offset <= iter.pos.offset);
atomic64_inc(&m->ctxt->stats->keys_raced);
- atomic64_add(k.k->p.offset - iter->pos.offset,
+ atomic64_add(k.k->p.offset - iter.pos.offset,
&m->ctxt->stats->sectors_raced);
}
atomic_long_inc(&c->extent_migrate_raced);
trace_move_race(&new->k);
- bch2_btree_iter_next_slot(iter);
+ bch2_btree_iter_advance(&iter);
goto next;
}
out:
- bch2_trans_iter_put(&trans, iter);
+ bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&_insert, c);
bch2_bkey_buf_exit(&_new, c);
m->op.crc = rbio->pick.crc;
m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;
- if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
- m->op.nonce = m->op.crc.nonce + m->op.crc.offset;
- m->op.csum_type = m->op.crc.csum_type;
- }
-
if (m->data_cmd == DATA_REWRITE)
bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
+ struct bch_extent_crc_unpacked crc;
struct extent_ptr_decoded p;
int ret;
m->op.target = data_opts.target,
m->op.write_point = wp;
+ /*
+ * op->csum_type is normally initialized from the fs/file's current
+ * options - but if an extent is encrypted, we require that it stays
+ * encrypted:
+ */
+ bkey_for_each_crc(k.k, ptrs, crc, entry)
+ if (bch2_csum_type_is_encryption(crc.csum_type)) {
+ m->op.nonce = crc.nonce + crc.offset;
+ m->op.csum_type = crc.csum_type;
+ break;
+ }
+
if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
- m->op.alloc_reserve = RESERVE_MOVINGGC;
- m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
+ m->op.alloc_reserve = RESERVE_movinggc;
} else {
/* XXX: this should probably be passed in */
m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
unsigned compressed_sectors = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (p.ptr.dev == data_opts.rewrite_dev &&
- !p.ptr.cached &&
- crc_is_compressed(p.crc))
- compressed_sectors += p.crc.compressed_size;
+ if (p.ptr.dev == data_opts.rewrite_dev) {
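+ /* a cached pointer is rewritten as another cached copy: */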
+ if (p.ptr.cached)
+ m->op.flags |= BCH_WRITE_CACHED;
+
+ if (!p.ptr.cached &&
+ crc_is_compressed(p.crc))
+ compressed_sectors += p.crc.compressed_size;
+ }
if (compressed_sectors) {
ret = bch2_disk_reservation_add(c, &m->op.res,
atomic_sub(io->read_sectors, &ctxt->read_sectors);
io->read_completed = true;
- if (next_pending_write(ctxt))
- wake_up(&ctxt->wait);
-
+ wake_up(&ctxt->wait);
closure_put(&ctxt->cl);
}
-static void do_pending_writes(struct moving_context *ctxt)
+static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
{
struct moving_io *io;
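+ /*
+ * Drop any btree locks we hold first; the index updates done by
+ * the writes we're kicking off (and waiting on) may need them:
+ */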
+ if (trans)
+ bch2_trans_unlock(trans);
+
while ((io = next_pending_write(ctxt))) {
list_del(&io->list);
closure_call(&io->cl, move_write, NULL, &ctxt->cl);
}
}
-#define move_ctxt_wait_event(_ctxt, _cond) \
+#define move_ctxt_wait_event(_ctxt, _trans, _cond) \
do { \
- do_pending_writes(_ctxt); \
+ do_pending_writes(_ctxt, _trans); \
\
if (_cond) \
break; \
next_pending_write(_ctxt) || (_cond)); \
} while (1)
-static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
+static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
+ struct btree_trans *trans)
{
unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
- move_ctxt_wait_event(ctxt,
+ move_ctxt_wait_event(ctxt, trans,
!atomic_read(&ctxt->write_sectors) ||
atomic_read(&ctxt->write_sectors) != sectors_pending);
}
unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->write_sectors) <
- SECTORS_IN_FLIGHT_PER_DEVICE);
-
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->read_sectors) <
- SECTORS_IN_FLIGHT_PER_DEVICE);
-
/* write path might have to decompress data: */
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
static int lookup_inode(struct btree_trans *trans, struct bpos pos,
struct bch_inode_unpacked *inode)
{
- struct btree_iter *iter;
+ struct btree_iter iter;
struct bkey_s_c k;
int ret;
- iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, pos,
- BTREE_ITER_ALL_SNAPSHOTS);
- k = bch2_btree_iter_peek(iter);
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
+ BTREE_ITER_ALL_SNAPSHOTS);
+ k = bch2_btree_iter_peek(&iter);
ret = bkey_err(k);
if (ret)
goto err;
goto err;
}
- ret = k.k->type == KEY_TYPE_inode ? 0 : -EIO;
+ ret = bkey_is_inode(k.k) ? 0 : -EIO;
if (ret)
goto err;
- ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode);
+ ret = bch2_inode_unpack(k, inode);
if (ret)
goto err;
err:
- bch2_trans_iter_put(trans, iter);
+ bch2_trans_iter_exit(trans, &iter);
return ret;
}
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
struct bkey_buf sk;
struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_iter iter;
struct bkey_s_c k;
struct data_opts data_opts;
enum data_cmd data_cmd;
stats->btree_id = btree_id;
stats->pos = start;
- iter = bch2_trans_get_iter(&trans, btree_id, start,
- BTREE_ITER_PREFETCH);
+ bch2_trans_iter_init(&trans, &iter, btree_id, start,
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS);
if (rate)
bch2_ratelimit_reset(rate);
schedule_timeout(delay);
if (unlikely(freezing(current))) {
- bch2_trans_unlock(&trans);
- move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+ move_ctxt_wait_event(ctxt, &trans, list_empty(&ctxt->reads));
try_to_freeze();
}
} while (delay);
- k = bch2_btree_iter_peek(iter);
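+ /*
+ * Bound the number of read and write sectors in flight before
+ * grabbing the next extent:
+ */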
+ move_ctxt_wait_event(ctxt, &trans,
+ atomic_read(&ctxt->write_sectors) <
+ SECTORS_IN_FLIGHT_PER_DEVICE);
+
+ move_ctxt_wait_event(ctxt, &trans,
+ atomic_read(&ctxt->read_sectors) <
+ SECTORS_IN_FLIGHT_PER_DEVICE);
- stats->pos = iter->pos;
+ bch2_trans_begin(&trans);
+ k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;
+
ret = bkey_err(k);
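+ /* -EINTR here means the transaction was restarted - just retry: */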
+ if (ret == -EINTR)
+ continue;
if (ret)
break;
+
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
+ stats->pos = iter.pos;
+
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
BUG();
}
- /* unlock before doing IO: */
+ /*
+ * The iterator gets unlocked by __bch2_read_extent - need to
+ * save a copy of @k elsewhere:
+ */
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
- bch2_trans_unlock(&trans);
ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
data_cmd, data_opts);
if (ret2) {
- if (ret2 == -EINTR) {
- bch2_trans_reset(&trans, 0);
- bch2_trans_cond_resched(&trans);
+ if (ret2 == -EINTR)
continue;
- }
if (ret2 == -ENOMEM) {
/* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt);
+ bch2_move_ctxt_wait_for_io(ctxt, &trans);
continue;
}
if (rate)
bch2_ratelimit_increment(rate, k.k->size);
next:
- atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
- &stats->sectors_seen);
+ atomic64_add(k.k->size, &stats->sectors_seen);
next_nondata:
- bch2_btree_iter_advance(iter);
- bch2_trans_cond_resched(&trans);
+ bch2_btree_iter_advance(&iter);
}
out:
- bch2_trans_iter_put(&trans, iter);
- ret = bch2_trans_exit(&trans) ?: ret;
+ bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
return ret;
}
+inline void bch_move_stats_init(struct bch_move_stats *stats, char *name)
+{
+ memset(stats, 0, sizeof(*stats));
+
+ scnprintf(stats->name, sizeof(stats->name),
+ "%s", name);
+}
+
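+/*
+ * Running data jobs are kept on c->data_progress_list so their stats can be
+ * reported while the job is in flight:
+ */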
+static inline void progress_list_add(struct bch_fs *c,
+ struct bch_move_stats *stats)
+{
+ mutex_lock(&c->data_progress_lock);
+ list_add(&stats->list, &c->data_progress_list);
+ mutex_unlock(&c->data_progress_lock);
+}
+
+static inline void progress_list_del(struct bch_fs *c,
+ struct bch_move_stats *stats)
+{
+ mutex_lock(&c->data_progress_lock);
+ list_del(&stats->list);
+ mutex_unlock(&c->data_progress_lock);
+}
+
int bch2_move_data(struct bch_fs *c,
enum btree_id start_btree_id, struct bpos start_pos,
enum btree_id end_btree_id, struct bpos end_pos,
enum btree_id id;
int ret;
+ progress_list_add(c, stats);
closure_init_stack(&ctxt.cl);
INIT_LIST_HEAD(&ctxt.reads);
init_waitqueue_head(&ctxt.wait);
}
- move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
+ move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
closure_sync(&ctxt.cl);
EBUG_ON(atomic_read(&ctxt.write_sectors));
atomic64_read(&stats->sectors_moved),
atomic64_read(&stats->keys_moved));
+ progress_list_del(c, stats);
return ret;
}
bool kthread = (current->flags & PF_KTHREAD) != 0;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_iter iter;
struct btree *b;
enum btree_id id;
struct data_opts data_opts;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
+ progress_list_add(c, stats);
stats->data_type = BCH_DATA_btree;
id++) {
stats->btree_id = id;
- for_each_btree_node(&trans, iter, id,
- id == start_btree_id ? start_pos : POS_MIN,
- BTREE_ITER_PREFETCH, b) {
+ bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0,
+ BTREE_ITER_PREFETCH);
+retry:
+ ret = 0;
+ while (bch2_trans_begin(&trans),
+ (b = bch2_btree_iter_peek_node(&iter)) &&
+ !(ret = PTR_ERR_OR_ZERO(b))) {
if (kthread && kthread_should_stop())
break;
bpos_cmp(b->key.k.p, end_pos)) > 0)
break;
- stats->pos = iter->pos;
+ stats->pos = iter.pos;
switch ((cmd = pred(c, arg, b, &io_opts, &data_opts))) {
case DATA_SKIP:
BUG();
}
- ret = bch2_btree_node_rewrite(c, iter,
- b->data->keys.seq, 0) ?: ret;
+ ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret;
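+ /* -EINTR: transaction restarted, retry this node */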
+ if (ret == -EINTR)
+ continue;
+ if (ret)
+ break;
next:
- bch2_trans_cond_resched(&trans);
+ bch2_btree_iter_next_node(&iter);
}
+ if (ret == -EINTR)
+ goto retry;
+
+ bch2_trans_iter_exit(&trans, &iter);
- ret = bch2_trans_iter_free(&trans, iter) ?: ret;
if (kthread && kthread_should_stop())
break;
}
if (ret)
bch_err(c, "error %i in bch2_move_btree", ret);
+ /* flush relevant btree updates */
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
+
+ progress_list_del(c, stats);
return ret;
}
struct data_opts *data_opts)
{
unsigned nr_good = bch2_bkey_durability(c, k);
- unsigned replicas = 0;
-
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- replicas = c->opts.metadata_replicas;
- break;
- case KEY_TYPE_extent:
- replicas = io_opts->data_replicas;
- break;
- }
+ unsigned replicas = bkey_is_btree_ptr(k.k)
+ ? c->opts.metadata_replicas
+ : io_opts->data_replicas;
if (!nr_good || nr_good >= replicas)
return DATA_SKIP;
switch (op.op) {
case BCH_DATA_OP_REREPLICATE:
+ bch_move_stats_init(stats, "rereplicate");
stats->data_type = BCH_DATA_journal;
ret = bch2_journal_flush_device_pins(&c->journal, -1);
op.start_btree, op.start_pos,
op.end_btree, op.end_pos,
rereplicate_btree_pred, c, stats) ?: ret;
-
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_nr_pending(c));
-
ret = bch2_replicas_gc2(c) ?: ret;
ret = bch2_move_data(c,
if (op.migrate.dev >= c->sb.nr_devices)
return -EINVAL;
+ bch_move_stats_init(stats, "migrate");
stats->data_type = BCH_DATA_journal;
ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
ret = bch2_replicas_gc2(c) ?: ret;
break;
case BCH_DATA_OP_REWRITE_OLD_NODES:
+ bch_move_stats_init(stats, "rewrite_old_nodes");
ret = bch2_scan_old_btree_nodes(c, stats);
break;
default: