#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
+#include "btree_write_buffer.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
}
struct moving_io {
- struct list_head list;
- struct closure cl;
- bool read_completed;
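+ /*
+ * read_list: reads not yet written back, on ctxt->reads
+ * io_list: all in-flight ios, on ctxt->ios, protected by ctxt->lock
+ * b: bucket being evacuated; b->count is held while this io is in flight
+ */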
+ struct list_head read_list;
+ struct list_head io_list;
+ struct move_bucket_in_flight *b;
+ struct closure cl;
+ bool read_completed;
- unsigned read_sectors;
- unsigned write_sectors;
+ unsigned read_sectors;
+ unsigned write_sectors;
- struct bch_read_bio rbio;
+ struct bch_read_bio rbio;
- struct data_update write;
+ struct data_update write;
/* Must be last since it is variable size */
- struct bio_vec bi_inline_vecs[0];
+ struct bio_vec bi_inline_vecs[];
};
static void move_free(struct moving_io *io)
{
struct moving_context *ctxt = io->write.ctxt;
- struct bch_fs *c = ctxt->c;
+
+ if (io->b)
+ atomic_dec(&io->b->count);
bch2_data_update_exit(&io->write);
+
+ mutex_lock(&ctxt->lock);
+ list_del(&io->io_list);
wake_up(&ctxt->wait);
- percpu_ref_put(&c->writes);
+ mutex_unlock(&ctxt->lock);
+
kfree(io);
}
ctxt->write_error = true;
atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
+ atomic_dec(&io->write.ctxt->write_ios);
move_free(io);
closure_put(&ctxt->cl);
}
closure_get(&io->write.ctxt->cl);
atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
+ atomic_inc(&io->write.ctxt->write_ios);
bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
}
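+/*
+ * Writeback happens in the order reads were issued: return the io at the head
+ * of the list, but only if its read has completed:
+ */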
-static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
+struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
{
struct moving_io *io =
- list_first_entry_or_null(&ctxt->reads, struct moving_io, list);
+ list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);
return io && io->read_completed ? io : NULL;
}
struct moving_context *ctxt = io->write.ctxt;
atomic_sub(io->read_sectors, &ctxt->read_sectors);
+ atomic_dec(&ctxt->read_ios);
io->read_completed = true;
wake_up(&ctxt->wait);
closure_put(&ctxt->cl);
}
-static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
+ struct btree_trans *trans)
{
struct moving_io *io;
if (trans)
bch2_trans_unlock(trans);
- while ((io = next_pending_write(ctxt))) {
- list_del(&io->list);
+ while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
+ list_del(&io->read_list);
move_write(io);
}
}
-#define move_ctxt_wait_event(_ctxt, _trans, _cond) \
-do { \
- do_pending_writes(_ctxt, _trans); \
- \
- if (_cond) \
- break; \
- __wait_event((_ctxt)->wait, \
- next_pending_write(_ctxt) || (_cond)); \
-} while (1)
-
static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
struct btree_trans *trans)
{
void bch2_moving_ctxt_exit(struct moving_context *ctxt)
{
+ struct bch_fs *c = ctxt->c;
+
move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
closure_sync(&ctxt->cl);
+
EBUG_ON(atomic_read(&ctxt->write_sectors));
+ EBUG_ON(atomic_read(&ctxt->write_ios));
+ EBUG_ON(atomic_read(&ctxt->read_sectors));
+ EBUG_ON(atomic_read(&ctxt->read_ios));
if (ctxt->stats) {
- progress_list_del(ctxt->c, ctxt->stats);
-
- trace_move_data(ctxt->c,
+ progress_list_del(c, ctxt->stats);
+ trace_move_data(c,
atomic64_read(&ctxt->stats->sectors_moved),
atomic64_read(&ctxt->stats->keys_moved));
}
+
+ mutex_lock(&c->moving_context_lock);
+ list_del(&ctxt->list);
+ mutex_unlock(&c->moving_context_lock);
}
void bch2_moving_ctxt_init(struct moving_context *ctxt,
memset(ctxt, 0, sizeof(*ctxt));
ctxt->c = c;
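+ /* record our caller, shown by bch2_moving_ctxt_to_text(): */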
+ ctxt->fn = (void *) _RET_IP_;
ctxt->rate = rate;
ctxt->stats = stats;
ctxt->wp = wp;
ctxt->wait_on_copygc = wait_on_copygc;
closure_init_stack(&ctxt->cl);
+
+ mutex_init(&ctxt->lock);
INIT_LIST_HEAD(&ctxt->reads);
+ INIT_LIST_HEAD(&ctxt->ios);
init_waitqueue_head(&ctxt->wait);
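+
+ /* make this context visible to bch2_fs_moving_ctxts_to_text(): */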
+ mutex_lock(&c->moving_context_lock);
+ list_add(&ctxt->list, &c->moving_context_list);
+ mutex_unlock(&c->moving_context_lock);
+
if (stats) {
progress_list_add(c, stats);
stats->data_type = BCH_DATA_user;
if (bkey_deleted(&n->k))
n->k.size = 0;
- return bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ return bch2_trans_relock(trans) ?:
+ bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
}
static int bch2_move_extent(struct btree_trans *trans,
struct btree_iter *iter,
struct moving_context *ctxt,
+ struct move_bucket_in_flight *bucket_in_flight,
struct bch_io_opts io_opts,
enum btree_id btree_id,
struct bkey_s_c k,
return 0;
}
- if (!percpu_ref_tryget_live(&c->writes))
- return -BCH_ERR_erofs_no_writes;
-
/*
* Before memory allocations & taking nocow locks in
* bch2_data_update_init():
if (!io)
goto err;
+ INIT_LIST_HEAD(&io->io_list);
io->write.ctxt = ctxt;
io->read_sectors = k.k->size;
io->write_sectors = k.k->size;
bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
io->rbio.bio.bi_iter.bi_size = sectors << 9;
- bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
+ io->rbio.bio.bi_opf = REQ_OP_READ;
io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
io->rbio.bio.bi_end_io = move_read_endio;
- ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts,
- data_opts, btree_id, k);
+ ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
+ io_opts, data_opts, btree_id, k);
if (ret && ret != -BCH_ERR_unwritten_extent_update)
goto err_free_pages;
- io->write.ctxt = ctxt;
- io->write.op.end_io = move_write_done;
-
- atomic64_inc(&ctxt->stats->keys_moved);
- atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
if (ret == -BCH_ERR_unwritten_extent_update) {
bch2_update_unwritten_extent(trans, &io->write);
move_free(io);
BUG_ON(ret);
+ io->write.ctxt = ctxt;
+ io->write.op.end_io = move_write_done;
+
+ if (ctxt->stats) {
+ atomic64_inc(&ctxt->stats->keys_moved);
+ atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+ }
+
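+ /* pin the bucket being evacuated until this io completes: */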
+ if (bucket_in_flight) {
+ io->b = bucket_in_flight;
+ atomic_inc(&io->b->count);
+ }
+
this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
trace_move_extent_read(k.k);
+
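+ /* add to ctxt->reads (pending writeback) and ctxt->ios (debug output): */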
+ mutex_lock(&ctxt->lock);
atomic_add(io->read_sectors, &ctxt->read_sectors);
- list_add_tail(&io->list, &ctxt->reads);
+ atomic_inc(&ctxt->read_ios);
+
+ list_add_tail(&io->read_list, &ctxt->reads);
+ list_add_tail(&io->io_list, &ctxt->ios);
+ mutex_unlock(&ctxt->lock);
/*
* dropped by move_read_endio() - guards against use after free of
err_free:
kfree(io);
err:
- percpu_ref_put(&c->writes);
trace_and_count(c, move_extent_alloc_mem_fail, k.k);
return ret;
}
}
} while (delay);
+ /*
+ * XXX: these limits really ought to be per device; SSDs and hard drives
+ * will want different limits
+ */
move_ctxt_wait_event(ctxt, trans,
- atomic_read(&ctxt->write_sectors) <
- c->opts.move_bytes_in_flight >> 9);
-
- move_ctxt_wait_event(ctxt, trans,
- atomic_read(&ctxt->read_sectors) <
- c->opts.move_bytes_in_flight >> 9);
+ atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
+ atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
+ atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
+ atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight);
return 0;
}
bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
- ctxt->stats->data_type = BCH_DATA_user;
- ctxt->stats->btree_id = btree_id;
- ctxt->stats->pos = start;
+ if (ctxt->stats) {
+ ctxt->stats->data_type = BCH_DATA_user;
+ ctxt->stats->btree_id = btree_id;
+ ctxt->stats->pos = start;
+ }
bch2_trans_iter_init(&trans, &iter, btree_id, start,
BTREE_ITER_PREFETCH|
if (bkey_ge(bkey_start_pos(k.k), end))
break;
- ctxt->stats->pos = iter.pos;
+ if (ctxt->stats)
+ ctxt->stats->pos = iter.pos;
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
k = bkey_i_to_s_c(sk.k);
bch2_trans_unlock(&trans);
- ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
- btree_id, k, data_opts);
+ ret2 = bch2_move_extent(&trans, &iter, ctxt, NULL,
+ io_opts, btree_id, k, data_opts);
if (ret2) {
if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
continue;
if (ctxt->rate)
bch2_ratelimit_increment(ctxt->rate, k.k->size);
next:
- atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+ if (ctxt->stats)
+ atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
bch2_btree_iter_advance(&iter);
}
return ret;
}
-static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
+void bch2_verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct printbuf buf = PRINTBUF;
struct bch_backpointer bp;
- u64 bp_offset = 0;
+ struct bpos bp_pos = POS_MIN;
+ unsigned nr_bps = 0;
int ret;
+ bch2_trans_begin(trans);
+
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
bucket, BTREE_ITER_CACHED);
again:
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
+ ret = lockrestart_do(trans,
+ bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (!ret && k.k->type == KEY_TYPE_alloc_v4) {
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
}
}
+ set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return;
failed_to_evacuate:
bch2_trans_iter_exit(trans, &iter);
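+
+ /* if we've gone emergency read-only, failing to evacuate isn't an error: */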
+ if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+ return;
+
prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
bch2_bkey_val_to_text(&buf, c, k);
bch2_trans_begin(trans);
ret = bch2_get_next_backpointer(trans, bucket, gen,
- &bp_offset, &bp,
+ &bp_pos, &bp,
BTREE_ITER_CACHED);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
- if (bp_offset == U64_MAX)
+ if (bkey_eq(bp_pos, POS_MAX))
break;
- k = bch2_backpointer_get_key(trans, &iter,
- bucket, bp_offset, bp);
+ k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
bch2_trans_iter_exit(trans, &iter);
+
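+ /* print at most ten backpointers, to avoid flooding the log: */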
+ if (++nr_bps > 10)
+ break;
+ bp_pos = bpos_nosnap_successor(bp_pos);
}
bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
- return 0;
}
-int __bch2_evacuate_bucket(struct moving_context *ctxt,
+int __bch2_evacuate_bucket(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct move_bucket_in_flight *bucket_in_flight,
struct bpos bucket, int gen,
struct data_update_opts _data_opts)
{
struct bch_fs *c = ctxt->c;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
- struct btree_trans trans;
struct btree_iter iter;
struct bkey_buf sk;
struct bch_backpointer bp;
struct bkey_s_c k;
struct data_update_opts data_opts;
unsigned dirty_sectors, bucket_size;
- u64 bp_offset = 0, cur_inum = U64_MAX;
+ u64 fragmentation;
+ u64 cur_inum = U64_MAX;
+ struct bpos bp_pos = POS_MIN;
int ret = 0;
bch2_bkey_buf_init(&sk);
- bch2_trans_init(&trans, c, 0, 0);
- bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
+ /*
+ * We're not run in a context that handles transaction restarts:
+ */
+ bch2_trans_begin(trans);
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
bucket, BTREE_ITER_CACHED);
- ret = lockrestart_do(&trans,
+ ret = lockrestart_do(trans,
bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_iter_exit(trans, &iter);
- if (!ret) {
- a = bch2_alloc_to_v4(k, &a_convert);
- dirty_sectors = a->dirty_sectors;
- bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
+ if (ret) {
+ bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
+ goto err;
}
- while (!(ret = move_ratelimit(&trans, ctxt))) {
- bch2_trans_begin(&trans);
+ a = bch2_alloc_to_v4(k, &a_convert);
+ dirty_sectors = a->dirty_sectors;
+ bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
+ fragmentation = a->fragmentation_lru;
- ret = bch2_get_next_backpointer(&trans, bucket, gen,
- &bp_offset, &bp,
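+ /*
+ * Backpointer updates may still be sitting in the btree write buffer;
+ * flush it so bch2_get_next_backpointer() sees them all:
+ */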
+ ret = bch2_btree_write_buffer_flush(trans);
+ if (ret) {
+ bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
+ goto err;
+ }
+
+ while (!(ret = move_ratelimit(trans, ctxt))) {
+ bch2_trans_begin(trans);
+
+ ret = bch2_get_next_backpointer(trans, bucket, gen,
+ &bp_pos, &bp,
BTREE_ITER_CACHED);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
goto err;
- if (bp_offset == U64_MAX)
+ if (bkey_eq(bp_pos, POS_MAX))
break;
if (!bp.level) {
struct bkey_s_c k;
unsigned i = 0;
- k = bch2_backpointer_get_key(&trans, &iter,
- bucket, bp_offset, bp);
+ k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
goto err;
if (!k.k)
- continue;
+ goto next;
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
- ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum);
+ ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
if (ret) {
- bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_iter_exit(trans, &iter);
continue;
}
data_opts.rewrite_ptrs = 0;
bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
- if (ptr->dev == bucket.inode)
+ if (ptr->dev == bucket.inode) {
data_opts.rewrite_ptrs |= 1U << i;
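+ /* cached pointers don't need to be evacuated: */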
+ if (ptr->cached) {
+ bch2_trans_iter_exit(trans, &iter);
+ goto next;
+ }
+ }
i++;
}
- ret = bch2_move_extent(&trans, &iter, ctxt, io_opts,
- bp.btree_id, k, data_opts);
- bch2_trans_iter_exit(&trans, &iter);
+ ret = bch2_move_extent(trans, &iter, ctxt,
+ bucket_in_flight,
+ io_opts, bp.btree_id, k, data_opts);
+ bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret == -ENOMEM) {
/* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt, &trans);
+ bch2_move_ctxt_wait_for_io(ctxt, trans);
continue;
}
if (ret)
if (ctxt->rate)
bch2_ratelimit_increment(ctxt->rate, k.k->size);
- atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+ if (ctxt->stats)
+ atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
} else {
struct btree *b;
- b = bch2_backpointer_get_node(&trans, &iter,
- bucket, bp_offset, bp);
+ b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp);
ret = PTR_ERR_OR_ZERO(b);
if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
continue;
if (ret)
goto err;
if (!b)
- continue;
+ goto next;
- ret = bch2_btree_node_rewrite(&trans, &iter, b, 0);
- bch2_trans_iter_exit(&trans, &iter);
+ ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
+ bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ctxt->rate)
bch2_ratelimit_increment(ctxt->rate,
c->opts.btree_node_size >> 9);
- atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
- atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+ if (ctxt->stats) {
+ atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+ atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+ }
}
-
- bp_offset++;
+next:
+ bp_pos = bpos_nosnap_successor(bp_pos);
}
- trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, ret);
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && gen >= 0) {
- bch2_trans_unlock(&trans);
- move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
- closure_sync(&ctxt->cl);
- if (!ctxt->write_error)
- lockrestart_do(&trans, verify_bucket_evacuated(&trans, bucket, gen));
- }
+ trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
err:
- bch2_trans_exit(&trans);
bch2_bkey_buf_exit(&sk, c);
return ret;
}
struct write_point_specifier wp,
bool wait_on_copygc)
{
+ struct btree_trans trans;
struct moving_context ctxt;
int ret;
+ bch2_trans_init(&trans, c, 0, 0);
bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
- ret = __bch2_evacuate_bucket(&ctxt, bucket, gen, data_opts);
+ ret = __bch2_evacuate_bucket(&trans, &ctxt, NULL, bucket, gen, data_opts);
bch2_moving_ctxt_exit(&ctxt);
+ bch2_trans_exit(&trans);
return ret;
}
return ret;
}
+
+void bch2_data_jobs_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct bch_move_stats *stats;
+
+ mutex_lock(&c->data_progress_lock);
+ list_for_each_entry(stats, &c->data_progress_list, list) {
+ prt_printf(out, "%s: data type %s btree_id %s position: ",
+ stats->name,
+ bch2_data_types[stats->data_type],
+ bch2_btree_ids[stats->btree_id]);
+ bch2_bpos_to_text(out, stats->pos);
+ prt_printf(out, "%s", "\n");
+ }
+ mutex_unlock(&c->data_progress_lock);
+}
+
+static void bch2_moving_ctxt_to_text(struct printbuf *out, struct moving_context *ctxt)
+{
+ struct moving_io *io;
+
+ prt_printf(out, "%ps:", ctxt->fn);
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "reads: %u sectors %u",
+ atomic_read(&ctxt->read_ios),
+ atomic_read(&ctxt->read_sectors));
+ prt_newline(out);
+
+ prt_printf(out, "writes: %u sectors %u",
+ atomic_read(&ctxt->write_ios),
+ atomic_read(&ctxt->write_sectors));
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+
+ mutex_lock(&ctxt->lock);
+ list_for_each_entry(io, &ctxt->ios, io_list) {
+ bch2_write_op_to_text(out, &io->write.op);
+ }
+ mutex_unlock(&ctxt->lock);
+
+ printbuf_indent_sub(out, 4);
+}
+
+void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct moving_context *ctxt;
+
+ mutex_lock(&c->moving_context_lock);
+ list_for_each_entry(ctxt, &c->moving_context_list, list)
+ bch2_moving_ctxt_to_text(out, ctxt);
+ mutex_unlock(&c->moving_context_lock);
+}
+
+void bch2_fs_move_init(struct bch_fs *c)
+{
+ INIT_LIST_HEAD(&c->moving_context_list);
+ mutex_init(&c->moving_context_lock);
+
+ INIT_LIST_HEAD(&c->data_progress_list);
+ mutex_init(&c->data_progress_lock);
+}