// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

#include <trace/events/bcachefs.h>

#define SECTORS_IN_FLIGHT_PER_DEVICE	2048
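
/*
 * State for a single extent being moved: the read into bounce pages, and the
 * write that puts the data back out. Lives on moving_context's reads list
 * until the read completes; freed by move_free() once the write finishes.
 */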
struct moving_io {
	struct list_head	list;
	struct closure		cl;
	bool			read_completed;

	unsigned		read_sectors;
	unsigned		write_sectors;

	struct bch_read_bio	rbio;

	struct migrate_write	write;
	/* Must be last since it is variable size */
	struct bio_vec		bi_inline_vecs[0];
};
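
/*
 * Per-job state: the list of in-flight reads, plus read/write sector counts
 * used to throttle against SECTORS_IN_FLIGHT_PER_DEVICE.
 */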
struct moving_context {
	/* Closure for waiting on all reads and writes to complete */
	struct closure		cl;

	struct bch_move_stats	*stats;

	struct list_head	reads;

	/* in flight sectors: */
	atomic_t		read_sectors;
	atomic_t		write_sectors;

	wait_queue_head_t	wait;
};
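
/*
 * Index update for a completed data move: splice the new pointer(s) into the
 * existing extent - unless the extent was overwritten, or the move raced with
 * another move or write, in which case the newly written data is dropped.
 */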
static int bch2_migrate_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
	int ret = 0;

	bch2_trans_init(&trans, c);
	bch2_trans_preload_iters(&trans);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(&bch2_keylist_front(keys)->k),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
		struct bkey_i_extent *insert, *new =
			bkey_i_to_extent(bch2_keylist_front(keys));
		BKEY_PADDED(k) _new, _insert;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		bool did_work = false;
		int nr;

		ret = bkey_err(k);
		if (ret)
			break;

		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bkey_extent_is_data(k.k) ||
		    !bch2_extent_matches_ptr(c, bkey_s_c_to_extent(k),
					     m->ptr, m->offset))
			goto nomatch;

		if (m->data_cmd == DATA_REWRITE &&
		    !bch2_extent_has_device(bkey_s_c_to_extent(k),
					    m->data_opts.rewrite_dev))
			goto nomatch;

		bkey_reassemble(&_insert.k, k);
		insert = bkey_i_to_extent(&_insert.k);

		bkey_copy(&_new.k, bch2_keylist_front(keys));
		new = bkey_i_to_extent(&_new.k);

		bch2_cut_front(iter->pos, &insert->k_i);
		bch2_cut_back(new->k.p, &insert->k);
		bch2_cut_back(insert->k.p, &new->k);

		if (m->data_cmd == DATA_REWRITE)
			bch2_bkey_drop_device(extent_i_to_s(insert).s,
					      m->data_opts.rewrite_dev);

		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
			if (bch2_extent_has_device(extent_i_to_s_c(insert), p.ptr.dev)) {
				/*
				 * raced with another move op? extent already
				 * has a pointer to the device we just wrote
				 * data to
				 */
				continue;
			}

			bch2_extent_ptr_decoded_append(insert, &p);
			did_work = true;
		}

		if (!did_work)
			goto nomatch;

		bch2_extent_narrow_crcs(insert,
			(struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, extent_i_to_s(insert).s);
		bch2_extent_mark_replicas_cached(c, extent_i_to_s(insert),
						 op->opts.background_target,
						 op->opts.data_replicas);

		/*
		 * If we're not fully overwriting @k, and it's compressed, we
		 * need a reservation for all the pointers in @insert
		 */
		nr = bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(&insert->k_i)) -
			m->nr_ptrs_reserved;

		if (insert->k.size < k.k->size &&
		    bch2_extent_is_compressed(k) &&
		    nr > 0) {
			ret = bch2_disk_reservation_add(c, &op->res,
					keylist_sectors(keys) * nr, 0);
			if (ret)
				goto out;

			m->nr_ptrs_reserved += nr;
		}

		bch2_trans_update(&trans,
				  BTREE_INSERT_ENTRY(iter, &insert->k_i));

		ret = bch2_trans_commit(&trans, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				m->data_opts.btree_insert_flags);
		if (!ret)
			atomic_long_inc(&c->extent_migrate_done);
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}

		bch2_cut_front(iter->pos, bch2_keylist_front(keys));
		continue;
nomatch:
		if (m->ctxt)
			atomic64_add(k.k->p.offset - iter->pos.offset,
				     &m->ctxt->stats->sectors_raced);
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
		bch2_btree_iter_next_slot(iter);
		goto next;
	}
out:
	bch2_trans_exit(&trans);
	BUG_ON(ret == -EINTR);

	return ret;
}
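
/*
 * The read has completed: point the write at the data just read. The data is
 * passed through still encoded (possibly compressed and/or encrypted), which
 * is why the write op is flagged BCH_WRITE_DATA_ENCODED.
 */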
void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->ptr		= rbio->pick.ptr;
	m->offset	= rbio->pos.offset - rbio->pick.crc.offset;
	m->op.devs_have	= rbio->devs_have;
	m->op.pos	= rbio->pos;
	m->op.version	= rbio->version;
	m->op.crc	= rbio->pick.crc;
	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;

	if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
		m->op.nonce	= m->op.crc.nonce + m->op.crc.offset;
		m->op.csum_type	= m->op.crc.csum_type;
	}

	if (m->data_cmd == DATA_REWRITE)
		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}
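
/* Set up the write half of a migration, including any needed disk reservation: */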
int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts,
			    struct bkey_s_c k)
{
	int ret;

	m->data_cmd	= data_cmd;
	m->data_opts	= data_opts;
	m->nr_ptrs_reserved = 0;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.compression_type =
		bch2_compression_opt_to_type[io_opts.background_compression ?:
					     io_opts.compression];
	m->op.target	= data_opts.target;
	m->op.write_point = wp;

	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
		m->op.alloc_reserve = RESERVE_MOVINGGC;

	m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS|
		BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED;

	m->op.nr_replicas	= 1;
	m->op.nr_replicas_required = 1;
	m->op.index_update_fn	= bch2_migrate_index_update;

	switch (data_cmd) {
	case DATA_ADD_REPLICAS: {
		/*
		 * DATA_ADD_REPLICAS is used for moving data to a different
		 * device in the background, and due to compression the new copy
		 * might take up more space than the old copy:
		 */
#if 0
		int nr = (int) io_opts.data_replicas -
			bch2_bkey_nr_dirty_ptrs(k);
#endif
		int nr = (int) io_opts.data_replicas;

		if (nr > 0) {
			m->op.nr_replicas = m->nr_ptrs_reserved = nr;

			ret = bch2_disk_reservation_get(c, &m->op.res,
					k.k->size, m->op.nr_replicas, 0);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_REWRITE: {
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		unsigned compressed_sectors = 0;

		extent_for_each_ptr_decode(bkey_s_c_to_extent(k), p, entry)
			if (!p.ptr.cached &&
			    p.crc.compression_type != BCH_COMPRESSION_NONE &&
			    bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
				compressed_sectors += p.crc.compressed_size;

		if (compressed_sectors) {
			ret = bch2_disk_reservation_add(c, &m->op.res,
					compressed_sectors,
					BCH_DISK_RESERVATION_NOFAIL);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_PROMOTE:
		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags |= BCH_WRITE_CACHED;
		break;
	default:
		BUG();
	}

	return 0;
}
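
/*
 * Move lifecycle: move_read_endio() -> move_write() -> bch2_write ->
 * move_write_done() -> move_free(). move_free() drops the disk reservation
 * and the bounce pages, then wakes anyone throttled in move_ctxt_wait_event().
 */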
static void move_free(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->write.ctxt;
	struct bio_vec *bv;
	int i;

	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, i)
		if (bv->bv_page)
			__free_page(bv->bv_page);

	wake_up(&ctxt->wait);

	kfree(io);
}

static void move_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_return_with_destructor(cl, move_free);
}
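
/* Kick off the write for a completed read - unless the read failed or raced: */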
static void move_write(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		closure_return_with_destructor(cl, move_free);
		return;
	}

	bch2_migrate_read_done(&io->write, &io->rbio);

	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
	continue_at(cl, move_write_done, NULL);
}
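
/*
 * Writes are issued in the order their reads were started: the head of the
 * list is only returned once its read has completed.
 */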
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}
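
/* Read completion: account the sectors, then poke whoever issues the writes: */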
static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	io->read_completed = true;

	if (next_pending_write(ctxt))
		wake_up(&ctxt->wait);

	closure_put(&ctxt->cl);
}

static void do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
	}
}
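
/*
 * Wait for @_cond, draining completed reads as we go - writes are issued from
 * the context that waits, not from the read endio:
 */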
#define move_ctxt_wait_event(_ctxt, _cond)			\
do {								\
	do_pending_writes(_ctxt);				\
								\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)
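
/* Wait until some in-flight write completes (write_sectors drops or changes): */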
static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}
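
/*
 * Queue the move of a single extent: throttle against the in-flight limits,
 * size the bounce buffer for the uncompressed data (the write path may have
 * to decompress), then issue the read. The write is started by
 * do_pending_writes() once the read completes.
 */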
static int bch2_move_extent(struct bch_fs *c,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    struct bkey_s_c_extent e,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = e.k->size, pages;
	int ret = -ENOMEM;

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->write_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	move_ctxt_wait_event(ctxt,
		atomic_read(&ctxt->read_sectors) <
		SECTORS_IN_FLIGHT_PER_DEVICE);

	/* write path might have to decompress data: */
	extent_for_each_ptr_decode(e, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	io->write.ctxt		= ctxt;
	io->read_sectors	= e.k->size;
	io->write_sectors	= e.k->size;

	bio_init(&io->write.op.wbio.bio, io->bi_inline_vecs, pages);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->write.op.wbio.bio.bi_iter.bi_size = sectors << 9;

	bch2_bio_map(&io->write.op.wbio.bio, NULL);
	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, GFP_KERNEL))
		goto err_free;

	io->rbio.opts = io_opts;
	bio_init(&io->rbio.bio, io->bi_inline_vecs, pages);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(e.k);
	io->rbio.bio.bi_end_io		= move_read_endio;

	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
				      data_cmd, data_opts, e.s_c);
	if (ret)
		goto err_free_pages;

	atomic64_inc(&ctxt->stats->keys_moved);
	atomic64_add(e.k->size, &ctxt->stats->sectors_moved);

	trace_move_extent(e.k);

	atomic_add(io->read_sectors, &ctxt->read_sectors);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(c, &io->rbio, e.s_c,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	trace_move_alloc_fail(e.k);
	return ret;
}
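
/*
 * Main data move loop: walk extents from @start to @end, apply @pred to
 * decide what to do with each one, and hand matches to bch2_move_extent().
 * Handles ratelimiting, freezing, and per-inode IO options.
 */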
int bch2_move_data(struct bch_fs *c,
		   struct bch_ratelimit *rate,
		   struct write_point_specifier wp,
		   struct bpos start,
		   struct bpos end,
		   move_pred_fn pred, void *arg,
		   struct bch_move_stats *stats)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct moving_context ctxt = { .stats = stats };
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	BKEY_PADDED(k) tmp;
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct data_opts data_opts;
	enum data_cmd data_cmd;
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;

	closure_init_stack(&ctxt.cl);
	INIT_LIST_HEAD(&ctxt.reads);
	init_waitqueue_head(&ctxt.wait);

	bch2_trans_init(&trans, c);

	stats->data_type = BCH_DATA_USER;
	stats->btree_id	= BTREE_ID_EXTENTS;
	stats->pos	= POS_MIN;

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, start,
				   BTREE_ITER_PREFETCH);

	if (rate)
		bch2_ratelimit_reset(rate);

	while (1) {
		do {
			delay = rate ? bch2_ratelimit_delay(rate) : 0;

			if (delay) {
				bch2_trans_unlock(&trans);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			if (kthread && (ret = kthread_should_stop())) {
				__set_current_state(TASK_RUNNING);
				goto out;
			}

			if (delay)
				schedule_timeout(delay);

			if (unlikely(freezing(current))) {
				bch2_trans_unlock(&trans);
				move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
				try_to_freeze();
			}
		} while (delay);
peek:
		k = bch2_btree_iter_peek(iter);

		stats->pos = iter->pos;

		if (!k.k)
			break;
		ret = bkey_err(k);
		if (ret)
			break;

		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (!bkey_extent_is_data(k.k))
			goto next_nondata;

		if (cur_inum != k.k->p.inode) {
			struct bch_inode_unpacked inode;

			/* don't hold btree locks while looking up inode: */
			bch2_trans_unlock(&trans);

			io_opts = bch2_opts_to_inode_opts(c->opts);
			if (!bch2_inode_find_by_inum(c, k.k->p.inode, &inode))
				bch2_io_opts_apply(&io_opts, bch2_inode_opts_get(&inode));
			cur_inum = k.k->p.inode;
			goto peek;
		}

		switch ((data_cmd = pred(c, arg, k, &io_opts, &data_opts))) {
		case DATA_SKIP:
			goto next;
		case DATA_SCRUB:
			BUG();
		case DATA_ADD_REPLICAS:
		case DATA_REWRITE:
		case DATA_PROMOTE:
			break;
		default:
			BUG();
		}

		/* unlock before doing IO: */
		bkey_reassemble(&tmp.k, k);
		k = bkey_i_to_s_c(&tmp.k);
		bch2_trans_unlock(&trans);

		ret2 = bch2_move_extent(c, &ctxt, wp, io_opts,
					bkey_s_c_to_extent(k),
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(&ctxt);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}

		if (rate)
			bch2_ratelimit_increment(rate, k.k->size);
next:
		atomic64_add(k.k->size * bch2_bkey_nr_dirty_ptrs(k),
			     &stats->sectors_seen);
next_nondata:
		bch2_btree_iter_next(iter);
		bch2_trans_cond_resched(&trans);
	}
out:
	bch2_trans_exit(&trans);

	move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
	closure_sync(&ctxt.cl);

	EBUG_ON(atomic_read(&ctxt.write_sectors));

	trace_move_data(c,
			atomic64_read(&stats->sectors_moved),
			atomic64_read(&stats->keys_moved));

	return ret;
}
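
/* Re-mark replicas for all extents, so stale replicas entries can be dropped: */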
static int bch2_gc_data_replicas(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_init(&trans, c);

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ret = bch2_mark_bkey_replicas(c, k);
		if (ret)
			break;
	}
	ret = bch2_trans_exit(&trans) ?: ret;

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}
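
/* Same, for btree node pointers: */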
static int bch2_gc_btree_replicas(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct btree *b;
	enum btree_id id;
	int ret = 0;

	bch2_trans_init(&trans, c);

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);

	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&trans, iter, id, POS_MIN,
				    BTREE_ITER_PREFETCH, b) {
			ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(&b->key));

			bch2_trans_cond_resched(&trans);
		}

		ret = bch2_trans_iter_free(&trans, iter) ?: ret;
	}

	bch2_trans_exit(&trans);

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}
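
/*
 * Btree nodes are "moved" by rewriting them: bch2_btree_node_rewrite()
 * allocates a replacement node and the old one is freed.
 */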
static int bch2_move_btree(struct bch_fs *c,
			   move_pred_fn pred,
			   void *arg,
			   struct bch_move_stats *stats)
{
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned id;
	struct data_opts data_opts;
	enum data_cmd cmd;
	int ret = 0;

	bch2_trans_init(&trans, c);

	stats->data_type = BCH_DATA_BTREE;

	for (id = 0; id < BTREE_ID_NR; id++) {
		stats->btree_id = id;

		for_each_btree_node(&trans, iter, id, POS_MIN,
				    BTREE_ITER_PREFETCH, b) {
			stats->pos = iter->pos;

			switch ((cmd = pred(c, arg,
					    bkey_i_to_s_c(&b->key),
					    &io_opts, &data_opts))) {
			case DATA_SKIP:
				goto next;
			case DATA_SCRUB:
				BUG();
			case DATA_ADD_REPLICAS:
			case DATA_REWRITE:
				break;
			default:
				BUG();
			}

			ret = bch2_btree_node_rewrite(c, iter,
					b->data->keys.seq, 0) ?: ret;
next:
			bch2_trans_cond_resched(&trans);
		}

		ret = bch2_trans_iter_free(&trans, iter) ?: ret;
	}

	bch2_trans_exit(&trans);

	return ret;
}
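
/* Predicates for bch2_data_job(): decide per key what should be done with it. */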
static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
				struct bkey_s_c k,
				struct bch_io_opts *io_opts,
				struct data_opts *data_opts)
{
	return DATA_SCRUB;
}

static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
				      struct bkey_s_c k,
				      struct bch_io_opts *io_opts,
				      struct data_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		replicas = c->opts.metadata_replicas;
		break;
	case KEY_TYPE_extent:
		replicas = io_opts->data_replicas;
		break;
	}

	if (!nr_good || nr_good >= replicas)
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	return DATA_ADD_REPLICAS;
}

static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
				  struct bkey_s_c k,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_ioctl_data *op = arg;

	if (!bch2_bkey_has_device(k, op->migrate.dev))
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->btree_insert_flags	= 0;
	data_opts->rewrite_dev		= op->migrate.dev;
	return DATA_REWRITE;
}
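
/*
 * Entry point for the data job ioctls: flush journal pins for the relevant
 * device(s), rewrite btree nodes, move user data, and re-mark replicas so
 * stale entries can be garbage collected.
 */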
int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;

		while (1) {
			closure_wait_event(&c->btree_interior_update_wait,
					   !bch2_btree_interior_updates_nr_pending(c) ||
					   c->btree_roots_dirty);
			if (!bch2_btree_interior_updates_nr_pending(c))
				break;
			bch2_journal_meta(&c->journal);
		}

		ret = bch2_gc_btree_replicas(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     POS_MIN,
				     POS_MAX,
				     rereplicate_pred, c, stats) ?: ret;
		ret = bch2_gc_data_replicas(c) ?: ret;
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		stats->data_type = BCH_DATA_JOURNAL;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
		ret = bch2_gc_btree_replicas(c) ?: ret;

		ret = bch2_move_data(c, NULL,
				     writepoint_hashed((unsigned long) current),
				     POS_MIN,
				     POS_MAX,
				     migrate_pred, &op, stats) ?: ret;
		ret = bch2_gc_data_replicas(c) ?: ret;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}