// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
#include "move.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"
#include "keylist.h"

#include <linux/ioprio.h>
#include <linux/kthread.h>

#include <trace/events/bcachefs.h>

#define SECTORS_IN_FLIGHT_PER_DEVICE	2048

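/*
 * A single extent in flight: the read and the write that follows it share the
 * same inline bvec array, so the data pages are allocated once and handed from
 * the read side to the write side.
 */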
struct moving_io {
	struct list_head	list;
	struct closure		cl;
	bool			read_completed;

	unsigned		read_sectors;
	unsigned		write_sectors;

	struct bch_read_bio	rbio;

	struct migrate_write	write;
	/* Must be last since it is variable size */
	struct bio_vec		bi_inline_vecs[];
};

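/*
 * Per-pass state shared by all moving_ios: the list of reads in flight (writes
 * are issued from it in read order) and counters bounding outstanding IO.
 */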
struct moving_context {
	/* Closure for waiting on all reads and writes to complete */
	struct closure		cl;

	struct bch_move_stats	*stats;

	struct list_head	reads;

	/* in flight sectors: */
	atomic_t		read_sectors;
	atomic_t		write_sectors;

	wait_queue_head_t	wait;
};

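/*
 * When a key is moved from @old_pos to @new_pos, other snapshots that could
 * see the key at its old position must not have their visibility changed by
 * the update: insert whiteouts at @new_pos in the snapshots the new key would
 * otherwise leak into.
 */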
static int insert_snapshot_whiteouts(struct btree_trans *trans,
				     enum btree_id id,
				     struct bpos old_pos,
				     struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, update_iter;
	struct bkey_s_c k;
	struct snapshots_seen s;
	int ret;

	if (!btree_type_has_snapshots(id))
		return 0;

	snapshots_seen_init(&s);

	if (!bkey_cmp(old_pos, new_pos))
		return 0;

	if (!snapshot_t(c, old_pos.snapshot)->children[0])
		return 0;

	bch2_trans_iter_init(trans, &iter, id, old_pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while (1) {
next:
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		if (bkey_cmp(old_pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
			struct bkey_i *update;
			size_t i;

			for (i = 0; i < s.nr; i++)
				if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, s.d[i]))
					goto next;

			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));

			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p = new_pos;
			update->k.p.snapshot = k.k->p.snapshot;

			bch2_trans_iter_init(trans, &update_iter, id, update->k.p,
					     BTREE_ITER_NOT_EXTENTS|
					     BTREE_ITER_ALL_SNAPSHOTS|
					     BTREE_ITER_INTENT);
			ret   = bch2_btree_iter_traverse(&update_iter) ?:
				bch2_trans_update(trans, &update_iter, update,
					  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
			bch2_trans_iter_exit(trans, &update_iter);
			if (ret)
				break;

			ret = snapshots_seen_add(c, &s, k.k->p.snapshot);
			if (ret)
				break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);
	kfree(s.d);

	return ret;
}

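/*
 * The index update path for migrate writes (op->index_update_fn): once the new
 * copy of the data has been written, splice pointers to it into the original
 * extent, dropping the pointer being rewritten for DATA_REWRITE, and detect
 * extents that changed while the IO was in flight ("nomatch": raced with
 * another update).
 */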
static int bch2_migrate_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans trans;
	struct btree_iter iter;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct open_bucket *ec_ob = ec_open_bucket(c, &op->open_buckets);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);

	bch2_trans_iter_init(&trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k;
		struct bkey_i *insert;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bpos next_pos;
		bool did_work = false;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;

		bch2_trans_begin(&trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (bversion_cmp(k.k->version, new->k.version) ||
		    !bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
			goto nomatch;

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos,	insert);
		bch2_cut_back(new->k.p,		insert);
		bch2_cut_back(insert->k.p,	&new->k_i);

		if (m->data_cmd == DATA_REWRITE) {
			struct bch_extent_ptr *new_ptr, *old_ptr = (void *)
				bch2_bkey_has_device(bkey_i_to_s_c(insert),
						     m->data_opts.rewrite_dev);
			if (!old_ptr)
				goto nomatch;

			if (old_ptr->cached)
				extent_for_each_ptr(extent_i_to_s(new), new_ptr)
					new_ptr->cached = true;

			__bch2_bkey_drop_ptr(bkey_i_to_s(insert), old_ptr);
		}

		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
			if (bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev)) {
				/*
				 * raced with another move op? extent already
				 * has a pointer to the device we just wrote
				 * data to
				 */
				continue;
			}

			bch2_extent_ptr_decoded_append(insert, &p);
			did_work = true;
		}

		if (!did_work)
			goto nomatch;

		bch2_bkey_narrow_crcs(insert,
				(struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));
		bch2_bkey_mark_replicas_cached(c, bkey_i_to_s(insert),
					       op->opts.background_target,
					       op->opts.data_replicas);

		ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		ret   = insert_snapshot_whiteouts(&trans, m->btree_id,
						  k.k->p, insert->k.p) ?:
			bch2_trans_update(&trans, &iter, insert,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_trans_commit(&trans, &op->res,
				op_journal_seq(op),
				BTREE_INSERT_NOFAIL|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);
			atomic_long_inc(&c->extent_migrate_done);
			if (ec_ob)
				bch2_ob_add_backpointer(c, ec_ob, &insert->k);
		}
err:
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nomatch:
		if (m->ctxt) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->ctxt->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->ctxt->stats->sectors_raced);
		}
		atomic_long_inc(&c->extent_migrate_raced);
		trace_move_race(&new->k);
		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(ret == -EINTR);
	return ret;
}

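/*
 * Called when the read has completed: finish initializing the write op from
 * the pointer and checksum state of the copy that was actually read, so the
 * data can be written back out without being decoded.
 */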
void bch2_migrate_read_done(struct migrate_write *m, struct bch_read_bio *rbio)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->ptr		= rbio->pick.ptr;
	m->offset	= rbio->data_pos.offset - rbio->pick.crc.offset;
	m->op.devs_have	= rbio->devs_have;
	m->op.pos	= rbio->data_pos;
	m->op.version	= rbio->version;
	m->op.crc	= rbio->pick.crc;
	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;

	if (m->data_cmd == DATA_REWRITE)
		bch2_dev_list_drop_dev(&m->op.devs_have, m->data_opts.rewrite_dev);
}

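/*
 * Set up a migrate_write for @k according to @data_cmd: initialize the write
 * op, preserve encryption parameters, and take whatever disk reservation the
 * command requires up front.
 */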
int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	struct extent_ptr_decoded p;
	int ret;

	m->btree_id	= btree_id;
	m->data_cmd	= data_cmd;
	m->data_opts	= data_opts;
	m->nr_ptrs_reserved = 0;

	bch2_write_op_init(&m->op, c, io_opts);

	if (!bch2_bkey_is_incompressible(k))
		m->op.compression_type =
			bch2_compression_opt_to_type[io_opts.background_compression ?:
						     io_opts.compression];
	else
		m->op.incompressible = true;

	m->op.target	= data_opts.target;
	m->op.write_point = wp;

	/*
	 * op->csum_type is normally initialized from the fs/file's current
	 * options - but if an extent is encrypted, we require that it stays
	 * encrypted:
	 */
	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (bch2_csum_type_is_encryption(crc.csum_type)) {
			m->op.nonce	= crc.nonce + crc.offset;
			m->op.csum_type = crc.csum_type;
			break;
		}

	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
		m->op.alloc_reserve = RESERVE_movinggc;
	} else {
		/* XXX: this should probably be passed in */
		m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
	}

	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_FROM_INTERNAL;

	m->op.nr_replicas	= data_opts.nr_replicas;
	m->op.nr_replicas_required = data_opts.nr_replicas;
	m->op.index_update_fn	= bch2_migrate_index_update;

	switch (data_cmd) {
	case DATA_ADD_REPLICAS: {
		/*
		 * DATA_ADD_REPLICAS is used for moving data to a different
		 * device in the background, and due to compression the new copy
		 * might take up more space than the old copy:
		 */
#if 0
		int nr = (int) io_opts.data_replicas -
			bch2_bkey_nr_ptrs_allocated(k);
#endif
		int nr = (int) io_opts.data_replicas;

		if (nr > 0) {
			m->op.nr_replicas = m->nr_ptrs_reserved = nr;

			ret = bch2_disk_reservation_get(c, &m->op.res,
					k.k->size, m->op.nr_replicas, 0);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_REWRITE: {
		unsigned compressed_sectors = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (p.ptr.dev == data_opts.rewrite_dev) {
				if (p.ptr.cached)
					m->op.flags |= BCH_WRITE_CACHED;

				if (!p.ptr.cached &&
				    crc_is_compressed(p.crc))
					compressed_sectors += p.crc.compressed_size;
			}

		if (compressed_sectors) {
			ret = bch2_disk_reservation_add(c, &m->op.res,
					k.k->size * m->op.nr_replicas,
					BCH_DISK_RESERVATION_NOFAIL);
			if (ret)
				return ret;
		}
		break;
	}
	case DATA_PROMOTE:
		m->op.flags	|= BCH_WRITE_ALLOC_NOWAIT;
		m->op.flags	|= BCH_WRITE_CACHED;
		break;
	default:
		BUG();
	}

	return 0;
}

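/* Final destructor: drop the reservation, free the data pages, wake waiters */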
static void move_free(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->write.ctxt;
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
		if (bv->bv_page)
			__free_page(bv->bv_page);

	wake_up(&ctxt->wait);

	kfree(io);
}

static void move_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_return_with_destructor(cl, move_free);
}

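/*
 * Turn a completed read into a write: if the read failed or hit a hole there
 * is nothing to rewrite, so the moving_io is freed immediately.
 */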
static void move_write(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
		closure_return_with_destructor(cl, move_free);
		return;
	}

	bch2_migrate_read_done(&io->write, &io->rbio);

	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
	continue_at(cl, move_write_done, NULL);
}

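/*
 * Writes are issued in the order their reads were started: only the read at
 * the head of the list is eligible, and only once it has completed.
 */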
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}

static void move_read_endio(struct bio *bio)
{
	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
	struct moving_context *ctxt = io->write.ctxt;

	atomic_sub(io->read_sectors, &ctxt->read_sectors);
	io->read_completed = true;

	wake_up(&ctxt->wait);
	closure_put(&ctxt->cl);
}

static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
{
	struct moving_io *io;

	if (trans)
		bch2_trans_unlock(trans);

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
	}
}

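/*
 * Flush pending writes, then sleep until either more writes become pending or
 * @_cond becomes true; the transaction, if any, is unlocked first since the
 * writes can block.
 */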
#define move_ctxt_wait_event(_ctxt, _trans, _cond)		\
do {								\
	do_pending_writes(_ctxt, _trans);			\
								\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)

static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
				       struct btree_trans *trans)
{
	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);

	move_ctxt_wait_event(ctxt, trans,
		!atomic_read(&ctxt->write_sectors) ||
		atomic_read(&ctxt->write_sectors) != sectors_pending);
}

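/*
 * Start moving a single extent: allocate a moving_io whose read and write bios
 * share one inline bvec array, then issue the read; the write is kicked off by
 * do_pending_writes() once the read completes.
 */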
static int bch2_move_extent(struct btree_trans *trans,
			    struct moving_context *ctxt,
			    struct write_point_specifier wp,
			    struct bch_io_opts io_opts,
			    enum btree_id btree_id,
			    struct bkey_s_c k,
			    enum data_cmd data_cmd,
			    struct data_opts data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct moving_io *io;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned sectors = k.k->size, pages;
	int ret = -ENOMEM;

	/* write path might have to decompress data: */
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);

	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
	io = kzalloc(sizeof(struct moving_io) +
		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
	if (!io)
		goto err;

	io->write.ctxt		= ctxt;
	io->read_sectors	= k.k->size;
	io->write_sectors	= k.k->size;

	bio_init(&io->write.op.wbio.bio, io->bi_inline_vecs, pages);
	bio_set_prio(&io->write.op.wbio.bio,
		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
				 GFP_KERNEL))
		goto err_free;

	io->rbio.c		= c;
	io->rbio.opts		= io_opts;
	bio_init(&io->rbio.bio, io->bi_inline_vecs, pages);
	io->rbio.bio.bi_vcnt = pages;
	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	io->rbio.bio.bi_iter.bi_size = sectors << 9;

	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
	io->rbio.bio.bi_end_io		= move_read_endio;

	ret = bch2_migrate_write_init(c, &io->write, wp, io_opts,
				      data_cmd, data_opts, btree_id, k);
	if (ret)
		goto err_free_pages;

	atomic64_inc(&ctxt->stats->keys_moved);
	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);

	trace_move_extent(k.k);

	atomic_add(io->read_sectors, &ctxt->read_sectors);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&ctxt->cl);
	bch2_read_extent(trans, &io->rbio,
			 bkey_start_pos(k.k),
			 btree_id, k, 0,
			 BCH_READ_NODECODE|
			 BCH_READ_LAST_FRAGMENT);
	return 0;
err_free_pages:
	bio_free_pages(&io->write.op.wbio.bio);
err_free:
	kfree(io);
err:
	trace_move_alloc_fail(k.k);
	return ret;
}

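/* Look up and unpack the inode @pos refers to, for per-file IO options */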
static int lookup_inode(struct btree_trans *trans, struct bpos pos,
			struct bch_inode_unpacked *inode)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
			     BTREE_ITER_ALL_SNAPSHOTS);
	k = bch2_btree_iter_peek(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!k.k || bkey_cmp(k.k->p, pos)) {
		ret = -ENOENT;
		goto err;
	}

	ret = bkey_is_inode(k.k) ? 0 : -EIO;
	if (ret)
		goto err;

	ret = bch2_inode_unpack(k, inode);
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

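/*
 * Walk the keys of one btree in [@start, @end), applying @pred to each extent
 * to decide what to do with it; IO in flight is bounded by
 * SECTORS_IN_FLIGHT_PER_DEVICE and optionally rate limited.
 */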
static int __bch2_move_data(struct bch_fs *c,
		struct moving_context *ctxt,
		struct bch_ratelimit *rate,
		struct write_point_specifier wp,
		struct bpos start,
		struct bpos end,
		move_pred_fn pred, void *arg,
		struct bch_move_stats *stats,
		enum btree_id btree_id)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct bkey_buf sk;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct data_opts data_opts;
	enum data_cmd data_cmd;
	u64 delay, cur_inum = U64_MAX;
	int ret = 0, ret2;

	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);

	stats->data_type = BCH_DATA_user;
	stats->btree_id	= btree_id;
	stats->pos	= start;

	bch2_trans_iter_init(&trans, &iter, btree_id, start,
			     BTREE_ITER_PREFETCH|
			     BTREE_ITER_ALL_SNAPSHOTS);

	if (rate)
		bch2_ratelimit_reset(rate);

	while (1) {
		do {
			delay = rate ? bch2_ratelimit_delay(rate) : 0;

			if (delay) {
				bch2_trans_unlock(&trans);
				set_current_state(TASK_INTERRUPTIBLE);
			}

			if (kthread && (ret = kthread_should_stop())) {
				__set_current_state(TASK_RUNNING);
				goto out;
			}

			if (delay)
				schedule_timeout(delay);

			if (unlikely(freezing(current))) {
				move_ctxt_wait_event(ctxt, &trans, list_empty(&ctxt->reads));
				try_to_freeze();
			}
		} while (delay);

		move_ctxt_wait_event(ctxt, &trans,
			atomic_read(&ctxt->write_sectors) <
			SECTORS_IN_FLIGHT_PER_DEVICE);

		move_ctxt_wait_event(ctxt, &trans,
			atomic_read(&ctxt->read_sectors) <
			SECTORS_IN_FLIGHT_PER_DEVICE);

		bch2_trans_begin(&trans);

		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k);
		if (ret == -EINTR)
			continue;
		if (ret)
			break;

		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		stats->pos = iter.pos;

		if (!bkey_extent_is_direct_data(k.k))
			goto next_nondata;

		if (btree_id == BTREE_ID_extents &&
		    cur_inum != k.k->p.inode) {
			struct bch_inode_unpacked inode;

			io_opts = bch2_opts_to_inode_opts(c->opts);

			ret = lookup_inode(&trans,
					SPOS(0, k.k->p.inode, k.k->p.snapshot),
					&inode);
			if (ret == -EINTR)
				continue;

			if (!ret)
				bch2_io_opts_apply(&io_opts, bch2_inode_opts_get(&inode));

			cur_inum = k.k->p.inode;
		}

		switch ((data_cmd = pred(c, arg, k, &io_opts, &data_opts))) {
		case DATA_SKIP:
			goto next;
		case DATA_SCRUB:
			BUG();
		case DATA_ADD_REPLICAS:
		case DATA_REWRITE:
		case DATA_PROMOTE:
			break;
		default:
			BUG();
		}

		/*
		 * The iterator gets unlocked by __bch2_read_extent - need to
		 * save a copy of @k elsewhere:
		 */
		bch2_bkey_buf_reassemble(&sk, c, k);
		k = bkey_i_to_s_c(sk.k);

		ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
					data_cmd, data_opts);
		if (ret2) {
			if (ret2 == -EINTR)
				continue;

			if (ret2 == -ENOMEM) {
				/* memory allocation failure, wait for some IO to finish */
				bch2_move_ctxt_wait_for_io(ctxt, &trans);
				continue;
			}

			/* XXX signal failure */
			goto next;
		}

		if (rate)
			bch2_ratelimit_increment(rate, k.k->size);
next:
		atomic64_add(k.k->size, &stats->sectors_seen);
next_nondata:
		bch2_btree_iter_advance(&iter);
	}
out:
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

inline void bch_move_stats_init(struct bch_move_stats *stats, char *name)
{
	memset(stats, 0, sizeof(*stats));

	scnprintf(stats->name, sizeof(stats->name),
			"%s", name);
}

static inline void progress_list_add(struct bch_fs *c,
				     struct bch_move_stats *stats)
{
	mutex_lock(&c->data_progress_lock);
	list_add(&stats->list, &c->data_progress_list);
	mutex_unlock(&c->data_progress_lock);
}

static inline void progress_list_del(struct bch_fs *c,
				     struct bch_move_stats *stats)
{
	mutex_lock(&c->data_progress_lock);
	list_del(&stats->list);
	mutex_unlock(&c->data_progress_lock);
}

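/*
 * Move user data in [@start_pos, @end_pos] across the extents and reflink
 * btrees, then wait for all reads and writes to drain.
 */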
int bch2_move_data(struct bch_fs *c,
		   enum btree_id start_btree_id, struct bpos start_pos,
		   enum btree_id end_btree_id,   struct bpos end_pos,
		   struct bch_ratelimit *rate,
		   struct write_point_specifier wp,
		   move_pred_fn pred, void *arg,
		   struct bch_move_stats *stats)
{
	struct moving_context ctxt = { .stats = stats };
	enum btree_id id;
	int ret = 0;

	progress_list_add(c, stats);
	closure_init_stack(&ctxt.cl);
	INIT_LIST_HEAD(&ctxt.reads);
	init_waitqueue_head(&ctxt.wait);

	stats->data_type = BCH_DATA_user;

	for (id = start_btree_id;
	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
	     id++) {
		stats->btree_id = id;

		if (id != BTREE_ID_extents &&
		    id != BTREE_ID_reflink)
			continue;

		ret = __bch2_move_data(c, &ctxt, rate, wp,
				       id == start_btree_id ? start_pos : POS_MIN,
				       id == end_btree_id   ? end_pos   : POS_MAX,
				       pred, arg, stats, id);
		if (ret)
			break;
	}

	move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
	closure_sync(&ctxt.cl);

	EBUG_ON(atomic_read(&ctxt.write_sectors));

	trace_move_data(c,
			atomic64_read(&stats->sectors_moved),
			atomic64_read(&stats->keys_moved));

	progress_list_del(c, stats);
	return ret;
}

typedef enum data_cmd (*move_btree_pred)(struct bch_fs *, void *,
					 struct btree *, struct bch_io_opts *,
					 struct data_opts *);

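/*
 * Like bch2_move_data() but for btree nodes: walk the nodes of each btree in
 * the given range and rewrite the ones @pred selects. (Note that iteration
 * always starts at POS_MIN; @start_pos is currently unused.)
 */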
static int bch2_move_btree(struct bch_fs *c,
			   enum btree_id start_btree_id, struct bpos start_pos,
			   enum btree_id end_btree_id,   struct bpos end_pos,
			   move_btree_pred pred, void *arg,
			   struct bch_move_stats *stats)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
	struct btree_trans trans;
	struct btree_iter iter;
	struct btree *b;
	enum btree_id id;
	struct data_opts data_opts;
	enum data_cmd cmd;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);
	progress_list_add(c, stats);

	stats->data_type = BCH_DATA_btree;

	for (id = start_btree_id;
	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
	     id++) {
		stats->btree_id = id;

		bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0,
					  BTREE_ITER_PREFETCH);
retry:
		ret = 0;
		while (bch2_trans_begin(&trans),
		       (b = bch2_btree_iter_peek_node(&iter)) &&
		       !(ret = PTR_ERR_OR_ZERO(b))) {
			if (kthread && kthread_should_stop())
				break;

			if ((cmp_int(id, end_btree_id) ?:
			     bpos_cmp(b->key.k.p, end_pos)) > 0)
				break;

			stats->pos = iter.pos;

			switch ((cmd = pred(c, arg, b, &io_opts, &data_opts))) {
			case DATA_SKIP:
				goto next;
			case DATA_SCRUB:
				BUG();
			case DATA_ADD_REPLICAS:
			case DATA_REWRITE:
				break;
			default:
				BUG();
			}

			ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret;
			if (ret == -EINTR)
				continue;
			if (ret)
				break;
next:
			bch2_btree_iter_next_node(&iter);
		}
		if (ret == -EINTR)
			goto retry;

		bch2_trans_iter_exit(&trans, &iter);

		if (kthread && kthread_should_stop())
			break;
	}

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error %i in bch2_move_btree", ret);

	/* flush relevant btree updates */
	closure_wait_event(&c->btree_interior_update_wait,
			   !bch2_btree_interior_updates_nr_pending(c));

	progress_list_del(c, stats);
	return ret;
}

#if 0
static enum data_cmd scrub_pred(struct bch_fs *c, void *arg,
				struct bkey_s_c k,
				struct bch_io_opts *io_opts,
				struct data_opts *data_opts)
{
	return DATA_SCRUB;
}
#endif

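/* Add replicas until each key has the durability its options ask for */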
static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
				      struct bkey_s_c k,
				      struct bch_io_opts *io_opts,
				      struct data_opts *data_opts)
{
	unsigned nr_good = bch2_bkey_durability(c, k);
	unsigned replicas = bkey_is_btree_ptr(k.k)
		? c->opts.metadata_replicas
		: io_opts->data_replicas;

	if (!nr_good || nr_good >= replicas)
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->nr_replicas		= 1;
	data_opts->btree_insert_flags	= 0;
	return DATA_ADD_REPLICAS;
}

static enum data_cmd migrate_pred(struct bch_fs *c, void *arg,
				  struct bkey_s_c k,
				  struct bch_io_opts *io_opts,
				  struct data_opts *data_opts)
{
	struct bch_ioctl_data *op = arg;

	if (!bch2_bkey_has_device(k, op->migrate.dev))
		return DATA_SKIP;

	data_opts->target		= 0;
	data_opts->nr_replicas		= 1;
	data_opts->btree_insert_flags	= 0;
	data_opts->rewrite_dev		= op->migrate.dev;
	return DATA_REWRITE;
}

static enum data_cmd rereplicate_btree_pred(struct bch_fs *c, void *arg,
					    struct btree *b,
					    struct bch_io_opts *io_opts,
					    struct data_opts *data_opts)
{
	return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}

static enum data_cmd migrate_btree_pred(struct bch_fs *c, void *arg,
					struct btree *b,
					struct bch_io_opts *io_opts,
					struct data_opts *data_opts)
{
	return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}

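/*
 * Returns true if the btree node's key format could produce keys that unpack
 * incorrectly (the bformat overflow bug): more packed bits than the current
 * format allows, a redundant field offset, or a field whose offset plus packed
 * range wraps past the unpacked field's maximum value.
 */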
static bool bformat_needs_redo(struct bkey_format *f)
{
	unsigned i;

	for (i = 0; i < f->nr_fields; i++) {
		unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
		u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
		u64 field_offset = le64_to_cpu(f->field_offset[i]);

		if (f->bits_per_field[i] > unpacked_bits)
			return true;

		if ((f->bits_per_field[i] == unpacked_bits) && field_offset)
			return true;

		if (((field_offset + ((1ULL << f->bits_per_field[i]) - 1)) &
		     unpacked_mask) <
		    field_offset)
			return true;
	}

	return false;
}

static enum data_cmd rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
					    struct btree *b,
					    struct bch_io_opts *io_opts,
					    struct data_opts *data_opts)
{
	if (b->version_ondisk != c->sb.version ||
	    btree_node_need_rewrite(b) ||
	    bformat_needs_redo(&b->format)) {
		data_opts->target		= 0;
		data_opts->nr_replicas		= 1;
		data_opts->btree_insert_flags	= 0;
		return DATA_REWRITE;
	}

	return DATA_SKIP;
}

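/*
 * Rewrite all btree nodes written by older versions, then record in the
 * superblock that the on-disk format fixups are complete.
 */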
int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
{
	int ret;

	ret = bch2_move_btree(c,
			      0,		POS_MIN,
			      BTREE_ID_NR,	SPOS_MAX,
			      rewrite_old_nodes_pred, c, stats);
	if (!ret) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
		c->disk_sb.sb->version_min = c->disk_sb.sb->version;
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	return ret;
}

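/*
 * Entry point for the data job ioctl (BCH_IOCTL_DATA): flush journal pins for
 * the relevant device(s), then run the requested operation over btree nodes
 * and user data.
 */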
int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
{
	int ret = 0;

	switch (op.op) {
	case BCH_DATA_OP_REREPLICATE:
		bch_move_stats_init(stats, "rereplicate");
		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, -1);

		ret = bch2_move_btree(c,
				      op.start_btree,	op.start_pos,
				      op.end_btree,	op.end_pos,
				      rereplicate_btree_pred, c, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c,
				     op.start_btree,	op.start_pos,
				     op.end_btree,	op.end_pos,
				     NULL, writepoint_hashed((unsigned long) current),
				     rereplicate_pred, c, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_MIGRATE:
		if (op.migrate.dev >= c->sb.nr_devices)
			return -EINVAL;

		bch_move_stats_init(stats, "migrate");
		stats->data_type = BCH_DATA_journal;
		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

		ret = bch2_move_btree(c,
				      op.start_btree,	op.start_pos,
				      op.end_btree,	op.end_pos,
				      migrate_btree_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;

		ret = bch2_move_data(c,
				     op.start_btree,	op.start_pos,
				     op.end_btree,	op.end_pos,
				     NULL, writepoint_hashed((unsigned long) current),
				     migrate_pred, &op, stats) ?: ret;
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_REWRITE_OLD_NODES:
		bch_move_stats_init(stats, "rewrite_old_nodes");
		ret = bch2_scan_old_btree_nodes(c, stats);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}