// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

void bch2_btree_node_io_unlock(struct btree *b)
{
        EBUG_ON(!btree_node_write_in_flight(b));

        clear_btree_node_write_in_flight(b);
        wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
        BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

        wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
                            TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
        wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
        BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
        BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

        wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
                           struct bkey_packed *start,
                           struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        struct bkey_packed *k, *p;

        if (start == end)
                return;

        for (p = start, k = bkey_next(start);
             k != end;
             p = k, k = bkey_next(k)) {
                struct bkey l = bkey_unpack_key(b, p);
                struct bkey r = bkey_unpack_key(b, k);

                BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
        }
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
        struct bkey_packed *k;

        for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
                k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
                              bool used_mempool, void *p)
{
        if (used_mempool)
                mempool_free(p, &c->btree_bounce_pool);
        else
                vpfree(p, size);
}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
                                bool *used_mempool)
{
        unsigned flags = memalloc_nofs_save();
        void *p;

        BUG_ON(size > btree_bytes(c));

        *used_mempool = false;
        p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
        if (!p) {
                *used_mempool = true;
                p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
        }
        memalloc_nofs_restore(flags);
        return p;
}
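
/*
 * Typical bounce buffer usage, as a sketch (not called anywhere in this
 * form): a cheap nonblocking vmalloc is tried first, falling back to the
 * reserved mempool so allocation always makes forward progress:
 *
 *	bool used_mempool;
 *	void *buf = btree_bounce_alloc(c, bytes, &used_mempool);
 *
 *	... assemble sorted keys in buf ...
 *
 *	btree_bounce_free(c, bytes, used_mempool, buf);
 */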

static void sort_bkey_ptrs(const struct btree *bt,
                           struct bkey_packed **ptrs, unsigned nr)
{
        unsigned n = nr, a = nr / 2, b, c, d;

        if (!a)
                return;

        /* Heap sort: see lib/sort.c: */
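        /*
         * The keys are variable length, so we sort an array of pointers to
         * them rather than swapping the keys themselves.  0-based heap
         * indexing: the children of node b are 2*b+1 and 2*b+2, and the
         * parent of b is (b - 1) / 2.
         */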
        while (1) {
                if (a)
                        a--;
                else if (--n)
                        swap(ptrs[0], ptrs[n]);
                else
                        break;

                for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
                        b = bch2_bkey_cmp_packed(bt,
                                            ptrs[c],
                                            ptrs[d]) >= 0 ? c : d;
                if (d == n)
                        b = c;

                while (b != a &&
                       bch2_bkey_cmp_packed(bt,
                                       ptrs[a],
                                       ptrs[b]) >= 0)
                        b = (b - 1) / 2;
                c = b;
                while (b != a) {
                        b = (b - 1) / 2;
                        swap(ptrs[b], ptrs[c]);
                }
        }
}

static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
        struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
        bool used_mempool = false;
        size_t bytes = b->whiteout_u64s * sizeof(u64);

        if (!b->whiteout_u64s)
                return;

        new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

        ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

        for (k = unwritten_whiteouts_start(c, b);
             k != unwritten_whiteouts_end(c, b);
             k = bkey_next(k))
                *--ptrs = k;

        sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

        k = new_whiteouts;

        while (ptrs != ptrs_end) {
                bkey_copy(k, *ptrs);
                k = bkey_next(k);
                ptrs++;
        }

        verify_no_dups(b, new_whiteouts,
                       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

        memcpy_u64s(unwritten_whiteouts_start(c, b),
                    new_whiteouts, b->whiteout_u64s);

        btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
                                bool compacting, enum compact_mode mode)
{
        if (!bset_dead_u64s(b, t))
                return false;

        switch (mode) {
        case COMPACT_LAZY:
                return should_compact_bset_lazy(b, t) ||
                        (compacting && !bset_written(b, bset(b, t)));
        case COMPACT_ALL:
                return true;
        default:
                BUG();
        }
}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
        struct bset_tree *t;
        bool ret = false;

        for_each_bset(b, t) {
                struct bset *i = bset(b, t);
                struct bkey_packed *k, *n, *out, *start, *end;
                struct btree_node_entry *src = NULL, *dst = NULL;

                if (t != b->set && !bset_written(b, i)) {
                        src = container_of(i, struct btree_node_entry, keys);
                        dst = max(write_block(b),
                                  (void *) btree_bkey_last(b, t - 1));
                }

                if (src != dst)
                        ret = true;

                if (!should_compact_bset(b, t, ret, mode)) {
                        if (src != dst) {
                                memmove(dst, src, sizeof(*src) +
                                        le16_to_cpu(src->keys.u64s) *
                                        sizeof(u64));
                                i = &dst->keys;
                                set_btree_bset(b, t, i);
                        }
                        continue;
                }

                start   = btree_bkey_first(b, t);
                end     = btree_bkey_last(b, t);

                if (src != dst) {
                        memmove(dst, src, sizeof(*src));
                        i = &dst->keys;
                        set_btree_bset(b, t, i);
                }

                out = i->start;

                for (k = start; k != end; k = n) {
                        n = bkey_next(k);

                        if (!bkey_deleted(k)) {
                                bkey_copy(out, k);
                                out = bkey_next(out);
                        } else {
                                BUG_ON(k->needs_whiteout);
                        }
                }

                i->u64s = cpu_to_le16((u64 *) out - i->_data);
                set_btree_bset_end(b, t);
                bch2_bset_set_no_aux_tree(b, t);
                ret = true;
        }

        bch2_verify_btree_nr_keys(b);

        bch2_btree_build_aux_trees(b);

        return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
                            enum compact_mode mode)
{
        return bch2_drop_whiteouts(b, mode);
}

static void btree_node_sort(struct bch_fs *c, struct btree *b,
                            unsigned start_idx,
                            unsigned end_idx,
                            bool filter_whiteouts)
{
        struct btree_node *out;
        struct sort_iter sort_iter;
        struct bset_tree *t;
        struct bset *start_bset = bset(b, &b->set[start_idx]);
        bool used_mempool = false;
        u64 start_time, seq = 0;
        unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
        bool sorting_entire_node = start_idx == 0 &&
                end_idx == b->nsets;

        sort_iter_init(&sort_iter, b);

        for (t = b->set + start_idx;
             t < b->set + end_idx;
             t++) {
                u64s += le16_to_cpu(bset(b, t)->u64s);
                sort_iter_add(&sort_iter,
                              btree_bkey_first(b, t),
                              btree_bkey_last(b, t));
        }

        bytes = sorting_entire_node
                ? btree_bytes(c)
                : __vstruct_bytes(struct btree_node, u64s);

        out = btree_bounce_alloc(c, bytes, &used_mempool);

        start_time = local_clock();

        u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);

        out->keys.u64s = cpu_to_le16(u64s);

        BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

        if (sorting_entire_node)
                bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
                                       start_time);

        /* Make sure we preserve bset journal_seq: */
        for (t = b->set + start_idx; t < b->set + end_idx; t++)
                seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
        start_bset->journal_seq = cpu_to_le64(seq);

        if (sorting_entire_node) {
                unsigned u64s = le16_to_cpu(out->keys.u64s);

                BUG_ON(bytes != btree_bytes(c));

                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 */
                *out = *b->data;
                out->keys.u64s = cpu_to_le16(u64s);
                swap(out, b->data);
                set_btree_bset(b, b->set, &b->data->keys);
        } else {
                start_bset->u64s = out->keys.u64s;
                memcpy_u64s(start_bset->start,
                            out->keys.start,
                            le16_to_cpu(out->keys.u64s));
        }

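        /* Collapse the sorted range of bsets down into set[start_idx]: */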
        for (i = start_idx + 1; i < end_idx; i++)
                b->nr.bset_u64s[start_idx] +=
                        b->nr.bset_u64s[i];

        b->nsets -= shift;

        for (i = start_idx + 1; i < b->nsets; i++) {
                b->nr.bset_u64s[i]      = b->nr.bset_u64s[i + shift];
                b->set[i]               = b->set[i + shift];
        }

        for (i = b->nsets; i < MAX_BSETS; i++)
                b->nr.bset_u64s[i] = 0;

        set_btree_bset_end(b, &b->set[start_idx]);
        bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

        btree_bounce_free(c, bytes, used_mempool, out);

        bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
                         struct btree *dst,
                         struct btree *src)
{
        struct btree_nr_keys nr;
        struct btree_node_iter src_iter;
        u64 start_time = local_clock();

        BUG_ON(dst->nsets != 1);

        bch2_bset_set_no_aux_tree(dst, dst->set);

        bch2_btree_node_iter_init_from_start(&src_iter, src);

        if (btree_node_is_extents(src))
                nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
                                src, &src_iter,
                                &dst->format,
                                true);
        else
                nr = bch2_sort_repack(btree_bset_first(dst),
                                src, &src_iter,
                                &dst->format,
                                true);

        bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
                               start_time);

        set_btree_bset_end(dst, dst->set);

        dst->nr.live_u64s       += nr.live_u64s;
        dst->nr.bset_u64s[0]    += nr.bset_u64s[0];
        dst->nr.packed_keys     += nr.packed_keys;
        dst->nr.unpacked_keys   += nr.unpacked_keys;

        bch2_verify_btree_nr_keys(dst);
}

#define SORT_CRIT       (4096 / sizeof(u64))

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets - sort some of them together:
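 *
 * (Written and unwritten bsets are compacted in separate passes; merging them
 * would break the invariant that everything before b->written matches what is
 * already on disk.)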
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
        unsigned unwritten_idx;
        bool ret = false;

        for (unwritten_idx = 0;
             unwritten_idx < b->nsets;
             unwritten_idx++)
                if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
                        break;

        if (b->nsets - unwritten_idx > 1) {
                btree_node_sort(c, b, unwritten_idx,
                                b->nsets, false);
                ret = true;
        }

        if (unwritten_idx > 1) {
                btree_node_sort(c, b, 0, unwritten_idx, false);
                ret = true;
        }

        return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
        struct bset_tree *t;

        for_each_bset(b, t)
                bch2_bset_build_aux_tree(b, t,
                                !bset_written(b, bset(b, t)) &&
                                t == bset_tree_last(b));
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that keys can then
 * be inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If we sorted (i.e. invalidated iterators), the iterator is reinitialized via
 * bch2_btree_iter_reinit_node() before returning.
 */
void bch2_btree_init_next(struct btree_trans *trans,
                          struct btree_iter *iter,
                          struct btree *b)
{
        struct bch_fs *c = trans->c;
        struct btree_node_entry *bne;
        bool reinit_iter = false;

        EBUG_ON(!(b->c.lock.state.seq & 1));
        EBUG_ON(iter && iter->l[b->c.level].b != b);
        BUG_ON(bset_written(b, bset(b, &b->set[1])));

        if (b->nsets == MAX_BSETS &&
            !btree_node_write_in_flight(b)) {
                unsigned log_u64s[] = {
                        ilog2(bset_u64s(&b->set[0])),
                        ilog2(bset_u64s(&b->set[1])),
                        ilog2(bset_u64s(&b->set[2])),
                };

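                /*
                 * If the middle bset is at least as big (in log scale) as the
                 * average of the outer two, in-memory compaction likely won't
                 * buy much - write the node out instead:
                 */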
                if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
                        bch2_btree_node_write(c, b, SIX_LOCK_write);
                        reinit_iter = true;
                }
        }

        if (b->nsets == MAX_BSETS &&
            btree_node_compact(c, b))
                reinit_iter = true;

        BUG_ON(b->nsets >= MAX_BSETS);

        bne = want_new_bset(c, b);
        if (bne)
                bch2_bset_init_next(c, b, bne);

        bch2_btree_build_aux_trees(b);

        if (iter && reinit_iter)
                bch2_btree_iter_reinit_node(iter, b);
}

static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
                          struct btree *b)
{
        pr_buf(out, "%s level %u/%u\n  ",
               bch2_btree_ids[b->c.btree_id],
               b->c.level,
               c->btree_roots[b->c.btree_id].level);
        bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
                          struct bch_dev *ca,
                          struct btree *b, struct bset *i,
                          unsigned offset, int write)
{
        pr_buf(out, "error validating btree node ");
        if (write)
                pr_buf(out, "before write ");
        if (ca)
                pr_buf(out, "on %s ", ca->name);
        pr_buf(out, "at btree ");
        btree_pos_to_text(out, c, b);

        pr_buf(out, "\n  node offset %u", b->written);
        if (i)
                pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
}

enum btree_err_type {
        BTREE_ERR_FIXABLE,
        BTREE_ERR_WANT_RETRY,
        BTREE_ERR_MUST_RETRY,
        BTREE_ERR_FATAL,
};

enum btree_validate_ret {
        BTREE_RETRY_READ = 64,
};

#define btree_err(type, c, ca, b, i, msg, ...)                          \
({                                                                      \
        __label__ out;                                                  \
        char _buf[300];                                                 \
        char *_buf2 = _buf;                                             \
        struct printbuf out = PBUF(_buf);                               \
                                                                        \
        _buf2 = kmalloc(4096, GFP_ATOMIC);                              \
        if (_buf2)                                                      \
                out = _PBUF(_buf2, 4096);                               \
                                                                        \
        btree_err_msg(&out, c, ca, b, i, b->written, write);            \
        pr_buf(&out, ": " msg, ##__VA_ARGS__);                          \
                                                                        \
        if (type == BTREE_ERR_FIXABLE &&                                \
            write == READ &&                                            \
            !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {             \
                mustfix_fsck_err(c, "%s", _buf2);                       \
                goto out;                                               \
        }                                                               \
                                                                        \
        switch (write) {                                                \
        case READ:                                                      \
                bch_err(c, "%s", _buf2);                                \
                                                                        \
                switch (type) {                                         \
                case BTREE_ERR_FIXABLE:                                 \
                        ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
                        goto fsck_err;                                  \
                case BTREE_ERR_WANT_RETRY:                              \
                        if (have_retry) {                               \
                                ret = BTREE_RETRY_READ;                 \
                                goto fsck_err;                          \
                        }                                               \
                        break;                                          \
                case BTREE_ERR_MUST_RETRY:                              \
                        ret = BTREE_RETRY_READ;                         \
                        goto fsck_err;                                  \
                case BTREE_ERR_FATAL:                                   \
                        ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
                        goto fsck_err;                                  \
                }                                                       \
                break;                                                  \
        case WRITE:                                                     \
                bch_err(c, "corrupt metadata before write: %s", _buf2); \
                                                                        \
                if (bch2_fs_inconsistent(c)) {                          \
                        ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
                        goto fsck_err;                                  \
                }                                                       \
                break;                                                  \
        }                                                               \
out:                                                                    \
        if (_buf2 != _buf)                                              \
                kfree(_buf2);                                           \
        true;                                                           \
})

#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
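
/*
 * btree_err() evaluates to true when control continues past it; for errors
 * that can't be handled inline it sets @ret and jumps to the enclosing
 * function's fsck_err label.  btree_err_on() is the conditional form: it only
 * fires when @cond is true, and evaluates to false otherwise.
 */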

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
        struct bset_tree *t;
        struct bkey_s_c k;
        struct bkey unpacked;
        struct btree_node_iter iter;

        for_each_bset(b, t) {
                struct bset *i = bset(b, t);
                struct bkey_packed *k;

                for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
                                break;

                if (k != i->start) {
                        unsigned shift = (u64 *) k - (u64 *) i->start;

                        memmove_u64s_down(i->start, k,
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
                        set_btree_bset_end(b, t);
                        bch2_bset_set_no_aux_tree(b, t);
                }

                for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
                                break;

                if (k != vstruct_last(i)) {
                        i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
                        set_btree_bset_end(b, t);
                        bch2_bset_set_no_aux_tree(b, t);
                }
        }

        bch2_btree_build_aux_trees(b);

        for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
                BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
                BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
        }
}

static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                         struct btree *b, struct bset *i,
                         unsigned sectors, int write, bool have_retry)
{
        unsigned version = le16_to_cpu(i->version);
        const char *err;
        char buf1[100];
        char buf2[100];
        int ret = 0;

        btree_err_on((version != BCH_BSET_VERSION_OLD &&
                      version < bcachefs_metadata_version_min) ||
                     version >= bcachefs_metadata_version_max,
                     BTREE_ERR_FATAL, c, ca, b, i,
                     "unsupported bset version");

        if (btree_err_on(version < c->sb.version_min,
                         BTREE_ERR_FIXABLE, c, NULL, b, i,
                         "bset version %u older than superblock version_min %u",
                         version, c->sb.version_min)) {
                mutex_lock(&c->sb_lock);
                c->disk_sb.sb->version_min = cpu_to_le16(version);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        }

        if (btree_err_on(version > c->sb.version,
                         BTREE_ERR_FIXABLE, c, NULL, b, i,
                         "bset version %u newer than superblock version %u",
                         version, c->sb.version)) {
                mutex_lock(&c->sb_lock);
                c->disk_sb.sb->version = cpu_to_le16(version);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        }

        btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
                     BTREE_ERR_FATAL, c, ca, b, i,
                     "BSET_SEPARATE_WHITEOUTS no longer supported");

        if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
                         BTREE_ERR_FIXABLE, c, ca, b, i,
                         "bset past end of btree node")) {
                i->u64s = 0;
                return 0;
        }

        btree_err_on(b->written && !i->u64s,
                     BTREE_ERR_FIXABLE, c, ca, b, i,
                     "empty bset");

        if (!b->written) {
                struct btree_node *bn =
                        container_of(i, struct btree_node, keys);
                /* These indicate that we read the wrong btree node: */

                if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *bp =
                                &bkey_i_to_btree_ptr_v2(&b->key)->v;

                        /* XXX endianness */
                        btree_err_on(bp->seq != bn->keys.seq,
                                     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
                                     "incorrect sequence number (wrong btree node)");
                }

                btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
                             BTREE_ERR_MUST_RETRY, c, ca, b, i,
                             "incorrect btree id");

                btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
                             BTREE_ERR_MUST_RETRY, c, ca, b, i,
                             "incorrect level");

                if (!write)
                        compat_btree_node(b->c.level, b->c.btree_id, version,
                                          BSET_BIG_ENDIAN(i), write, bn);

                if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *bp =
                                &bkey_i_to_btree_ptr_v2(&b->key)->v;

                        if (BTREE_PTR_RANGE_UPDATED(bp)) {
                                b->data->min_key = bp->min_key;
                                b->data->max_key = b->key.k.p;
                        }

                        btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
                                     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
                                     "incorrect min_key: got %s should be %s",
                                     (bch2_bpos_to_text(&PBUF(buf1), bn->min_key), buf1),
                                     (bch2_bpos_to_text(&PBUF(buf2), bp->min_key), buf2));
                }

                btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
                             BTREE_ERR_MUST_RETRY, c, ca, b, i,
                             "incorrect max key %s",
                             (bch2_bpos_to_text(&PBUF(buf1), bn->max_key), buf1));

                if (write)
                        compat_btree_node(b->c.level, b->c.btree_id, version,
                                          BSET_BIG_ENDIAN(i), write, bn);

                err = bch2_bkey_format_validate(&bn->format);
                btree_err_on(err,
                             BTREE_ERR_FATAL, c, ca, b, i,
                             "invalid bkey format: %s", err);

                compat_bformat(b->c.level, b->c.btree_id, version,
                               BSET_BIG_ENDIAN(i), write,
                               &bn->format);
        }
fsck_err:
        return ret;
}

static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                         struct bset *i, unsigned *whiteout_u64s,
                         int write, bool have_retry)
{
        unsigned version = le16_to_cpu(i->version);
        struct bkey_packed *k, *prev = NULL;
        bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
                BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
        int ret = 0;

        for (k = i->start;
             k != vstruct_last(i);) {
                struct bkey_s u;
                struct bkey tmp;
                const char *invalid;

                if (btree_err_on(bkey_next(k) > vstruct_last(i),
                                 BTREE_ERR_FIXABLE, c, NULL, b, i,
                                 "key extends past end of bset")) {
                        i->u64s = cpu_to_le16((u64 *) k - i->_data);
                        break;
                }

                if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
                                 BTREE_ERR_FIXABLE, c, NULL, b, i,
                                 "invalid bkey format %u", k->format)) {
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                        memmove_u64s_down(k, bkey_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        continue;
                }

                /* XXX: validate k->u64s */
                if (!write)
                        bch2_bkey_compat(b->c.level, b->c.btree_id, version,
                                    BSET_BIG_ENDIAN(i), write,
                                    &b->format, k);

                u = __bkey_disassemble(b, k, &tmp);

                invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
                        (!updated_range ? bch2_bkey_in_btree_node(b, u.s_c) : NULL) ?:
                        (write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
                if (invalid) {
                        char buf[160];

                        bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
                        btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
                                  "invalid bkey: %s\n%s", invalid, buf);

                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                        memmove_u64s_down(k, bkey_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        continue;
                }

                if (write)
                        bch2_bkey_compat(b->c.level, b->c.btree_id, version,
                                    BSET_BIG_ENDIAN(i), write,
                                    &b->format, k);

                if (prev && bkey_iter_cmp(b, prev, k) > 0) {
                        char buf1[80];
                        char buf2[80];
                        struct bkey up = bkey_unpack_key(b, prev);

                        bch2_bkey_to_text(&PBUF(buf1), &up);
                        bch2_bkey_to_text(&PBUF(buf2), u.k);

                        bch2_dump_bset(c, b, i, 0);

                        if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
                                      "keys out of order: %s > %s",
                                      buf1, buf2)) {
                                i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                                memmove_u64s_down(k, bkey_next(k),
                                                  (u64 *) vstruct_end(i) - (u64 *) k);
                                continue;
                        }
                }

                prev = k;
                k = bkey_next(k);
        }
fsck_err:
        return ret;
}

int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                              struct btree *b, bool have_retry)
{
        struct btree_node_entry *bne;
        struct sort_iter *iter;
        struct btree_node *sorted;
        struct bkey_packed *k;
        struct bch_extent_ptr *ptr;
        struct bset *i;
        bool used_mempool, blacklisted;
        bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
                BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
        unsigned u64s;
        unsigned nonblacklisted_written = 0;
        int ret, retry_read = 0, write = READ;

        b->version_ondisk = U16_MAX;

        iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
        sort_iter_init(iter, b);
        iter->size = (btree_blocks(c) + 1) * 2;

        if (bch2_meta_read_fault("btree"))
                btree_err(BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
                          "dynamic fault");

        btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
                     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
                     "bad magic");

        btree_err_on(!b->data->keys.seq,
                     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
                     "bad btree header");

        if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                struct bch_btree_ptr_v2 *bp =
                        &bkey_i_to_btree_ptr_v2(&b->key)->v;

                btree_err_on(b->data->keys.seq != bp->seq,
                             BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
                             "got wrong btree node (seq %llx want %llx)",
                             b->data->keys.seq, bp->seq);
        }

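        /*
         * Walk the bsets in this node: the first bset is part of the btree
         * node header itself; subsequent ones are btree_node_entries,
         * recognized by a sequence number matching the header's:
         */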
        while (b->written < c->opts.btree_node_size) {
                unsigned sectors, whiteout_u64s = 0;
                struct nonce nonce;
                struct bch_csum csum;
                bool first = !b->written;

                if (!b->written) {
                        i = &b->data->keys;

                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     BTREE_ERR_WANT_RETRY, c, ca, b, i,
                                     "unknown checksum type %llu",
                                     BSET_CSUM_TYPE(i));

                        nonce = btree_nonce(i, b->written << 9);
                        csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);

                        btree_err_on(bch2_crc_cmp(csum, b->data->csum),
                                     BTREE_ERR_WANT_RETRY, c, ca, b, i,
                                     "invalid checksum");

                        bset_encrypt(c, i, b->written << 9);

                        btree_err_on(btree_node_is_extents(b) &&
                                     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
                                     BTREE_ERR_FATAL, c, NULL, b, NULL,
                                     "btree node does not have NEW_EXTENT_OVERWRITE set");

                        sectors = vstruct_sectors(b->data, c->block_bits);
                } else {
                        bne = write_block(b);
                        i = &bne->keys;

                        if (i->seq != b->data->keys.seq)
                                break;

                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     BTREE_ERR_WANT_RETRY, c, ca, b, i,
                                     "unknown checksum type %llu",
                                     BSET_CSUM_TYPE(i));

                        nonce = btree_nonce(i, b->written << 9);
                        csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

                        btree_err_on(bch2_crc_cmp(csum, bne->csum),
                                     BTREE_ERR_WANT_RETRY, c, ca, b, i,
                                     "invalid checksum");

                        bset_encrypt(c, i, b->written << 9);

                        sectors = vstruct_sectors(bne, c->block_bits);
                }

                b->version_ondisk = min(b->version_ondisk,
                                        le16_to_cpu(i->version));

                ret = validate_bset(c, ca, b, i, sectors,
                                    READ, have_retry);
                if (ret)
                        goto fsck_err;

                if (!b->written)
                        btree_node_set_format(b, b->data->format);

                ret = validate_bset_keys(c, b, i, &whiteout_u64s,
                                    READ, have_retry);
                if (ret)
                        goto fsck_err;

                SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

                b->written += sectors;

                blacklisted = bch2_journal_seq_is_blacklisted(c,
                                        le64_to_cpu(i->journal_seq),
                                        true);

                btree_err_on(blacklisted && first,
                             BTREE_ERR_FIXABLE, c, ca, b, i,
                             "first btree node bset has blacklisted journal seq");
                if (blacklisted && !first)
                        continue;

                sort_iter_add(iter, i->start,
                              vstruct_idx(i, whiteout_u64s));

                sort_iter_add(iter,
                              vstruct_idx(i, whiteout_u64s),
                              vstruct_last(i));

                nonblacklisted_written = b->written;
        }

        for (bne = write_block(b);
             bset_byte_offset(b, bne) < btree_bytes(c);
             bne = (void *) bne + block_bytes(c))
                btree_err_on(bne->keys.seq == b->data->keys.seq &&
                             !bch2_journal_seq_is_blacklisted(c,
                                        le64_to_cpu(bne->keys.journal_seq),
                                        true),
                             BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
                             "found bset signature after last bset");

        /*
         * Blacklisted bsets are those that were written after the most recent
         * (flush) journal write. Since there wasn't a flush, they may not have
         * made it to all devices - which means we shouldn't write new bsets
         * after them, as that could leave a gap and then reads from that device
         * wouldn't find all the bsets in that btree node - which means it's
         * important that we start writing new bsets after the most recent _non_
         * blacklisted bset:
         */
        b->written = nonblacklisted_written;

        sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
        sorted->keys.u64s = 0;

        set_btree_bset(b, b->set, &b->data->keys);

        b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

        u64s = le16_to_cpu(sorted->keys.u64s);
        *sorted = *b->data;
        sorted->keys.u64s = cpu_to_le16(u64s);
        swap(sorted, b->data);
        set_btree_bset(b, b->set, &b->data->keys);
        b->nsets = 1;

        BUG_ON(b->nr.live_u64s != u64s);

        btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);

        if (updated_range)
                bch2_btree_node_drop_keys_outside_node(b);

        i = &b->data->keys;
        for (k = i->start; k != vstruct_last(i);) {
                struct bkey tmp;
                struct bkey_s u = __bkey_disassemble(b, k, &tmp);
                const char *invalid = bch2_bkey_val_invalid(c, u.s_c);

                if (invalid ||
                    (bch2_inject_invalid_keys &&
                     !bversion_cmp(u.k->version, MAX_VERSION))) {
                        char buf[160];

                        bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
                        btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
                                  "invalid bkey %s: %s", buf, invalid);

                        btree_keys_account_key_drop(&b->nr, 0, k);

                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                        memmove_u64s_down(k, bkey_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        set_btree_bset_end(b, b->set);
                        continue;
                }

                if (u.k->type == KEY_TYPE_btree_ptr_v2) {
                        struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

                        bp.v->mem_ptr = 0;
                }

                k = bkey_next(k);
        }

        bch2_bset_build_aux_tree(b, b->set, false);

        set_needs_whiteout(btree_bset_first(b), true);

        btree_node_reset_sib_u64s(b);

        bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

                if (ca->mi.state != BCH_MEMBER_STATE_rw)
                        set_btree_node_need_rewrite(b);
        }
out:
        mempool_free(iter, &c->fill_iter);
        return retry_read;
fsck_err:
        if (ret == BTREE_RETRY_READ) {
                retry_read = 1;
        } else {
                bch2_inconsistent_error(c);
                set_btree_node_read_error(b);
        }
        goto out;
}

static void btree_node_read_work(struct work_struct *work)
{
        struct btree_read_bio *rb =
                container_of(work, struct btree_read_bio, work);
        struct bch_fs *c        = rb->c;
        struct btree *b         = rb->b;
        struct bch_dev *ca      = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
        struct bio *bio         = &rb->bio;
        struct bch_io_failures failed = { .nr = 0 };
        char buf[200];
        struct printbuf out;
        bool saw_error = false;
        bool can_retry;

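        /*
         * The loop is entered at "start" so the first pass validates the bio
         * that already completed; retries pick a new replica and resubmit:
         */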
        goto start;
        while (1) {
                bch_info(c, "retrying read");
                ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
                rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
                bio_reset(bio);
                bio->bi_opf             = REQ_OP_READ|REQ_SYNC|REQ_META;
                bio->bi_iter.bi_sector  = rb->pick.ptr.offset;
                bio->bi_iter.bi_size    = btree_bytes(c);

                if (rb->have_ioref) {
                        bio_set_dev(bio, ca->disk_sb.bdev);
                        submit_bio_wait(bio);
                } else {
                        bio->bi_status = BLK_STS_REMOVED;
                }
start:
                out = PBUF(buf);
                btree_pos_to_text(&out, c, b);
                bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
                                   bch2_blk_status_to_str(bio->bi_status), buf);
                if (rb->have_ioref)
                        percpu_ref_put(&ca->io_ref);
                rb->have_ioref = false;

                bch2_mark_io_failure(&failed, &rb->pick);

                can_retry = bch2_bkey_pick_read_device(c,
                                bkey_i_to_s_c(&b->key),
                                &failed, &rb->pick) > 0;

                if (!bio->bi_status &&
                    !bch2_btree_node_read_done(c, ca, b, can_retry))
                        break;

                saw_error = true;

                if (!can_retry) {
                        set_btree_node_read_error(b);
                        break;
                }
        }

        bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
                               rb->start_time);
        bio_put(&rb->bio);

        if (saw_error && !btree_node_read_error(b))
                bch2_btree_node_rewrite_async(c, b);

        clear_btree_node_read_in_flight(b);
        wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
        struct btree_read_bio *rb =
                container_of(bio, struct btree_read_bio, bio);
        struct bch_fs *c        = rb->c;

        if (rb->have_ioref) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
                bch2_latency_acct(ca, rb->start_time, READ);
        }

        queue_work(c->io_complete_wq, &rb->work);
}

struct btree_node_read_all {
        struct closure          cl;
        struct bch_fs           *c;
        struct btree            *b;
        unsigned                nr;
        void                    *buf[BCH_REPLICAS_MAX];
        struct bio              *bio[BCH_REPLICAS_MAX];
        int                     err[BCH_REPLICAS_MAX];
};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
        struct btree_node *bn = data;
        struct btree_node_entry *bne;
        unsigned offset = 0;

        if (le64_to_cpu(bn->magic) != bset_magic(c))
                return 0;

        while (offset < c->opts.btree_node_size) {
                if (!offset) {
                        offset += vstruct_sectors(bn, c->block_bits);
                } else {
                        bne = data + (offset << 9);
                        if (bne->keys.seq != bn->keys.seq)
                                break;
                        offset += vstruct_sectors(bne, c->block_bits);
                }
        }

        return offset;
}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
        struct btree_node *bn = data;
        struct btree_node_entry *bne;

        if (!offset)
                return false;

        while (offset < c->opts.btree_node_size) {
                bne = data + (offset << 9);
                if (bne->keys.seq == bn->keys.seq)
                        return true;
                offset++;
        }

        return false;
1229 }
1230
1231 static void btree_node_read_all_replicas_done(struct closure *cl)
1232 {
1233         struct btree_node_read_all *ra =
1234                 container_of(cl, struct btree_node_read_all, cl);
1235         struct bch_fs *c = ra->c;
1236         struct btree *b = ra->b;
1237         bool dump_bset_maps = false;
1238         bool have_retry = false;
1239         int ret = 0, best = -1, write = READ;
1240         unsigned i, written, written2;
1241         __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1242                 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1243
1244         for (i = 0; i < ra->nr; i++) {
1245                 struct btree_node *bn = ra->buf[i];
1246
1247                 if (ra->err[i])
1248                         continue;
1249
1250                 if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1251                     (seq && seq != bn->keys.seq))
1252                         continue;
1253
1254                 if (best < 0) {
1255                         best = i;
1256                         written = btree_node_sectors_written(c, bn);
1257                         continue;
1258                 }
1259
1260                 written2 = btree_node_sectors_written(c, ra->buf[i]);
1261                 if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL,
1262                                  "btree node sectors written mismatch: %u != %u",
1263                                  written, written2) ||
1264                     btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1265                                  BTREE_ERR_FIXABLE, c, NULL, b, NULL,
1266                                  "found bset signature after last bset") ||
1267                     btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1268                                  BTREE_ERR_FIXABLE, c, NULL, b, NULL,
1269                                  "btree node replicas content mismatch"))
1270                         dump_bset_maps = true;
1271
1272                 if (written2 > written) {
1273                         written = written2;
1274                         best = i;
1275                 }
1276         }
1277 fsck_err:
1278         if (dump_bset_maps) {
1279                 for (i = 0; i < ra->nr; i++) {
1280                         char buf[200];
1281                         struct printbuf out = PBUF(buf);
1282                         struct btree_node *bn = ra->buf[i];
1283                         struct btree_node_entry *bne = NULL;
1284                         unsigned offset = 0, sectors;
1285                         bool gap = false;
1286
1287                         if (ra->err[i])
1288                                 continue;
1289
1290                         while (offset < c->opts.btree_node_size) {
1291                                 if (!offset) {
1292                                         sectors = vstruct_sectors(bn, c->block_bits);
1293                                 } else {
1294                                         bne = ra->buf[i] + (offset << 9);
1295                                         if (bne->keys.seq != bn->keys.seq)
1296                                                 break;
1297                                         sectors = vstruct_sectors(bne, c->block_bits);
1298                                 }
1299
1300                                 pr_buf(&out, " %u-%u", offset, offset + sectors);
1301                                 if (bne && bch2_journal_seq_is_blacklisted(c,
1302                                                         le64_to_cpu(bne->keys.journal_seq), false))
1303                                         pr_buf(&out, "*");
1304                                 offset += sectors;
1305                         }
1306
1307                         while (offset < c->opts.btree_node_size) {
1308                                 bne = ra->buf[i] + (offset << 9);
1309                                 if (bne->keys.seq == bn->keys.seq) {
1310                                         if (!gap)
1311                                                 pr_buf(&out, " GAP");
1312                                         gap = true;
1313
1314                                         sectors = vstruct_sectors(bne, c->block_bits);
1315                                         pr_buf(&out, " %u-%u", offset, offset + sectors);
1316                                         if (bch2_journal_seq_is_blacklisted(c,
1317                                                         le64_to_cpu(bne->keys.journal_seq), false))
1318                                                 pr_buf(&out, "*");
1319                                 }
1320                                 offset++;
1321                         }
1322
1323                         bch_err(c, "replica %u:%s", i, buf);
1324                 }
1325         }
1326
1327         if (best >= 0) {
1328                 memcpy(b->data, ra->buf[best], btree_bytes(c));
1329                 ret = bch2_btree_node_read_done(c, NULL, b, false);
1330         } else {
1331                 ret = -1;
1332         }
1333
1334         if (ret)
1335                 set_btree_node_read_error(b);
1336
1337         for (i = 0; i < ra->nr; i++) {
1338                 mempool_free(ra->buf[i], &c->btree_bounce_pool);
1339                 bio_put(ra->bio[i]);
1340         }
1341
1342         closure_debug_destroy(&ra->cl);
1343         kfree(ra);
1344
1345         clear_btree_node_read_in_flight(b);
1346         wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1347 }
1348
1349 static void btree_node_read_all_replicas_endio(struct bio *bio)
1350 {
1351         struct btree_read_bio *rb =
1352                 container_of(bio, struct btree_read_bio, bio);
1353         struct bch_fs *c        = rb->c;
1354         struct btree_node_read_all *ra = rb->ra;
1355
1356         if (rb->have_ioref) {
1357                 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1358                 bch2_latency_acct(ca, rb->start_time, READ);
1359         }
1360
1361         ra->err[rb->idx] = bio->bi_status;
1362         closure_put(&ra->cl);
1363 }
1364
1365 /*
1366  * XXX This allocates multiple times from the same mempools, and can deadlock
1367  * under sufficient memory pressure (but is only a debug path)
1368  */
1369 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1370 {
1371         struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1372         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1373         const union bch_extent_entry *entry;
1374         struct extent_ptr_decoded pick;
1375         struct btree_node_read_all *ra;
1376         unsigned i;
1377
1378         ra = kzalloc(sizeof(*ra), GFP_NOFS);
1379         if (!ra)
1380                 return -ENOMEM;
1381
1382         closure_init(&ra->cl, NULL);
1383         ra->c   = c;
1384         ra->b   = b;
1385         ra->nr  = bch2_bkey_nr_ptrs(k);
1386
1387         for (i = 0; i < ra->nr; i++) {
1388                 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1389                 ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
1390                                                                   btree_bytes(c)),
1391                                               &c->btree_bio);
1392         }
1393
1394         i = 0;
1395         bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1396                 struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1397                 struct btree_read_bio *rb =
1398                         container_of(ra->bio[i], struct btree_read_bio, bio);
1399                 rb->c                   = c;
1400                 rb->b                   = b;
1401                 rb->ra                  = ra;
1402                 rb->start_time          = local_clock();
1403                 rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1404                 rb->idx                 = i;
1405                 rb->pick                = pick;
1406                 rb->bio.bi_opf          = REQ_OP_READ|REQ_SYNC|REQ_META;
1407                 rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1408                 rb->bio.bi_end_io       = btree_node_read_all_replicas_endio;
1409                 bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
1410
1411                 if (rb->have_ioref) {
1412                         this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1413                                      bio_sectors(&rb->bio));
1414                         bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1415
1416                         closure_get(&ra->cl);
1417                         submit_bio(&rb->bio);
1418                 } else {
1419                         ra->err[i] = BLK_STS_REMOVED;
1420                 }
1421
1422                 i++;
1423         }
1424
1425         if (sync) {
1426                 closure_sync(&ra->cl);
1427                 btree_node_read_all_replicas_done(&ra->cl);
1428         } else {
1429                 continue_at(&ra->cl, btree_node_read_all_replicas_done,
1430                             c->io_complete_wq);
1431         }
1432
1433         return 0;
1434 }
1435
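/*
 * Normal btree node read path: pick a single replica to read from with
 * bch2_bkey_pick_read_device(); completion and error handling happen in
 * btree_node_read_work(). With the bch2_verify_all_btree_replicas debug
 * option set, all replicas are read via btree_node_read_all_replicas()
 * instead.
 */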
1436 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1437                           bool sync)
1438 {
1439         struct extent_ptr_decoded pick;
1440         struct btree_read_bio *rb;
1441         struct bch_dev *ca;
1442         struct bio *bio;
1443         char buf[200];
1444         int ret;
1445
1446         btree_pos_to_text(&PBUF(buf), c, b);
1447         trace_btree_read(c, b);
1448
1449         if (bch2_verify_all_btree_replicas &&
1450             !btree_node_read_all_replicas(c, b, sync))
1451                 return;
1452
1453         ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1454                                          NULL, &pick);
1455         if (bch2_fs_fatal_err_on(ret <= 0, c,
1456                         "btree node read error: no device to read from\n"
1457                         " at %s", buf)) {
1458                 set_btree_node_read_error(b);
1459                 return;
1460         }
1461
1462         ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1463
1464         bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
1465                                                    btree_bytes(c)),
1466                                &c->btree_bio);
1467         rb = container_of(bio, struct btree_read_bio, bio);
1468         rb->c                   = c;
1469         rb->b                   = b;
1470         rb->ra                  = NULL;
1471         rb->start_time          = local_clock();
1472         rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1473         rb->pick                = pick;
1474         INIT_WORK(&rb->work, btree_node_read_work);
1475         bio->bi_opf             = REQ_OP_READ|REQ_SYNC|REQ_META;
1476         bio->bi_iter.bi_sector  = pick.ptr.offset;
1477         bio->bi_end_io          = btree_node_read_endio;
1478         bch2_bio_map(bio, b->data, btree_bytes(c));
1479
1480         if (rb->have_ioref) {
1481                 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1482                              bio_sectors(bio));
1483                 bio_set_dev(bio, ca->disk_sb.bdev);
1484
1485                 if (sync) {
1486                         submit_bio_wait(bio);
1487
1488                         btree_node_read_work(&rb->work);
1489                 } else {
1490                         submit_bio(bio);
1491                 }
1492         } else {
1493                 bio->bi_status = BLK_STS_REMOVED;
1494
1495                 if (sync)
1496                         btree_node_read_work(&rb->work);
1497                 else
1498                         queue_work(c->io_complete_wq, &rb->work);
1499         }
1500 }
1501
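/*
 * Read a btree root, synchronously - typically run at mount/recovery time,
 * before the btree cache is populated. The cannibalize lock loop guarantees
 * the node allocation eventually succeeds; on read error the node is torn
 * down again and -EIO returned.
 */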
1502 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1503                         const struct bkey_i *k, unsigned level)
1504 {
1505         struct closure cl;
1506         struct btree *b;
1507         int ret;
1508
1509         closure_init_stack(&cl);
1510
1511         do {
1512                 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1513                 closure_sync(&cl);
1514         } while (ret);
1515
1516         b = bch2_btree_node_mem_alloc(c);
1517         bch2_btree_cache_cannibalize_unlock(c);
1518
1519         BUG_ON(IS_ERR(b));
1520
1521         bkey_copy(&b->key, k);
1522         BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1523
1524         set_btree_node_read_in_flight(b);
1525
1526         bch2_btree_node_read(c, b, true);
1527
1528         if (btree_node_read_error(b)) {
1529                 bch2_btree_node_hash_remove(&c->btree_cache, b);
1530
1531                 mutex_lock(&c->btree_cache.lock);
1532                 list_move(&b->list, &c->btree_cache.freeable);
1533                 mutex_unlock(&c->btree_cache.lock);
1534
1535                 ret = -EIO;
1536                 goto err;
1537         }
1538
1539         bch2_btree_set_root_for_read(c, b);
1540 err:
1541         six_unlock_write(&b->c.lock);
1542         six_unlock_intent(&b->c.lock);
1543
1544         return ret;
1545 }
1546
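/*
 * b->will_make_reachable is a tagged pointer: the low bit means an in-flight
 * interior update (a struct btree_update) still holds a ref on this node; the
 * remaining bits are the btree_update pointer itself. Atomically clear the
 * flag, and if we were the thread that cleared it, drop that ref:
 */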
1547 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1548                               struct btree_write *w)
1549 {
1550         unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1551
1552         do {
1553                 old = new = v;
1554                 if (!(old & 1))
1555                         break;
1556
1557                 new &= ~1UL;
1558         } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1559
1560         if (old & 1)
1561                 closure_put(&((struct btree_update *) new)->cl);
1562
1563         bch2_journal_pin_drop(&c->journal, &w->journal);
1564 }
1565
1566 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1567 {
1568         struct btree_write *w = btree_prev_write(b);
1569
1570         bch2_btree_complete_write(c, b, w);
1571         bch2_btree_node_io_unlock(b);
1572 }
1573
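/*
 * Write error path: drop the pointers to the devices the write failed on from
 * the node's key and update the key in the parent, retrying on -EINTR. If no
 * pointers would remain we can't recover - mark the node noevict and take the
 * filesystem down with a fatal error.
 */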
1574 static void bch2_btree_node_write_error(struct bch_fs *c,
1575                                         struct btree_write_bio *wbio)
1576 {
1577         struct btree *b         = wbio->wbio.bio.bi_private;
1578         struct bkey_buf k;
1579         struct bch_extent_ptr *ptr;
1580         struct btree_trans trans;
1581         struct btree_iter *iter;
1582         int ret;
1583
1584         bch2_bkey_buf_init(&k);
1585         bch2_trans_init(&trans, c, 0, 0);
1586
1587         iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
1588                                         BTREE_MAX_DEPTH, b->c.level, 0);
1589 retry:
1590         ret = bch2_btree_iter_traverse(iter);
1591         if (ret)
1592                 goto err;
1593
1594         /* is the node still at this position in the btree? */
1595         if (iter->l[b->c.level].b != b) {
1596                 /* node has been freed: */
1597                 BUG_ON(!btree_node_dying(b));
1598                 goto out;
1599         }
1600
1601         BUG_ON(!btree_node_hashed(b));
1602
1603         bch2_bkey_buf_copy(&k, c, &b->key);
1604
1605         bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
1606                 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1607
1608         if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
1609                 goto err;
1610
1611         ret = bch2_btree_node_update_key(&trans, iter, b, k.k);
1612         if (ret == -EINTR)
1613                 goto retry;
1614         if (ret)
1615                 goto err;
1616 out:
1617         bch2_trans_iter_put(&trans, iter);
1618         bch2_trans_exit(&trans);
1619         bch2_bkey_buf_exit(&k, c);
1620         bio_put(&wbio->wbio.bio);
1621         btree_node_write_done(c, b);
1622         return;
1623 err:
1624         set_btree_node_noevict(b);
1625         bch2_fs_fatal_error(c, "fatal error writing btree node");
1626         goto out;
1627 }
1628
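/*
 * Worker that drains btree_write_error_list, handing each failed write bio to
 * bch2_btree_node_write_error() - which may block, so this can't be done from
 * the endio path:
 */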
1629 void bch2_btree_write_error_work(struct work_struct *work)
1630 {
1631         struct bch_fs *c = container_of(work, struct bch_fs,
1632                                         btree_write_error_work);
1633         struct bio *bio;
1634
1635         while (1) {
1636                 spin_lock_irq(&c->btree_write_error_lock);
1637                 bio = bio_list_pop(&c->btree_write_error_list);
1638                 spin_unlock_irq(&c->btree_write_error_lock);
1639
1640                 if (!bio)
1641                         break;
1642
1643                 bch2_btree_node_write_error(c,
1644                         container_of(bio, struct btree_write_bio, wbio.bio));
1645         }
1646 }
1647
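/*
 * Write completion work: free the bounce buffer, then either punt to the
 * write error path if any replica failed, or finish the write:
 */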
1648 static void btree_node_write_work(struct work_struct *work)
1649 {
1650         struct btree_write_bio *wbio =
1651                 container_of(work, struct btree_write_bio, work);
1652         struct bch_fs *c        = wbio->wbio.c;
1653         struct btree *b         = wbio->wbio.bio.bi_private;
1654
1655         btree_bounce_free(c,
1656                 wbio->bytes,
1657                 wbio->wbio.used_mempool,
1658                 wbio->data);
1659
1660         if (wbio->wbio.failed.nr) {
1661                 unsigned long flags;
1662
1663                 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1664                 bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
1665                 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1666
1667                 queue_work(c->btree_error_wq, &c->btree_write_error_work);
1668                 return;
1669         }
1670
1671         bio_put(&wbio->wbio.bio);
1672         btree_node_write_done(c, b);
1673 }
1674
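/*
 * Endio for the (possibly split, per-replica) write bios: failures are
 * accumulated on the original bio's failed-device list, and the last
 * completion queues btree_node_write_work() - that work frees memory and may
 * block, so it can't run here in IRQ context.
 */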
1675 static void btree_node_write_endio(struct bio *bio)
1676 {
1677         struct bch_write_bio *wbio      = to_wbio(bio);
1678         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
1679         struct bch_write_bio *orig      = parent ?: wbio;
1680         struct bch_fs *c                = wbio->c;
1681         struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);
1682         unsigned long flags;
1683
1684         if (wbio->have_ioref)
1685                 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1686
1687         if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
1688                                bch2_blk_status_to_str(bio->bi_status)) ||
1689             bch2_meta_write_fault("btree")) {
1690                 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1691                 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1692                 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1693         }
1694
1695         if (wbio->have_ioref)
1696                 percpu_ref_put(&ca->io_ref);
1697
1698         if (parent) {
1699                 bio_put(bio);
1700                 bio_endio(&parent->bio);
1701         } else {
1702                 struct btree_write_bio *wb =
1703                         container_of(orig, struct btree_write_bio, wbio);
1704
1705                 INIT_WORK(&wb->work, btree_node_write_work);
1706                 queue_work(c->io_complete_wq, &wb->work);
1707         }
1708 }
1709
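/*
 * Last-ditch validation before a bset goes out to disk: if the node's key or
 * the bset fails validation, we flag the filesystem inconsistent rather than
 * write out a corrupt node.
 */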
1710 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1711                                    struct bset *i, unsigned sectors)
1712 {
1713         unsigned whiteout_u64s = 0;
1714         int ret;
1715
1716         if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_btree))
1717                 return -1;
1718
1719         ret = validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false) ?:
1720                 validate_bset(c, NULL, b, i, sectors, WRITE, false);
1721         if (ret) {
1722                 bch2_inconsistent_error(c);
1723                 dump_stack();
1724         }
1725
1726         return ret;
1727 }
1728
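/*
 * Actual IO submission is punted to a workqueue: bch2_submit_wbio_replicas()
 * submits the write to every device the key points to.
 */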
1729 static void btree_write_submit(struct work_struct *work)
1730 {
1731         struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1732
1733         bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &wbio->key);
1734 }
1735
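/*
 * Main btree node write path. Roughly:
 *
 *  - atomically claim the write by clearing the dirty bit (see the comment on
 *    the cmpxchg loop below)
 *  - sort all unwritten bsets plus pending whiteouts into a bounce buffer, as
 *    a single new bset
 *  - validate, (optionally) encrypt, and checksum the new bset
 *  - write it out at offset b->written: btree node writes are log structured
 *    within the node, never overwriting previously written bsets
 */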
1736 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
1737 {
1738         struct btree_write_bio *wbio;
1739         struct bset_tree *t;
1740         struct bset *i;
1741         struct btree_node *bn = NULL;
1742         struct btree_node_entry *bne = NULL;
1743         struct bch_extent_ptr *ptr;
1744         struct sort_iter sort_iter;
1745         struct nonce nonce;
1746         unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1747         u64 seq = 0;
1748         bool used_mempool;
1749         unsigned long old, new;
1750         bool validate_before_checksum = false;
1751         void *data;
1752
1753         BUG_ON(btree_node_write_in_flight(b));
1754
1755         if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
1756                 return;
1757
1758         /*
1759          * We may only have a read lock on the btree node - the dirty bit is our
1760          * "lock" against racing with other threads that may be trying to start
1761          * a write; we do a write iff we clear the dirty bit. Since setting the
1762          * dirty bit requires a write lock, we can't race with other threads
1763          * redirtying it:
1764          */
1765         do {
1766                 old = new = READ_ONCE(b->flags);
1767
1768                 if (!(old & (1 << BTREE_NODE_dirty)))
1769                         return;
1770
1771                 if (!btree_node_may_write(b))
1772                         return;
1773
1774                 if (old & (1 << BTREE_NODE_never_write))
1775                         return;
1776
1777                 if (old & (1 << BTREE_NODE_write_in_flight)) {
1778                         /*
1779                          * XXX waiting on btree writes with btree locks held -
1780                          * this can deadlock, and we hit the write error path
1781                          */
1782                         bch2_btree_node_wait_on_write(b);
1783                         continue;
1784                 }
1785
1786                 new &= ~(1 << BTREE_NODE_dirty);
1787                 new &= ~(1 << BTREE_NODE_need_write);
1788                 new |=  (1 << BTREE_NODE_write_in_flight);
1789                 new |=  (1 << BTREE_NODE_just_written);
1790                 new ^=  (1 << BTREE_NODE_write_idx);
1791         } while (cmpxchg_acquire(&b->flags, old, new) != old);
1792
1793         atomic_dec(&c->btree_cache.dirty);
1794
1795         BUG_ON(btree_node_fake(b));
1796         BUG_ON((b->will_make_reachable != 0) != !b->written);
1797
1798         BUG_ON(b->written >= c->opts.btree_node_size);
1799         BUG_ON(b->written & (c->opts.block_size - 1));
1800         BUG_ON(bset_written(b, btree_bset_last(b)));
1801         BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1802         BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1803
1804         bch2_sort_whiteouts(c, b);
1805
1806         sort_iter_init(&sort_iter, b);
1807
1808         bytes = !b->written
1809                 ? sizeof(struct btree_node)
1810                 : sizeof(struct btree_node_entry);
1811
1812         bytes += b->whiteout_u64s * sizeof(u64);
1813
1814         for_each_bset(b, t) {
1815                 i = bset(b, t);
1816
1817                 if (bset_written(b, i))
1818                         continue;
1819
1820                 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1821                 sort_iter_add(&sort_iter,
1822                               btree_bkey_first(b, t),
1823                               btree_bkey_last(b, t));
1824                 seq = max(seq, le64_to_cpu(i->journal_seq));
1825         }
1826
1827         BUG_ON(b->written && !seq);
1828
1829         /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
1830         bytes += 8;
1831
1832         /* buffer must be a multiple of the block size */
1833         bytes = round_up(bytes, block_bytes(c));
1834
1835         data = btree_bounce_alloc(c, bytes, &used_mempool);
1836
1837         if (!b->written) {
1838                 bn = data;
1839                 *bn = *b->data;
1840                 i = &bn->keys;
1841         } else {
1842                 bne = data;
1843                 bne->keys = b->data->keys;
1844                 i = &bne->keys;
1845         }
1846
1847         i->journal_seq  = cpu_to_le64(seq);
1848         i->u64s         = 0;
1849
1850         sort_iter_add(&sort_iter,
1851                       unwritten_whiteouts_start(c, b),
1852                       unwritten_whiteouts_end(c, b));
1853         SET_BSET_SEPARATE_WHITEOUTS(i, false);
1854
1855         b->whiteout_u64s = 0;
1856
1857         u64s = bch2_sort_keys(i->start, &sort_iter, false);
1858         le16_add_cpu(&i->u64s, u64s);
1859
1860         set_needs_whiteout(i, false);
1861
1862         /* do we have data to write? */
1863         if (b->written && !i->u64s)
1864                 goto nowrite;
1865
1866         bytes_to_write = vstruct_end(i) - data;
1867         sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1868
1869         memset(data + bytes_to_write, 0,
1870                (sectors_to_write << 9) - bytes_to_write);
1871
1872         BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
1873         BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1874         BUG_ON(i->seq != b->data->keys.seq);
1875
1876         i->version = c->sb.version < bcachefs_metadata_version_new_versioning
1877                 ? cpu_to_le16(BCH_BSET_VERSION_OLD)
1878                 : cpu_to_le16(c->sb.version);
1879         SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1880
1881         if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
1882                 validate_before_checksum = true;
1883
1884         /* validate_bset will be modifying: */
1885         if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
1886                 validate_before_checksum = true;
1887
1888         /* if we're going to be encrypting, check metadata validity first: */
1889         if (validate_before_checksum &&
1890             validate_bset_for_write(c, b, i, sectors_to_write))
1891                 goto err;
1892
1893         bset_encrypt(c, i, b->written << 9);
1894
1895         nonce = btree_nonce(i, b->written << 9);
1896
1897         if (bn)
1898                 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
1899         else
1900                 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1901
1902         /* if we're not encrypting, check metadata after checksumming: */
1903         if (!validate_before_checksum &&
1904             validate_bset_for_write(c, b, i, sectors_to_write))
1905                 goto err;
1906
1907         /*
1908          * We handle btree write errors by immediately halting the journal -
1909          * after we've done that, we can't issue any subsequent btree writes
1910          * because they might have pointers to new nodes that failed to write.
1911          *
1912          * Furthermore, there's no point in doing any more btree writes because
1913          * with the journal stopped, we're never going to update the journal to
1914          * reflect that those writes were done and the data flushed from the
1915          * journal:
1916          *
1917          * Also on journal error, the pending write may have updates that were
1918          * never journalled (interior nodes, see btree_update_nodes_written()) -
1919          * it's critical that we don't do the write in that case; otherwise we
1920          * will have updates visible that weren't in the journal:
1921          *
1922          * Make sure to update b->written so bch2_btree_init_next() doesn't
1923          * break:
1924          */
1925         if (bch2_journal_error(&c->journal) ||
1926             c->opts.nochanges)
1927                 goto err;
1928
1929         trace_btree_write(b, bytes_to_write, sectors_to_write);
1930
1931         wbio = container_of(bio_alloc_bioset(GFP_NOIO,
1932                                 buf_pages(data, sectors_to_write << 9),
1933                                 &c->btree_bio),
1934                             struct btree_write_bio, wbio.bio);
1935         wbio_init(&wbio->wbio.bio);
1936         wbio->data                      = data;
1937         wbio->bytes                     = bytes;
1938         wbio->wbio.c                    = c;
1939         wbio->wbio.used_mempool         = used_mempool;
1940         wbio->wbio.bio.bi_opf           = REQ_OP_WRITE|REQ_META;
1941         wbio->wbio.bio.bi_end_io        = btree_node_write_endio;
1942         wbio->wbio.bio.bi_private       = b;
1943
1944         bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
1945
1946         /*
1947          * If we're appending to a leaf node, we don't technically need FUA -
1948          * this write just needs to be persisted before the next journal write,
1949          * which will be marked FLUSH|FUA.
1950          *
1951          * Similarly if we're writing a new btree root - the pointer is going to
1952          * be in the next journal entry.
1953          *
1954          * But if we're writing a new btree node (that isn't a root) or
1955          * appending to a non-leaf btree node, we need either FUA or a flush
1956          * when we write the parent with the new pointer. FUA is cheaper than a
1957          * flush, and writes appending to leaf nodes aren't blocking anything, so
1958          * just make all btree node writes FUA to keep things sane.
1959          */
1960
1961         bkey_copy(&wbio->key, &b->key);
1962
1963         bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&wbio->key)), ptr)
1964                 ptr->offset += b->written;
1965
1966         b->written += sectors_to_write;
1967
1968         atomic64_inc(&c->btree_writes_nr);
1969         atomic64_add(sectors_to_write, &c->btree_writes_sectors);
1970
1971         INIT_WORK(&wbio->work, btree_write_submit);
1972         queue_work(c->io_complete_wq, &wbio->work);
1973         return;
1974 err:
1975         set_btree_node_noevict(b);
1976         b->written += sectors_to_write;
1977 nowrite:
1978         btree_bounce_free(c, bytes, used_mempool, data);
1979         btree_node_write_done(c, b);
1980 }
1981
1982 /*
1983  * Work that must be done with write lock held:
1984  */
1985 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
1986 {
1987         bool invalidated_iter = false;
1988         struct btree_node_entry *bne;
1989         struct bset_tree *t;
1990
1991         if (!btree_node_just_written(b))
1992                 return false;
1993
1994         BUG_ON(b->whiteout_u64s);
1995
1996         clear_btree_node_just_written(b);
1997
1998         /*
1999          * Note: immediately after write, bset_written() doesn't work - the
2000          * amount of data we had to write after compaction might have been
2001          * smaller than the offset of the last bset.
2002          *
2003          * However, we know that all bsets have been written here, as long as
2004          * we're still holding the write lock:
2005          */
2006
2007         /*
2008          * XXX: decide if we really want to unconditionally sort down to a
2009          * single bset:
2010          */
2011         if (b->nsets > 1) {
2012                 btree_node_sort(c, b, 0, b->nsets, true);
2013                 invalidated_iter = true;
2014         } else {
2015                 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2016         }
2017
2018         for_each_bset(b, t)
2019                 set_needs_whiteout(bset(b, t), true);
2020
2021         bch2_btree_verify(c, b);
2022
2023         /*
2024          * If later we don't unconditionally sort down to a single bset, we have
2025          * to ensure this is still true:
2026          */
2027         BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2028
2029         bne = want_new_bset(c, b);
2030         if (bne)
2031                 bch2_bset_init_next(c, b, bne);
2032
2033         bch2_btree_build_aux_trees(b);
2034
2035         return invalidated_iter;
2036 }
2037
2038 /*
2039  * Use this one if the node is locked - read locks are upgraded to intent when possible:
2040  */
2041 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2042                            enum six_lock_type lock_type_held)
2043 {
2044         if (lock_type_held == SIX_LOCK_intent ||
2045             (lock_type_held == SIX_LOCK_read &&
2046              six_lock_tryupgrade(&b->c.lock))) {
2047                 __bch2_btree_node_write(c, b);
2048
2049                 /* don't cycle lock unnecessarily: */
2050                 if (btree_node_just_written(b) &&
2051                     six_trylock_write(&b->c.lock)) {
2052                         bch2_btree_post_write_cleanup(c, b);
2053                         six_unlock_write(&b->c.lock);
2054                 }
2055
2056                 if (lock_type_held == SIX_LOCK_read)
2057                         six_lock_downgrade(&b->c.lock);
2058         } else {
2059                 __bch2_btree_node_write(c, b);
2060                 if (lock_type_held == SIX_LOCK_write &&
2061                     btree_node_just_written(b))
2062                         bch2_btree_post_write_cleanup(c, b);
2063         }
2064 }
2065
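/*
 * Wait for all in-flight reads or writes (@flag) to complete. We can't sleep
 * inside the RCU read-side section, so on finding a busy node we drop it,
 * wait on the bit, and restart the scan from scratch:
 */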
2066 static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2067 {
2068         struct bucket_table *tbl;
2069         struct rhash_head *pos;
2070         struct btree *b;
2071         unsigned i;
2072 restart:
2073         rcu_read_lock();
2074         for_each_cached_btree(b, c, tbl, i, pos)
2075                 if (test_bit(flag, &b->flags)) {
2076                         rcu_read_unlock();
2077                         wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2078                         goto restart;
2079
2080                 }
2081         rcu_read_unlock();
2082 }
2083
2084 void bch2_btree_flush_all_reads(struct bch_fs *c)
2085 {
2086         __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2087 }
2088
2089 void bch2_btree_flush_all_writes(struct bch_fs *c)
2090 {
2091         __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2092 }
2093
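/*
 * Debug output, one line per dirty node:
 *   d = dirty, n = need_write, l = level, w = sectors written,
 *   b = has blocked writes, r = will_make_reachable pointer set : low bit set
 */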
2094 void bch2_dirty_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c)
2095 {
2096         struct bucket_table *tbl;
2097         struct rhash_head *pos;
2098         struct btree *b;
2099         unsigned i;
2100
2101         rcu_read_lock();
2102         for_each_cached_btree(b, c, tbl, i, pos) {
2103                 unsigned long flags = READ_ONCE(b->flags);
2104
2105                 if (!(flags & (1 << BTREE_NODE_dirty)))
2106                         continue;
2107
2108                 pr_buf(out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
2109                        b,
2110                        (flags & (1 << BTREE_NODE_dirty)) != 0,
2111                        (flags & (1 << BTREE_NODE_need_write)) != 0,
2112                        b->c.level,
2113                        b->written,
2114                        !list_empty_careful(&b->write_blocked),
2115                        b->will_make_reachable != 0,
2116                        b->will_make_reachable & 1);
2117         }
2118         rcu_read_unlock();
2119 }