libbcachefs/btree_io.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_sort.h"
6 #include "btree_cache.h"
7 #include "btree_io.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
12 #include "buckets.h"
13 #include "checksum.h"
14 #include "debug.h"
15 #include "error.h"
16 #include "extents.h"
17 #include "io.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
20 #include "super-io.h"
21
22 #include <trace/events/bcachefs.h>
23
24 static void verify_no_dups(struct btree *b,
25                            struct bkey_packed *start,
26                            struct bkey_packed *end)
27 {
28 #ifdef CONFIG_BCACHEFS_DEBUG
29         struct bkey_packed *k;
30
31         for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
32                 struct bkey l = bkey_unpack_key(b, k);
33                 struct bkey r = bkey_unpack_key(b, bkey_next(k));
34
35                 BUG_ON(btree_node_is_extents(b)
36                        ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
37                        : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
38                 //BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
39         }
40 #endif
41 }
42
43 static void clear_needs_whiteout(struct bset *i)
44 {
45         struct bkey_packed *k;
46
47         for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
48                 k->needs_whiteout = false;
49 }
50
51 static void set_needs_whiteout(struct bset *i)
52 {
53         struct bkey_packed *k;
54
55         for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
56                 k->needs_whiteout = true;
57 }
58
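/*
 * Bounce buffers used for sorting/compacting bsets: try a plain page
 * allocation first, falling back to the preallocated mempool so that we can
 * always make forward progress.
 */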
59 static void btree_bounce_free(struct bch_fs *c, unsigned order,
60                               bool used_mempool, void *p)
61 {
62         if (used_mempool)
63                 mempool_free(p, &c->btree_bounce_pool);
64         else
65                 vpfree(p, PAGE_SIZE << order);
66 }
67
68 static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
69                                 bool *used_mempool)
70 {
71         void *p;
72
73         BUG_ON(order > btree_page_order(c));
74
75         *used_mempool = false;
76         p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
77         if (p)
78                 return p;
79
80         *used_mempool = true;
81         return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
82 }
83
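/*
 * Returns the number of dead u64s in @t (u64s in the bset minus the live u64s
 * tracked in b->nr) if this bset should be compacted in @mode, 0 otherwise.
 */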
84 static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
85                                     bool compacting,
86                                     enum compact_mode mode)
87 {
88         unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
89         unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];
90
91         if (mode == COMPACT_LAZY) {
92                 if (should_compact_bset_lazy(b, t) ||
93                     (compacting && !bset_written(b, bset(b, t))))
94                         return dead_u64s;
95         } else {
96                 if (bset_written(b, bset(b, t)))
97                         return dead_u64s;
98         }
99
100         return 0;
101 }
102
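/*
 * Compact a btree node in memory: drop keys that no longer need to be kept
 * (deleted keys and whiteouts that don't need to be written) from the bsets we
 * are allowed to modify in @mode, and collect the whiteouts that still have to
 * be written out into the node's unwritten whiteouts area, sorted.
 *
 * Returns true if any compaction was done.
 */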
103 bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
104                              enum compact_mode mode)
105 {
106         const struct bkey_format *f = &b->format;
107         struct bset_tree *t;
108         struct bkey_packed *whiteouts = NULL;
109         struct bkey_packed *u_start, *u_pos;
110         struct sort_iter sort_iter;
111         unsigned order, whiteout_u64s = 0, u64s;
112         bool used_mempool, compacting = false;
113
114         for_each_bset(b, t)
115                 whiteout_u64s += should_compact_bset(b, t,
116                                         whiteout_u64s != 0, mode);
117
118         if (!whiteout_u64s)
119                 return false;
120
121         sort_iter_init(&sort_iter, b);
122
123         whiteout_u64s += b->whiteout_u64s;
124         order = get_order(whiteout_u64s * sizeof(u64));
125
126         whiteouts = btree_bounce_alloc(c, order, &used_mempool);
127         u_start = u_pos = whiteouts;
128
129         memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
130                     b->whiteout_u64s);
131         u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);
132
133         sort_iter_add(&sort_iter, u_start, u_pos);
134
135         for_each_bset(b, t) {
136                 struct bset *i = bset(b, t);
137                 struct bkey_packed *k, *n, *out, *start, *end;
138                 struct btree_node_entry *src = NULL, *dst = NULL;
139
140                 if (t != b->set && !bset_written(b, i)) {
141                         src = container_of(i, struct btree_node_entry, keys);
142                         dst = max(write_block(b),
143                                   (void *) btree_bkey_last(b, t - 1));
144                 }
145
146                 if (!should_compact_bset(b, t, compacting, mode)) {
147                         if (src != dst) {
148                                 memmove(dst, src, sizeof(*src) +
149                                         le16_to_cpu(src->keys.u64s) *
150                                         sizeof(u64));
151                                 i = &dst->keys;
152                                 set_btree_bset(b, t, i);
153                         }
154                         continue;
155                 }
156
157                 compacting = true;
158                 u_start = u_pos;
159                 start = i->start;
160                 end = vstruct_last(i);
161
162                 if (src != dst) {
163                         memmove(dst, src, sizeof(*src));
164                         i = &dst->keys;
165                         set_btree_bset(b, t, i);
166                 }
167
168                 out = i->start;
169
170                 for (k = start; k != end; k = n) {
171                         n = bkey_next(k);
172
173                         if (bkey_deleted(k) && btree_node_is_extents(b))
174                                 continue;
175
176                         if (bkey_whiteout(k) && !k->needs_whiteout)
177                                 continue;
178
179                         if (bkey_whiteout(k)) {
180                                 unreserve_whiteout(b, k);
181                                 memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
182                                 set_bkeyp_val_u64s(f, u_pos, 0);
183                                 u_pos = bkey_next(u_pos);
184                         } else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
185                                 bkey_copy(out, k);
186                                 out = bkey_next(out);
187                         }
188                 }
189
190                 sort_iter_add(&sort_iter, u_start, u_pos);
191
192                 if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
193                         i->u64s = cpu_to_le16((u64 *) out - i->_data);
194                         set_btree_bset_end(b, t);
195                         bch2_bset_set_no_aux_tree(b, t);
196                 }
197         }
198
199         b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;
200
201         BUG_ON((void *) unwritten_whiteouts_start(c, b) <
202                (void *) btree_bkey_last(b, bset_tree_last(b)));
203
204         u64s = (btree_node_is_extents(b)
205                 ? bch2_sort_extent_whiteouts
206                 : bch2_sort_key_whiteouts)(unwritten_whiteouts_start(c, b),
207                                            &sort_iter);
208
209         BUG_ON(u64s > b->whiteout_u64s);
210         BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
211         BUG_ON(u_pos != whiteouts && !u64s);
212
213         if (u64s != b->whiteout_u64s) {
214                 void *src = unwritten_whiteouts_start(c, b);
215
216                 b->whiteout_u64s = u64s;
217                 memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
218         }
219
220         verify_no_dups(b,
221                        unwritten_whiteouts_start(c, b),
222                        unwritten_whiteouts_end(c, b));
223
224         btree_bounce_free(c, order, used_mempool, whiteouts);
225
226         if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
227                 bch2_btree_build_aux_trees(b);
228
229         bch_btree_keys_u64s_remaining(c, b);
230         bch2_verify_btree_nr_keys(b);
231
232         return true;
233 }
234
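/*
 * Drop all whiteouts from the bsets of @b that are worth compacting; returns
 * true if any bset was modified.
 */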
235 static bool bch2_drop_whiteouts(struct btree *b)
236 {
237         struct bset_tree *t;
238         bool ret = false;
239
240         for_each_bset(b, t) {
241                 struct bset *i = bset(b, t);
242                 struct bkey_packed *k, *n, *out, *start, *end;
243
244                 if (!should_compact_bset(b, t, true, COMPACT_WRITTEN))
245                         continue;
246
247                 start   = btree_bkey_first(b, t);
248                 end     = btree_bkey_last(b, t);
249
250                 if (!bset_written(b, i) &&
251                     t != b->set) {
252                         struct bset *dst =
253                                max_t(struct bset *, write_block(b),
254                                      (void *) btree_bkey_last(b, t - 1));
255
256                         memmove(dst, i, sizeof(struct bset));
257                         i = dst;
258                         set_btree_bset(b, t, i);
259                 }
260
261                 out = i->start;
262
263                 for (k = start; k != end; k = n) {
264                         n = bkey_next(k);
265
266                         if (!bkey_whiteout(k)) {
267                                 bkey_copy(out, k);
268                                 out = bkey_next(out);
269                         }
270                 }
271
272                 i->u64s = cpu_to_le16((u64 *) out - i->_data);
273                 bch2_bset_set_no_aux_tree(b, t);
274                 ret = true;
275         }
276
277         bch2_verify_btree_nr_keys(b);
278
279         return ret;
280 }
281
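/*
 * Sort bsets [start_idx, end_idx) of @b together into a bounce buffer, then
 * install the result: if we sorted the entire node we just swap buffers,
 * otherwise the sorted keys are copied back into the first bset of the range
 * and the remaining bsets/accounting are fixed up.
 */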
282 static void btree_node_sort(struct bch_fs *c, struct btree *b,
283                             struct btree_iter *iter,
284                             unsigned start_idx,
285                             unsigned end_idx,
286                             bool filter_whiteouts)
287 {
288         struct btree_node *out;
289         struct sort_iter sort_iter;
290         struct bset_tree *t;
291         struct bset *start_bset = bset(b, &b->set[start_idx]);
292         bool used_mempool = false;
293         u64 start_time, seq = 0;
294         unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
295         bool sorting_entire_node = start_idx == 0 &&
296                 end_idx == b->nsets;
297
298         sort_iter_init(&sort_iter, b);
299
300         for (t = b->set + start_idx;
301              t < b->set + end_idx;
302              t++) {
303                 u64s += le16_to_cpu(bset(b, t)->u64s);
304                 sort_iter_add(&sort_iter,
305                               btree_bkey_first(b, t),
306                               btree_bkey_last(b, t));
307         }
308
309         order = sorting_entire_node
310                 ? btree_page_order(c)
311                 : get_order(__vstruct_bytes(struct btree_node, u64s));
312
313         out = btree_bounce_alloc(c, order, &used_mempool);
314
315         start_time = local_clock();
316
317         if (btree_node_is_extents(b))
318                 filter_whiteouts = bset_written(b, start_bset);
319
320         u64s = (btree_node_is_extents(b)
321                 ? bch2_sort_extents
322                 : bch2_sort_keys)(out->keys.start,
323                                   &sort_iter,
324                                   filter_whiteouts);
325
326         out->keys.u64s = cpu_to_le16(u64s);
327
328         BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
329
330         if (sorting_entire_node)
331                 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
332                                        start_time);
333
334         /* Make sure we preserve bset journal_seq: */
335         for (t = b->set + start_idx; t < b->set + end_idx; t++)
336                 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
337         start_bset->journal_seq = cpu_to_le64(seq);
338
339         if (sorting_entire_node) {
340                 unsigned u64s = le16_to_cpu(out->keys.u64s);
341
342                 BUG_ON(order != btree_page_order(c));
343
344                 /*
345                  * Our temporary buffer is the same size as the btree node's
346                  * buffer, we can just swap buffers instead of doing a big
347                  * memcpy()
348                  */
349                 *out = *b->data;
350                 out->keys.u64s = cpu_to_le16(u64s);
351                 swap(out, b->data);
352                 set_btree_bset(b, b->set, &b->data->keys);
353         } else {
354                 start_bset->u64s = out->keys.u64s;
355                 memcpy_u64s(start_bset->start,
356                             out->keys.start,
357                             le16_to_cpu(out->keys.u64s));
358         }
359
360         for (i = start_idx + 1; i < end_idx; i++)
361                 b->nr.bset_u64s[start_idx] +=
362                         b->nr.bset_u64s[i];
363
364         b->nsets -= shift;
365
366         for (i = start_idx + 1; i < b->nsets; i++) {
367                 b->nr.bset_u64s[i]      = b->nr.bset_u64s[i + shift];
368                 b->set[i]               = b->set[i + shift];
369         }
370
371         for (i = b->nsets; i < MAX_BSETS; i++)
372                 b->nr.bset_u64s[i] = 0;
373
374         set_btree_bset_end(b, &b->set[start_idx]);
375         bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
376
377         btree_bounce_free(c, order, used_mempool, out);
378
379         bch2_verify_btree_nr_keys(b);
380 }
381
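/*
 * Sort the keys from @src into @dst, repacking them into @dst's key format
 * (and, for extents, merging adjacent keys where possible).
 */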
382 void bch2_btree_sort_into(struct bch_fs *c,
383                          struct btree *dst,
384                          struct btree *src)
385 {
386         struct btree_nr_keys nr;
387         struct btree_node_iter src_iter;
388         u64 start_time = local_clock();
389
390         BUG_ON(dst->nsets != 1);
391
392         bch2_bset_set_no_aux_tree(dst, dst->set);
393
394         bch2_btree_node_iter_init_from_start(&src_iter, src);
395
396         if (btree_node_is_extents(src))
397                 nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
398                                 src, &src_iter,
399                                 &dst->format,
400                                 true);
401         else
402                 nr = bch2_sort_repack(btree_bset_first(dst),
403                                 src, &src_iter,
404                                 &dst->format,
405                                 true);
406
407         bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
408                                start_time);
409
410         set_btree_bset_end(dst, dst->set);
411
412         dst->nr.live_u64s       += nr.live_u64s;
413         dst->nr.bset_u64s[0]    += nr.bset_u64s[0];
414         dst->nr.packed_keys     += nr.packed_keys;
415         dst->nr.unpacked_keys   += nr.unpacked_keys;
416
417         bch2_verify_btree_nr_keys(dst);
418 }
419
420 #define SORT_CRIT       (4096 / sizeof(u64))
421
422 /*
423  * We're about to add another bset to the btree node, so if there are currently
424  * too many bsets - sort some of them together:
425  */
426 static bool btree_node_compact(struct bch_fs *c, struct btree *b,
427                                struct btree_iter *iter)
428 {
429         unsigned unwritten_idx;
430         bool ret = false;
431
432         for (unwritten_idx = 0;
433              unwritten_idx < b->nsets;
434              unwritten_idx++)
435                 if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
436                         break;
437
438         if (b->nsets - unwritten_idx > 1) {
439                 btree_node_sort(c, b, iter, unwritten_idx,
440                                 b->nsets, false);
441                 ret = true;
442         }
443
444         if (unwritten_idx > 1) {
445                 btree_node_sort(c, b, iter, 0, unwritten_idx, false);
446                 ret = true;
447         }
448
449         return ret;
450 }
451
452 void bch2_btree_build_aux_trees(struct btree *b)
453 {
454         struct bset_tree *t;
455
456         for_each_bset(b, t)
457                 bch2_bset_build_aux_tree(b, t,
458                                 !bset_written(b, bset(b, t)) &&
459                                 t == bset_tree_last(b));
460 }
461
462 /*
463  * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
464  * inserted into
465  *
466  * Safe to call if there already is an unwritten bset - will only add a new bset
467  * if @b doesn't already have one.
468  *
469  * If we sorted (i.e. invalidated iterators), @iter is reinitialized.
470  */
471 void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
472                           struct btree_iter *iter)
473 {
474         struct btree_node_entry *bne;
475         bool did_sort;
476
477         EBUG_ON(!(b->lock.state.seq & 1));
478         EBUG_ON(iter && iter->l[b->level].b != b);
479
480         did_sort = btree_node_compact(c, b, iter);
481
482         bne = want_new_bset(c, b);
483         if (bne)
484                 bch2_bset_init_next(c, b, bne);
485
486         bch2_btree_build_aux_trees(b);
487
488         if (iter && did_sort)
489                 bch2_btree_iter_reinit_node(iter, b);
490 }
491
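/*
 * The nonce for a bset is derived from its seq, journal_seq and its byte
 * offset within the node. bset_encrypt() encrypts the bset contents in place
 * (for the first bset this also covers the btree_node header fields starting
 * at ->flags); the read path calls the same function to decrypt, since the
 * cipher works identically in both directions.
 */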
492 static struct nonce btree_nonce(struct bset *i, unsigned offset)
493 {
494         return (struct nonce) {{
495                 [0] = cpu_to_le32(offset),
496                 [1] = ((__le32 *) &i->seq)[0],
497                 [2] = ((__le32 *) &i->seq)[1],
498                 [3] = ((__le32 *) &i->journal_seq)[0] ^ BCH_NONCE_BTREE,
499         }};
500 }
501
502 static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
503 {
504         struct nonce nonce = btree_nonce(i, offset);
505
506         if (!offset) {
507                 struct btree_node *bn = container_of(i, struct btree_node, keys);
508                 unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
509
510                 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
511                              bytes);
512
513                 nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
514         }
515
516         bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
517                      vstruct_end(i) - (void *) i->_data);
518 }
519
520 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
521                           struct btree *b, struct bset *i,
522                           unsigned offset, int write)
523 {
524         pr_buf(out, "error validating btree node %s"
525                "at btree %u level %u/%u\n"
526                "pos %llu:%llu node offset %u",
527                write ? "before write " : "",
528                b->btree_id, b->level,
529                c->btree_roots[b->btree_id].level,
530                b->key.k.p.inode, b->key.k.p.offset,
531                b->written);
532         if (i)
533                 pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
534 }
535
536 enum btree_err_type {
537         BTREE_ERR_FIXABLE,
538         BTREE_ERR_WANT_RETRY,
539         BTREE_ERR_MUST_RETRY,
540         BTREE_ERR_FATAL,
541 };
542
543 enum btree_validate_ret {
544         BTREE_RETRY_READ = 64,
545 };
546
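/*
 * Report an inconsistency found while validating a btree node: fixable errors
 * hit on the read path before initial GC has finished are repaired via fsck;
 * otherwise, depending on @type, we fail the read (possibly retrying from
 * another replica) or, on the write path, flag the filesystem as inconsistent.
 */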
547 #define btree_err(type, c, b, i, msg, ...)                              \
548 ({                                                                      \
549         __label__ out;                                                  \
550         char _buf[300];                                                 \
551         struct printbuf out = PBUF(_buf);                               \
552                                                                         \
553         btree_err_msg(&out, c, b, i, b->written, write);                \
554         pr_buf(&out, ": " msg, ##__VA_ARGS__);                          \
555                                                                         \
556         if (type == BTREE_ERR_FIXABLE &&                                \
557             write == READ &&                                            \
558             !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {             \
559                 mustfix_fsck_err(c, "%s", _buf);                        \
560                 goto out;                                               \
561         }                                                               \
562                                                                         \
563         switch (write) {                                                \
564         case READ:                                                      \
565                 bch_err(c, "%s", _buf);                                 \
566                                                                         \
567                 switch (type) {                                         \
568                 case BTREE_ERR_FIXABLE:                                 \
569                         ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
570                         goto fsck_err;                                  \
571                 case BTREE_ERR_WANT_RETRY:                              \
572                         if (have_retry) {                               \
573                                 ret = BTREE_RETRY_READ;                 \
574                                 goto fsck_err;                          \
575                         }                                               \
576                         break;                                          \
577                 case BTREE_ERR_MUST_RETRY:                              \
578                         ret = BTREE_RETRY_READ;                         \
579                         goto fsck_err;                                  \
580                 case BTREE_ERR_FATAL:                                   \
581                         ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
582                         goto fsck_err;                                  \
583                 }                                                       \
584                 break;                                                  \
585         case WRITE:                                                     \
586                 bch_err(c, "corrupt metadata before write: %s", _buf);  \
587                                                                         \
588                 if (bch2_fs_inconsistent(c)) {                          \
589                         ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
590                         goto fsck_err;                                  \
591                 }                                                       \
592                 break;                                                  \
593         }                                                               \
594 out:                                                                    \
595         true;                                                           \
596 })
597
598 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
599
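/*
 * Validate a single bset, on read or before write: check the node header (for
 * the first bset) and the bset version and size, then walk the keys, checking
 * each key's size, format and contents, dropping invalid keys where the error
 * is fixable and verifying that keys are in order.
 */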
600 static int validate_bset(struct bch_fs *c, struct btree *b,
601                          struct bset *i, unsigned sectors,
602                          unsigned *whiteout_u64s, int write,
603                          bool have_retry)
604 {
605         struct bkey_packed *k, *prev = NULL;
606         struct bpos prev_pos = POS_MIN;
607         bool seen_non_whiteout = false;
608         unsigned version;
609         const char *err;
610         int ret = 0;
611
612         if (i == &b->data->keys) {
613                 /* These indicate that we read the wrong btree node: */
614                 btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
615                              BTREE_ERR_MUST_RETRY, c, b, i,
616                              "incorrect btree id");
617
618                 btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
619                              BTREE_ERR_MUST_RETRY, c, b, i,
620                              "incorrect level");
621
622                 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
623                         u64 *p = (u64 *) &b->data->ptr;
624
625                         *p = swab64(*p);
626                         bch2_bpos_swab(&b->data->min_key);
627                         bch2_bpos_swab(&b->data->max_key);
628                 }
629
630                 btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
631                              BTREE_ERR_MUST_RETRY, c, b, i,
632                              "incorrect max key");
633
634                 /* XXX: ideally we would be validating min_key too */
635 #if 0
636                 /*
637                  * not correct anymore, due to btree node write error
638                  * handling
639                  *
640                  * need to add b->data->seq to btree keys and verify
641                  * against that
642                  */
643                 btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
644                                                   b->data->ptr),
645                              BTREE_ERR_FATAL, c, b, i,
646                              "incorrect backpointer");
647 #endif
648                 err = bch2_bkey_format_validate(&b->data->format);
649                 btree_err_on(err,
650                              BTREE_ERR_FATAL, c, b, i,
651                              "invalid bkey format: %s", err);
652         }
653
654         version = le16_to_cpu(i->version);
655         btree_err_on((version != BCH_BSET_VERSION_OLD &&
656                       version < bcachefs_metadata_version_min) ||
657                      version >= bcachefs_metadata_version_max,
658                      BTREE_ERR_FATAL, c, b, i,
659                      "unsupported bset version");
660
661         if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
662                          BTREE_ERR_FIXABLE, c, b, i,
663                          "bset past end of btree node")) {
664                 i->u64s = 0;
665                 return 0;
666         }
667
668         btree_err_on(b->written && !i->u64s,
669                      BTREE_ERR_FIXABLE, c, b, i,
670                      "empty bset");
671
672         if (!BSET_SEPARATE_WHITEOUTS(i)) {
673                 seen_non_whiteout = true;
674                 *whiteout_u64s = 0;
675         }
676
677         for (k = i->start;
678              k != vstruct_last(i);) {
679                 struct bkey_s_c u;
680                 struct bkey tmp;
681                 const char *invalid;
682
683                 if (btree_err_on(!k->u64s,
684                                  BTREE_ERR_FIXABLE, c, b, i,
685                                  "KEY_U64s 0: %zu bytes of metadata lost",
686                                  vstruct_end(i) - (void *) k)) {
687                         i->u64s = cpu_to_le16((u64 *) k - i->_data);
688                         break;
689                 }
690
691                 if (btree_err_on(bkey_next(k) > vstruct_last(i),
692                                  BTREE_ERR_FIXABLE, c, b, i,
693                                  "key extends past end of bset")) {
694                         i->u64s = cpu_to_le16((u64 *) k - i->_data);
695                         break;
696                 }
697
698                 if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
699                                  BTREE_ERR_FIXABLE, c, b, i,
700                                  "invalid bkey format %u", k->format)) {
701                         i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
702                         memmove_u64s_down(k, bkey_next(k),
703                                           (u64 *) vstruct_end(i) - (u64 *) k);
704                         continue;
705                 }
706
707                 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
708                         bch2_bkey_swab(&b->format, k);
709
710                 if (!write &&
711                     version < bcachefs_metadata_version_bkey_renumber)
712                         bch2_bkey_renumber(btree_node_type(b), k, write);
713
714                 u = bkey_disassemble(b, k, &tmp);
715
716                 invalid = __bch2_bkey_invalid(c, u, btree_node_type(b)) ?:
717                         bch2_bkey_in_btree_node(b, u) ?:
718                         (write ? bch2_bkey_val_invalid(c, u) : NULL);
719                 if (invalid) {
720                         char buf[160];
721
722                         bch2_bkey_val_to_text(&PBUF(buf), c, u);
723                         btree_err(BTREE_ERR_FIXABLE, c, b, i,
724                                   "invalid bkey:\n%s\n%s", invalid, buf);
725
726                         i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
727                         memmove_u64s_down(k, bkey_next(k),
728                                           (u64 *) vstruct_end(i) - (u64 *) k);
729                         continue;
730                 }
731
732                 if (write &&
733                     version < bcachefs_metadata_version_bkey_renumber)
734                         bch2_bkey_renumber(btree_node_type(b), k, write);
735
736                 /*
737                  * with the separate whiteouts thing (used for extents), the
738                  * second set of keys actually can have whiteouts too, so we
739                  * can't solely go off bkey_whiteout()...
740                  */
741
742                 if (!seen_non_whiteout &&
743                     (!bkey_whiteout(k) ||
744                      (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
745                         *whiteout_u64s = k->_data - i->_data;
746                         seen_non_whiteout = true;
747                 } else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
748                         btree_err(BTREE_ERR_FATAL, c, b, i,
749                                   "keys out of order: %llu:%llu > %llu:%llu",
750                                   prev_pos.inode,
751                                   prev_pos.offset,
752                                   u.k->p.inode,
753                                   bkey_start_offset(u.k));
754                         /* XXX: repair this */
755                 }
756
757                 prev_pos = u.k->p;
758                 prev = k;
759                 k = bkey_next(k);
760         }
761
762         SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
763 fsck_err:
764         return ret;
765 }
766
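/*
 * Parse a freshly read btree node: verify each bset's checksum, decrypt and
 * validate it, skip bsets written under a blacklisted journal sequence number,
 * then merge-sort everything into a single bset and re-check key validity.
 * Returns nonzero if the read should be retried from another replica.
 */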
767 int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
768 {
769         struct btree_node_entry *bne;
770         struct btree_node_iter_large *iter;
771         struct btree_node *sorted;
772         struct bkey_packed *k;
773         struct bset *i;
774         bool used_mempool, blacklisted;
775         unsigned u64s;
776         int ret, retry_read = 0, write = READ;
777
778         iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
779         iter->used = 0;
780
781         if (bch2_meta_read_fault("btree"))
782                 btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
783                           "dynamic fault");
784
785         btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
786                      BTREE_ERR_MUST_RETRY, c, b, NULL,
787                      "bad magic");
788
789         btree_err_on(!b->data->keys.seq,
790                      BTREE_ERR_MUST_RETRY, c, b, NULL,
791                      "bad btree header");
792
793         while (b->written < c->opts.btree_node_size) {
794                 unsigned sectors, whiteout_u64s = 0;
795                 struct nonce nonce;
796                 struct bch_csum csum;
797                 bool first = !b->written;
798
799                 if (!b->written) {
800                         i = &b->data->keys;
801
802                         btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
803                                      BTREE_ERR_WANT_RETRY, c, b, i,
804                                      "unknown checksum type");
805
806                         nonce = btree_nonce(i, b->written << 9);
807                         csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
808
809                         btree_err_on(bch2_crc_cmp(csum, b->data->csum),
810                                      BTREE_ERR_WANT_RETRY, c, b, i,
811                                      "invalid checksum");
812
813                         bset_encrypt(c, i, b->written << 9);
814
815                         sectors = vstruct_sectors(b->data, c->block_bits);
816
817                         btree_node_set_format(b, b->data->format);
818                 } else {
819                         bne = write_block(b);
820                         i = &bne->keys;
821
822                         if (i->seq != b->data->keys.seq)
823                                 break;
824
825                         btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
826                                      BTREE_ERR_WANT_RETRY, c, b, i,
827                                      "unknown checksum type");
828
829                         nonce = btree_nonce(i, b->written << 9);
830                         csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
831
832                         btree_err_on(bch2_crc_cmp(csum, bne->csum),
833                                      BTREE_ERR_WANT_RETRY, c, b, i,
834                                      "invalid checksum");
835
836                         bset_encrypt(c, i, b->written << 9);
837
838                         sectors = vstruct_sectors(bne, c->block_bits);
839                 }
840
841                 ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
842                                     READ, have_retry);
843                 if (ret)
844                         goto fsck_err;
845
846                 b->written += sectors;
847
848                 blacklisted = bch2_journal_seq_is_blacklisted(c,
849                                         le64_to_cpu(i->journal_seq),
850                                         true);
851
852                 btree_err_on(blacklisted && first,
853                              BTREE_ERR_FIXABLE, c, b, i,
854                              "first btree node bset has blacklisted journal seq");
855                 if (blacklisted && !first)
856                         continue;
857
858                 bch2_btree_node_iter_large_push(iter, b,
859                                            i->start,
860                                            vstruct_idx(i, whiteout_u64s));
861
862                 bch2_btree_node_iter_large_push(iter, b,
863                                            vstruct_idx(i, whiteout_u64s),
864                                            vstruct_last(i));
865         }
866
867         for (bne = write_block(b);
868              bset_byte_offset(b, bne) < btree_bytes(c);
869              bne = (void *) bne + block_bytes(c))
870                 btree_err_on(bne->keys.seq == b->data->keys.seq,
871                              BTREE_ERR_WANT_RETRY, c, b, NULL,
872                              "found bset signature after last bset");
873
874         sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
875         sorted->keys.u64s = 0;
876
877         set_btree_bset(b, b->set, &b->data->keys);
878
879         b->nr = btree_node_is_extents(b)
880                 ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
881                 : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);
882
883         u64s = le16_to_cpu(sorted->keys.u64s);
884         *sorted = *b->data;
885         sorted->keys.u64s = cpu_to_le16(u64s);
886         swap(sorted, b->data);
887         set_btree_bset(b, b->set, &b->data->keys);
888         b->nsets = 1;
889
890         BUG_ON(b->nr.live_u64s != u64s);
891
892         btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);
893
894         i = &b->data->keys;
895         for (k = i->start; k != vstruct_last(i);) {
896                 struct bkey tmp;
897                 struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
898                 const char *invalid = bch2_bkey_val_invalid(c, u);
899
900                 if (invalid ||
901                     (inject_invalid_keys(c) &&
902                      !bversion_cmp(u.k->version, MAX_VERSION))) {
903                         char buf[160];
904
905                         bch2_bkey_val_to_text(&PBUF(buf), c, u);
906                         btree_err(BTREE_ERR_FIXABLE, c, b, i,
907                                   "invalid bkey %s: %s", buf, invalid);
908
909                         btree_keys_account_key_drop(&b->nr, 0, k);
910
911                         i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
912                         memmove_u64s_down(k, bkey_next(k),
913                                           (u64 *) vstruct_end(i) - (u64 *) k);
914                         set_btree_bset_end(b, b->set);
915                         continue;
916                 }
917
918                 k = bkey_next(k);
919         }
920
921         bch2_bset_build_aux_tree(b, b->set, false);
922
923         set_needs_whiteout(btree_bset_first(b));
924
925         btree_node_reset_sib_u64s(b);
926 out:
927         mempool_free(iter, &c->fill_iter);
928         return retry_read;
929 fsck_err:
930         if (ret == BTREE_RETRY_READ) {
931                 retry_read = 1;
932         } else {
933                 bch2_inconsistent_error(c);
934                 set_btree_node_read_error(b);
935         }
936         goto out;
937 }
938
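/*
 * Read completion worker: attempt to parse the node we just read; on I/O or
 * validation failure, retry the read from the remaining replicas until one
 * succeeds or there are no devices left to read from.
 */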
939 static void btree_node_read_work(struct work_struct *work)
940 {
941         struct btree_read_bio *rb =
942                 container_of(work, struct btree_read_bio, work);
943         struct bch_fs *c        = rb->c;
944         struct bch_dev *ca      = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
945         struct btree *b         = rb->bio.bi_private;
946         struct bio *bio         = &rb->bio;
947         struct bch_io_failures failed = { .nr = 0 };
948         bool can_retry;
949
950         goto start;
951         while (1) {
952                 bch_info(c, "retrying read");
953                 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
954                 rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
955                 bio_reset(bio);
956                 bio->bi_opf             = REQ_OP_READ|REQ_SYNC|REQ_META;
957                 bio->bi_iter.bi_sector  = rb->pick.ptr.offset;
958                 bio->bi_iter.bi_size    = btree_bytes(c);
959
960                 if (rb->have_ioref) {
961                         bio_set_dev(bio, ca->disk_sb.bdev);
962                         submit_bio_wait(bio);
963                 } else {
964                         bio->bi_status = BLK_STS_REMOVED;
965                 }
966 start:
967                 bch2_dev_io_err_on(bio->bi_status, ca, "btree read");
968                 if (rb->have_ioref)
969                         percpu_ref_put(&ca->io_ref);
970                 rb->have_ioref = false;
971
972                 bch2_mark_io_failure(&failed, &rb->pick);
973
974                 can_retry = bch2_bkey_pick_read_device(c,
975                                 bkey_i_to_s_c(&b->key),
976                                 &failed, &rb->pick) > 0;
977
978                 if (!bio->bi_status &&
979                     !bch2_btree_node_read_done(c, b, can_retry))
980                         break;
981
982                 if (!can_retry) {
983                         set_btree_node_read_error(b);
984                         break;
985                 }
986         }
987
988         bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
989                                rb->start_time);
990         bio_put(&rb->bio);
991         clear_btree_node_read_in_flight(b);
992         wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
993 }
994
995 static void btree_node_read_endio(struct bio *bio)
996 {
997         struct btree_read_bio *rb =
998                 container_of(bio, struct btree_read_bio, bio);
999         struct bch_fs *c        = rb->c;
1000
1001         if (rb->have_ioref) {
1002                 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1003                 bch2_latency_acct(ca, rb->start_time, READ);
1004         }
1005
1006         queue_work(system_unbound_wq, &rb->work);
1007 }
1008
1009 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1010                           bool sync)
1011 {
1012         struct extent_ptr_decoded pick;
1013         struct btree_read_bio *rb;
1014         struct bch_dev *ca;
1015         struct bio *bio;
1016         int ret;
1017
1018         trace_btree_read(c, b);
1019
1020         ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1021                                          NULL, &pick);
1022         if (bch2_fs_fatal_err_on(ret <= 0, c,
1023                         "btree node read error: no device to read from")) {
1024                 set_btree_node_read_error(b);
1025                 return;
1026         }
1027
1028         ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1029
1030         bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
1031                                                    btree_bytes(c)),
1032                                &c->btree_bio);
1033         rb = container_of(bio, struct btree_read_bio, bio);
1034         rb->c                   = c;
1035         rb->start_time          = local_clock();
1036         rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1037         rb->pick                = pick;
1038         INIT_WORK(&rb->work, btree_node_read_work);
1039         bio->bi_opf             = REQ_OP_READ|REQ_SYNC|REQ_META;
1040         bio->bi_iter.bi_sector  = pick.ptr.offset;
1041         bio->bi_end_io          = btree_node_read_endio;
1042         bio->bi_private         = b;
1043         bch2_bio_map(bio, b->data, btree_bytes(c));
1044
1045         set_btree_node_read_in_flight(b);
1046
1047         if (rb->have_ioref) {
1048                 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
1049                              bio_sectors(bio));
1050                 bio_set_dev(bio, ca->disk_sb.bdev);
1051
1052                 if (sync) {
1053                         submit_bio_wait(bio);
1054
1055                         bio->bi_private = b;
1056                         btree_node_read_work(&rb->work);
1057                 } else {
1058                         submit_bio(bio);
1059                 }
1060         } else {
1061                 bio->bi_status = BLK_STS_REMOVED;
1062
1063                 if (sync)
1064                         btree_node_read_work(&rb->work);
1065                 else
1066                         queue_work(system_unbound_wq, &rb->work);
1067
1068         }
1069 }
1070
1071 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1072                         const struct bkey_i *k, unsigned level)
1073 {
1074         struct closure cl;
1075         struct btree *b;
1076         int ret;
1077
1078         closure_init_stack(&cl);
1079
1080         do {
1081                 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1082                 closure_sync(&cl);
1083         } while (ret);
1084
1085         b = bch2_btree_node_mem_alloc(c);
1086         bch2_btree_cache_cannibalize_unlock(c);
1087
1088         BUG_ON(IS_ERR(b));
1089
1090         bkey_copy(&b->key, k);
1091         BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1092
1093         bch2_btree_node_read(c, b, true);
1094
1095         if (btree_node_read_error(b)) {
1096                 bch2_btree_node_hash_remove(&c->btree_cache, b);
1097
1098                 mutex_lock(&c->btree_cache.lock);
1099                 list_move(&b->list, &c->btree_cache.freeable);
1100                 mutex_unlock(&c->btree_cache.lock);
1101
1102                 ret = -EIO;
1103                 goto err;
1104         }
1105
1106         bch2_btree_set_root_for_read(c, b);
1107 err:
1108         six_unlock_write(&b->lock);
1109         six_unlock_intent(&b->lock);
1110
1111         return ret;
1112 }
1113
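/*
 * A write of @b has completed: clear the pending bit in will_make_reachable
 * (dropping the closure ref it held on the in-flight btree_update), drop the
 * journal pin for this write and wake up any waiters.
 */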
1114 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1115                               struct btree_write *w)
1116 {
1117         unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1118
1119         do {
1120                 old = new = v;
1121                 if (!(old & 1))
1122                         break;
1123
1124                 new &= ~1UL;
1125         } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1126
1127         if (old & 1)
1128                 closure_put(&((struct btree_update *) new)->cl);
1129
1130         bch2_journal_pin_drop(&c->journal, &w->journal);
1131         closure_wake_up(&w->wait);
1132 }
1133
1134 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1135 {
1136         struct btree_write *w = btree_prev_write(b);
1137
1138         bch2_btree_complete_write(c, b, w);
1139         btree_node_io_unlock(b);
1140 }
1141
1142 static void bch2_btree_node_write_error(struct bch_fs *c,
1143                                         struct btree_write_bio *wbio)
1144 {
1145         struct btree *b         = wbio->wbio.bio.bi_private;
1146         __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1147         struct bkey_i_btree_ptr *new_key;
1148         struct bkey_s_btree_ptr bp;
1149         struct bch_extent_ptr *ptr;
1150         struct btree_trans trans;
1151         struct btree_iter *iter;
1152         int ret;
1153
1154         bch2_trans_init(&trans, c, 0, 0);
1155
1156         iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
1157                                         BTREE_MAX_DEPTH, b->level, 0);
1158 retry:
1159         ret = bch2_btree_iter_traverse(iter);
1160         if (ret)
1161                 goto err;
1162
1163         /* has node been freed? */
1164         if (iter->l[b->level].b != b) {
1165                 /* node has been freed: */
1166                 BUG_ON(!btree_node_dying(b));
1167                 goto out;
1168         }
1169
1170         BUG_ON(!btree_node_hashed(b));
1171
1172         bkey_copy(&tmp.k, &b->key);
1173
1174         new_key = bkey_i_to_btree_ptr(&tmp.k);
1175         bp = btree_ptr_i_to_s(new_key);
1176
1177         bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
1178                 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1179
1180         if (!bch2_bkey_nr_ptrs(bp.s_c))
1181                 goto err;
1182
1183         ret = bch2_btree_node_update_key(c, iter, b, new_key);
1184         if (ret == -EINTR)
1185                 goto retry;
1186         if (ret)
1187                 goto err;
1188 out:
1189         bch2_trans_exit(&trans);
1190         bio_put(&wbio->wbio.bio);
1191         btree_node_write_done(c, b);
1192         return;
1193 err:
1194         set_btree_node_noevict(b);
1195         bch2_fs_fatal_error(c, "fatal error writing btree node");
1196         goto out;
1197 }
1198
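/*
 * Worker that handles btree node write errors queued by the write completion
 * path: pointers to the device(s) that failed are dropped from the node's key,
 * or a fatal filesystem error is raised if no valid pointers remain.
 */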
1199 void bch2_btree_write_error_work(struct work_struct *work)
1200 {
1201         struct bch_fs *c = container_of(work, struct bch_fs,
1202                                         btree_write_error_work);
1203         struct bio *bio;
1204
1205         while (1) {
1206                 spin_lock_irq(&c->btree_write_error_lock);
1207                 bio = bio_list_pop(&c->btree_write_error_list);
1208                 spin_unlock_irq(&c->btree_write_error_lock);
1209
1210                 if (!bio)
1211                         break;
1212
1213                 bch2_btree_node_write_error(c,
1214                         container_of(bio, struct btree_write_bio, wbio.bio));
1215         }
1216 }
1217
1218 static void btree_node_write_work(struct work_struct *work)
1219 {
1220         struct btree_write_bio *wbio =
1221                 container_of(work, struct btree_write_bio, work);
1222         struct bch_fs *c        = wbio->wbio.c;
1223         struct btree *b         = wbio->wbio.bio.bi_private;
1224
1225         btree_bounce_free(c,
1226                 wbio->wbio.order,
1227                 wbio->wbio.used_mempool,
1228                 wbio->data);
1229
1230         if (wbio->wbio.failed.nr) {
1231                 unsigned long flags;
1232
1233                 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1234                 bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
1235                 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1236
1237                 queue_work(c->wq, &c->btree_write_error_work);
1238                 return;
1239         }
1240
1241         bio_put(&wbio->wbio.bio);
1242         btree_node_write_done(c, b);
1243 }
1244
1245 static void btree_node_write_endio(struct bio *bio)
1246 {
1247         struct bch_write_bio *wbio      = to_wbio(bio);
1248         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
1249         struct bch_write_bio *orig      = parent ?: wbio;
1250         struct bch_fs *c                = wbio->c;
1251         struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);
1252         unsigned long flags;
1253
1254         if (wbio->have_ioref)
1255                 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1256
1257         if (bio->bi_status == BLK_STS_REMOVED ||
1258             bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
1259             bch2_meta_write_fault("btree")) {
1260                 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1261                 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1262                 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1263         }
1264
1265         if (wbio->have_ioref)
1266                 percpu_ref_put(&ca->io_ref);
1267
1268         if (parent) {
1269                 bio_put(bio);
1270                 bio_endio(&parent->bio);
1271         } else {
1272                 struct btree_write_bio *wb =
1273                         container_of(orig, struct btree_write_bio, wbio);
1274
1275                 INIT_WORK(&wb->work, btree_node_write_work);
1276                 queue_work(system_unbound_wq, &wb->work);
1277         }
1278 }
1279
1280 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1281                                    struct bset *i, unsigned sectors)
1282 {
1283         unsigned whiteout_u64s = 0;
1284         int ret;
1285
1286         if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
1287                 return -1;
1288
1289         ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
1290         if (ret)
1291                 bch2_inconsistent_error(c);
1292
1293         return ret;
1294 }
1295
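/*
 * Write out the unwritten portion of @b: atomically claim the dirty bit,
 * compact whiteouts, sort the unwritten bsets (and whiteouts) into a bounce
 * buffer, then checksum/encrypt, validate and submit the result.
 */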
1296 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1297                             enum six_lock_type lock_type_held)
1298 {
1299         struct btree_write_bio *wbio;
1300         struct bset_tree *t;
1301         struct bset *i;
1302         struct btree_node *bn = NULL;
1303         struct btree_node_entry *bne = NULL;
1304         BKEY_PADDED(key) k;
1305         struct bch_extent_ptr *ptr;
1306         struct sort_iter sort_iter;
1307         struct nonce nonce;
1308         unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
1309         u64 seq = 0;
1310         bool used_mempool;
1311         unsigned long old, new;
1312         bool validate_before_checksum = false;
1313         void *data;
1314
1315         if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
1316                 return;
1317
1318         /*
1319          * We may only have a read lock on the btree node - the dirty bit is our
1320          * "lock" against racing with other threads that may be trying to start
1321          * a write, we do a write iff we clear the dirty bit. Since setting the
1322          * dirty bit requires a write lock, we can't race with other threads
1323          * redirtying it:
1324          */
1325         do {
1326                 old = new = READ_ONCE(b->flags);
1327
1328                 if (!(old & (1 << BTREE_NODE_dirty)))
1329                         return;
1330
1331                 if (!btree_node_may_write(b))
1332                         return;
1333
1334                 if (old & (1 << BTREE_NODE_write_in_flight)) {
1335                         btree_node_wait_on_io(b);
1336                         continue;
1337                 }
1338
1339                 new &= ~(1 << BTREE_NODE_dirty);
1340                 new &= ~(1 << BTREE_NODE_need_write);
1341                 new |=  (1 << BTREE_NODE_write_in_flight);
1342                 new |=  (1 << BTREE_NODE_just_written);
1343                 new ^=  (1 << BTREE_NODE_write_idx);
1344         } while (cmpxchg_acquire(&b->flags, old, new) != old);
1345
1346         BUG_ON(btree_node_fake(b));
1347         BUG_ON((b->will_make_reachable != 0) != !b->written);
1348
1349         BUG_ON(b->written >= c->opts.btree_node_size);
1350         BUG_ON(b->written & (c->opts.block_size - 1));
1351         BUG_ON(bset_written(b, btree_bset_last(b)));
1352         BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1353         BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1354
1355         /*
1356          * We can't block on six_lock_write() here; another thread might be
1357          * trying to get a journal reservation with read locks held, and getting
1358          * a journal reservation might be blocked on flushing the journal and
1359          * doing btree writes:
1360          */
1361         if (lock_type_held == SIX_LOCK_intent &&
1362             six_trylock_write(&b->lock)) {
1363                 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
1364                 six_unlock_write(&b->lock);
1365         } else {
1366                 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
1367         }
1368
1369         BUG_ON(b->uncompacted_whiteout_u64s);
1370
1371         sort_iter_init(&sort_iter, b);
1372
1373         bytes = !b->written
1374                 ? sizeof(struct btree_node)
1375                 : sizeof(struct btree_node_entry);
1376
1377         bytes += b->whiteout_u64s * sizeof(u64);
1378
1379         for_each_bset(b, t) {
1380                 i = bset(b, t);
1381
1382                 if (bset_written(b, i))
1383                         continue;
1384
1385                 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1386                 sort_iter_add(&sort_iter,
1387                               btree_bkey_first(b, t),
1388                               btree_bkey_last(b, t));
1389                 seq = max(seq, le64_to_cpu(i->journal_seq));
1390         }
1391
1392         order = get_order(bytes);
1393         data = btree_bounce_alloc(c, order, &used_mempool);
1394
1395         if (!b->written) {
1396                 bn = data;
1397                 *bn = *b->data;
1398                 i = &bn->keys;
1399         } else {
1400                 bne = data;
1401                 bne->keys = b->data->keys;
1402                 i = &bne->keys;
1403         }
1404
1405         i->journal_seq  = cpu_to_le64(seq);
1406         i->u64s         = 0;
1407
1408         if (!btree_node_is_extents(b)) {
1409                 sort_iter_add(&sort_iter,
1410                               unwritten_whiteouts_start(c, b),
1411                               unwritten_whiteouts_end(c, b));
1412                 SET_BSET_SEPARATE_WHITEOUTS(i, false);
1413         } else {
1414                 memcpy_u64s(i->start,
1415                             unwritten_whiteouts_start(c, b),
1416                             b->whiteout_u64s);
1417                 i->u64s = cpu_to_le16(b->whiteout_u64s);
1418                 SET_BSET_SEPARATE_WHITEOUTS(i, true);
1419         }
1420
1421         b->whiteout_u64s = 0;
1422
1423         u64s = btree_node_is_extents(b)
1424                 ? bch2_sort_extents(vstruct_last(i), &sort_iter, false)
1425                 : bch2_sort_keys(i->start, &sort_iter, false);
1426         le16_add_cpu(&i->u64s, u64s);
1427
1428         clear_needs_whiteout(i);
1429
1430         /* do we have data to write? */
1431         if (b->written && !i->u64s)
1432                 goto nowrite;
1433
1434         bytes_to_write = vstruct_end(i) - data;
1435         sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1436
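        /*
         * Zero the tail so we never write uninitialized memory: e.g. with 4k
         * blocks, a 5000 byte bset is padded out to 8192 bytes (16 sectors).
         */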
1437         memset(data + bytes_to_write, 0,
1438                (sectors_to_write << 9) - bytes_to_write);
1439
1440         BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
1441         BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1442         BUG_ON(i->seq != b->data->keys.seq);
1443
1444         i->version = c->sb.version < bcachefs_metadata_version_new_versioning
1445                 ? cpu_to_le16(BCH_BSET_VERSION_OLD)
1446                 : cpu_to_le16(c->sb.version);
1447         SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1448
1449         if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
1450                 validate_before_checksum = true;
1451
1452         /* validate_bset will be modifying the keys - do it before checksumming: */
1453         if (le16_to_cpu(i->version) <
1454             bcachefs_metadata_version_bkey_renumber)
1455                 validate_before_checksum = true;
1456
1457         /* if we're going to be encrypting, check metadata validity first: */
1458         if (validate_before_checksum &&
1459             validate_bset_for_write(c, b, i, sectors_to_write))
1460                 goto err;
1461
1462         bset_encrypt(c, i, b->written << 9);
1463
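        /*
         * The checksum (a MAC, when encrypting) covers everything after the
         * csum field itself; the nonce mixes in the bset's byte offset within
         * the node.
         */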
1464         nonce = btree_nonce(i, b->written << 9);
1465
1466         if (bn)
1467                 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
1468         else
1469                 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1470
1471         /* if we didn't validate before checksumming, check metadata validity now: */
1472         if (!validate_before_checksum &&
1473             validate_bset_for_write(c, b, i, sectors_to_write))
1474                 goto err;
1475
1476         /*
1477          * We handle btree write errors by immediately halting the journal -
1478          * after we've done that, we can't issue any subsequent btree writes
1479          * because they might have pointers to new nodes that failed to write.
1480          *
1481          * Furthermore, there's no point in doing any more btree writes because
1482          * with the journal stopped, we're never going to update the journal to
1483          * reflect that those writes were done and the data flushed from the
1484          * journal:
1485          *
1486          * Make sure to update b->written so bch2_btree_init_next() doesn't
1487          * break:
1488          */
1489         if (bch2_journal_error(&c->journal) ||
1490             c->opts.nochanges)
1491                 goto err;
1492
1493         trace_btree_write(b, bytes_to_write, sectors_to_write);
1494
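        /*
         * Set up the write bio; the bounce buffer, its order and whether it
         * came from the mempool are stashed in the wbio so it can be freed
         * once the write completes.
         */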
1495         wbio = container_of(bio_alloc_bioset(GFP_NOIO,
1496                                 buf_pages(data, sectors_to_write << 9),
1497                                 &c->btree_bio),
1498                             struct btree_write_bio, wbio.bio);
1499         wbio_init(&wbio->wbio.bio);
1500         wbio->data                      = data;
1501         wbio->wbio.order                = order;
1502         wbio->wbio.used_mempool         = used_mempool;
1503         wbio->wbio.bio.bi_opf           = REQ_OP_WRITE|REQ_META;
1504         wbio->wbio.bio.bi_end_io        = btree_node_write_endio;
1505         wbio->wbio.bio.bi_private       = b;
1506
1507         if (b->level || !b->written)
1508                 wbio->wbio.bio.bi_opf |= REQ_FUA;
1509
1510         bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
1511
1512         /*
1513          * If we're appending to a leaf node, we don't technically need FUA -
1514          * this write just needs to be persisted before the next journal write,
1515          * which will be marked FLUSH|FUA.
1516          *
1517          * Similarly if we're writing a new btree root - the pointer is going to
1518          * be in the next journal entry.
1519          *
1520          * But if we're writing a new btree node (that isn't a root) or
1521          * appending to a non-leaf btree node, we need either FUA or a flush
1522          * when we write the parent with the new pointer. FUA is cheaper than a
1523          * flush, and writes appending to leaf nodes aren't blocking anything,
1524          * so just make all btree node writes FUA to keep things sane.
1525          */
1526
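        /*
         * Btree node writes are appends: take a copy of the node's key and
         * advance each pointer by the sectors already written, so this bset
         * lands right after the previously written ones.
         */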
1527         bkey_copy(&k.key, &b->key);
1528
1529         bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
1530                 ptr->offset += b->written;
1531
1532         b->written += sectors_to_write;
1533
1534         bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
1535         return;
1536 err:
1537         set_btree_node_noevict(b);
1538         b->written += sectors_to_write;
1539 nowrite:
1540         btree_bounce_free(c, order, used_mempool, data);
1541         btree_node_write_done(c, b);
1542 }
1543
1544 /*
1545  * Work that must be done with write lock held:
1546  */
1547 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
1548 {
1549         bool invalidated_iter = false;
1550         struct btree_node_entry *bne;
1551         struct bset_tree *t;
1552
1553         if (!btree_node_just_written(b))
1554                 return false;
1555
1556         BUG_ON(b->whiteout_u64s);
1557         BUG_ON(b->uncompacted_whiteout_u64s);
1558
1559         clear_btree_node_just_written(b);
1560
1561         /*
1562          * Note: immediately after write, bset_written() doesn't work - the
1563          * amount of data we had to write after compaction might have been
1564          * smaller than the offset of the last bset.
1565          *
1566          * However, we know that all bsets have been written here, as long as
1567          * we're still holding the write lock:
1568          */
1569
1570         /*
1571          * XXX: decide if we really want to unconditionally sort down to a
1572          * single bset:
1573          */
1574         if (b->nsets > 1) {
1575                 btree_node_sort(c, b, NULL, 0, b->nsets, true);
1576                 invalidated_iter = true;
1577         } else {
1578                 invalidated_iter = bch2_drop_whiteouts(b);
1579         }
1580
1581         for_each_bset(b, t)
1582                 set_needs_whiteout(bset(b, t));
1583
1584         bch2_btree_verify(c, b);
1585
1586         /*
1587          * If later we don't unconditionally sort down to a single bset, we have
1588          * to ensure this is still true:
1589          */
1590         BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
1591
1592         bne = want_new_bset(c, b);
1593         if (bne)
1594                 bch2_bset_init_next(c, b, bne);
1595
1596         bch2_btree_build_aux_trees(b);
1597
1598         return invalidated_iter;
1599 }
1600
1601 /*
1602  * Use this one if the node is read or intent locked (never write locked):
1603  */
1604 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1605                           enum six_lock_type lock_type_held)
1606 {
1607         BUG_ON(lock_type_held == SIX_LOCK_write);
1608
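        /*
         * With an intent lock (or a read lock we can upgrade) we can also do
         * the post-write cleanup here; with only a read lock we just issue
         * the write.
         */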
1609         if (lock_type_held == SIX_LOCK_intent ||
1610             six_lock_tryupgrade(&b->lock)) {
1611                 __bch2_btree_node_write(c, b, SIX_LOCK_intent);
1612
1613                 /* don't cycle lock unnecessarily: */
1614                 if (btree_node_just_written(b) &&
1615                     six_trylock_write(&b->lock)) {
1616                         bch2_btree_post_write_cleanup(c, b);
1617                         six_unlock_write(&b->lock);
1618                 }
1619
1620                 if (lock_type_held == SIX_LOCK_read)
1621                         six_lock_downgrade(&b->lock);
1622         } else {
1623                 __bch2_btree_node_write(c, b, SIX_LOCK_read);
1624         }
1625 }
1626
1627 static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
1628 {
1629         struct bucket_table *tbl;
1630         struct rhash_head *pos;
1631         struct btree *b;
1632         unsigned i;
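        /*
         * Walk the btree node cache under RCU; whenever we find a node with
         * the flag set, drop RCU, wait for the bit to clear and rescan, since
         * the hash table may have changed while we slept.
         */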
1633 restart:
1634         rcu_read_lock();
1635         for_each_cached_btree(b, c, tbl, i, pos)
1636                 if (test_bit(flag, &b->flags)) {
1637                         rcu_read_unlock();
1638                         wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
1639                         goto restart;
1640                 }
1641
1642         rcu_read_unlock();
1643 }
1644
1645 void bch2_btree_flush_all_reads(struct bch_fs *c)
1646 {
1647         __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
1648 }
1649
1650 void bch2_btree_flush_all_writes(struct bch_fs *c)
1651 {
1652         __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
1653 }
1654
1655 void bch2_btree_verify_flushed(struct bch_fs *c)
1656 {
1657         struct bucket_table *tbl;
1658         struct rhash_head *pos;
1659         struct btree *b;
1660         unsigned i;
1661
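        /*
         * After flushing all writes, every cached node should be clean with
         * no write still in flight.
         */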
1662         rcu_read_lock();
1663         for_each_cached_btree(b, c, tbl, i, pos) {
1664                 unsigned long flags = READ_ONCE(b->flags);
1665
1666                 BUG_ON((flags & (1 << BTREE_NODE_dirty)) ||
1667                        (flags & (1 << BTREE_NODE_write_in_flight)));
1668         }
1669         rcu_read_unlock();
1670 }
1671
1672 ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
1673 {
1674         struct printbuf out = _PBUF(buf, PAGE_SIZE);
1675         struct bucket_table *tbl;
1676         struct rhash_head *pos;
1677         struct btree *b;
1678         unsigned i;
1679
1680         rcu_read_lock();
1681         for_each_cached_btree(b, c, tbl, i, pos) {
1682                 unsigned long flags = READ_ONCE(b->flags);
1683                 unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;
1684
1685                 if (!(flags & (1 << BTREE_NODE_dirty)))
1686                         continue;
1687
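                /*
                 * d dirty, n need_write, l level, w sectors written,
                 * b write blocked, r will_make_reachable (pointer set : low
                 * bit), c/p waiters on the current/other write
                 */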
1688                 pr_buf(&out, "%p d %u n %u l %u w %u b %u r %u:%lu c %u p %u\n",
1689                        b,
1690                        (flags & (1 << BTREE_NODE_dirty)) != 0,
1691                        (flags & (1 << BTREE_NODE_need_write)) != 0,
1692                        b->level,
1693                        b->written,
1694                        !list_empty_careful(&b->write_blocked),
1695                        b->will_make_reachable != 0,
1696                        b->will_make_reachable & 1,
1697                        b->writes[ idx].wait.list.first != NULL,
1698                        b->writes[!idx].wait.list.first != NULL);
1699         }
1700         rcu_read_unlock();
1701
1702         return out.pos - buf;
1703 }