1
2 #include "bcachefs.h"
3 #include "alloc.h"
4 #include "bkey_methods.h"
5 #include "btree_cache.h"
6 #include "btree_gc.h"
7 #include "btree_update.h"
8 #include "btree_update_interior.h"
9 #include "btree_io.h"
10 #include "btree_iter.h"
11 #include "btree_locking.h"
12 #include "buckets.h"
13 #include "extents.h"
14 #include "journal.h"
15 #include "keylist.h"
16 #include "super-io.h"
17
18 #include <linux/random.h>
19 #include <trace/events/bcachefs.h>
20
21 static void btree_node_will_make_reachable(struct btree_update *,
22                                            struct btree *);
23 static void btree_update_drop_new_node(struct bch_fs *, struct btree *);
24 static void bch2_btree_set_root_ondisk(struct bch_fs *, struct btree *, int);
25
26 /* Debug code: */
27
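/* Verify that the last key in an interior node is the node's own key (b->key.k.p): */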
28 static void btree_node_interior_verify(struct btree *b)
29 {
30         struct btree_node_iter iter;
31         struct bkey_packed *k;
32
33         BUG_ON(!b->level);
34
35         bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
36 #if 1
37         BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
38                bkey_cmp_left_packed(b, k, &b->key.k.p));
39
40         BUG_ON((bch2_btree_node_iter_advance(&iter, b),
41                 !bch2_btree_node_iter_end(&iter)));
42 #else
43         const char *msg;
44
45         msg = "not found";
46         k = bch2_btree_node_iter_peek(&iter, b);
47         if (!k)
48                 goto err;
49
50         msg = "isn't what it should be";
51         if (bkey_cmp_left_packed(b, k, &b->key.k.p))
52                 goto err;
53
54         bch2_btree_node_iter_advance(&iter, b);
55
56         msg = "isn't last key";
57         if (!bch2_btree_node_iter_end(&iter))
58                 goto err;
59         return;
60 err:
61         bch2_dump_btree_node(b);
62         printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
63                b->key.k.p.offset, msg);
64         BUG();
65 #endif
66 }
67
68 /* Calculate ideal packed bkey format for new btree nodes: */
69
70 void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
71 {
72         struct bkey_packed *k;
73         struct bset_tree *t;
74         struct bkey uk;
75
76         bch2_bkey_format_add_pos(s, b->data->min_key);
77
78         for_each_bset(b, t)
79                 for (k = btree_bkey_first(b, t);
80                      k != btree_bkey_last(b, t);
81                      k = bkey_next(k))
82                         if (!bkey_whiteout(k)) {
83                                 uk = bkey_unpack_key(b, k);
84                                 bch2_bkey_format_add_key(s, &uk);
85                         }
86 }
87
88 static struct bkey_format bch2_btree_calc_format(struct btree *b)
89 {
90         struct bkey_format_state s;
91
92         bch2_bkey_format_init(&s);
93         __bch2_btree_calc_format(&s, b);
94
95         return bch2_bkey_format_done(&s);
96 }
97
98 static size_t btree_node_u64s_with_format(struct btree *b,
99                                           struct bkey_format *new_f)
100 {
101         struct bkey_format *old_f = &b->format;
102
103         /* stupid integer promotion rules */
104         ssize_t delta =
105             (((int) new_f->key_u64s - old_f->key_u64s) *
106              (int) b->nr.packed_keys) +
107             (((int) new_f->key_u64s - BKEY_U64s) *
108              (int) b->nr.unpacked_keys);
109
110         BUG_ON(delta + b->nr.live_u64s < 0);
111
112         return b->nr.live_u64s + delta;
113 }
114
115 /**
116  * bch2_btree_node_format_fits - check if we could rewrite node with a new format
117  *
118  * This assumes all keys can pack with the new format -- it just checks if
119  * the re-packed keys would fit inside the node itself.
120  */
121 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
122                                  struct bkey_format *new_f)
123 {
124         size_t u64s = btree_node_u64s_with_format(b, new_f);
125
126         return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
127 }
128
129 /* Btree node freeing/allocation: */
130
131 static bool btree_key_matches(struct bch_fs *c,
132                               struct bkey_s_c_extent l,
133                               struct bkey_s_c_extent r)
134 {
135         const struct bch_extent_ptr *ptr1, *ptr2;
136
137         extent_for_each_ptr(l, ptr1)
138                 extent_for_each_ptr(r, ptr2)
139                         if (ptr1->dev == ptr2->dev &&
140                             ptr1->gen == ptr2->gen &&
141                             ptr1->offset == ptr2->offset)
142                                 return true;
143
144         return false;
145 }
146
147 /*
148  * We're doing the index update that makes @b unreachable; update accounting
149  * to reflect that.
150  *
151  * Must be called _before_ btree_update_updated_root() or
152  * btree_update_updated_node():
153  */
154 static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
155                                        struct bkey_s_c k,
156                                        struct bch_fs_usage *stats)
157 {
158         struct bch_fs *c = as->c;
159         struct pending_btree_node_free *d;
160         unsigned replicas;
161
162         /*
163          * btree_update lock is only needed here to avoid racing with
164          * gc:
165          */
166         mutex_lock(&c->btree_interior_update_lock);
167
168         for (d = as->pending; d < as->pending + as->nr_pending; d++)
169                 if (!bkey_cmp(k.k->p, d->key.k.p) &&
170                     btree_key_matches(c, bkey_s_c_to_extent(k),
171                                       bkey_i_to_s_c_extent(&d->key)))
172                         goto found;
173         BUG();
174 found:
175         BUG_ON(d->index_update_done);
176         d->index_update_done = true;
177
178         /*
179          * Btree nodes are accounted as freed in bch_alloc_stats when they're
180          * freed from the index:
181          */
182         replicas = bch2_extent_nr_dirty_ptrs(k);
183         if (replicas)
184                 stats->s[replicas - 1].data[S_META] -= c->opts.btree_node_size;
185
186         /*
187          * We're dropping @k from the btree, but it's still live until the
188          * index update is persistent so we need to keep a reference around for
189          * mark and sweep to find - that's primarily what the
190          * btree_node_pending_free list is for.
191          *
192          * So here (when we set index_update_done = true), we're moving an
193          * existing reference to a different part of the larger "gc keyspace" -
194          * and the new position comes after the old position, since GC marks
195          * the pending free list after it walks the btree.
196          *
197          * If we move the reference while mark and sweep is _between_ the old
198          * and the new position, mark and sweep will see the reference twice
199          * and it'll get double accounted - so check for that here and subtract
200          * to cancel out one of mark and sweep's markings if necessary:
201          */
202
203         /*
204          * bch2_mark_key() compares the current gc pos to the pos we're
205          * moving this reference from, hence one comparison here:
206          */
207         if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
208                 struct bch_fs_usage tmp = { 0 };
209
210                 bch2_mark_key(c, bkey_i_to_s_c(&d->key),
211                              -c->opts.btree_node_size, true, b
212                              ? gc_pos_btree_node(b)
213                              : gc_pos_btree_root(as->btree_id),
214                              &tmp, 0, 0);
215                 /*
216                  * Don't apply tmp - pending deletes aren't tracked in
217                  * bch_alloc_stats:
218                  */
219         }
220
221         mutex_unlock(&c->btree_interior_update_lock);
222 }
223
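/*
 * Remove @b from the btree node hash and put it on the freeable list. The node
 * must already be clean, with no open bucket refs, no blocked writes and no
 * pending reachability:
 */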
224 static void __btree_node_free(struct bch_fs *c, struct btree *b,
225                               struct btree_iter *iter)
226 {
227         trace_btree_node_free(c, b);
228
229         BUG_ON(btree_node_dirty(b));
230         BUG_ON(btree_node_need_write(b));
231         BUG_ON(b == btree_node_root(c, b));
232         BUG_ON(b->ob.nr);
233         BUG_ON(!list_empty(&b->write_blocked));
234         BUG_ON(b->will_make_reachable);
235
236         clear_btree_node_noevict(b);
237
238         six_lock_write(&b->lock);
239
240         bch2_btree_node_hash_remove(&c->btree_cache, b);
241
242         mutex_lock(&c->btree_cache.lock);
243         list_move(&b->list, &c->btree_cache.freeable);
244         mutex_unlock(&c->btree_cache.lock);
245
246         /*
247          * By using six_unlock_write() directly instead of
248          * bch2_btree_node_unlock_write(), we don't update the iterator's
249          * sequence numbers and cause future bch2_btree_node_relock() calls to
250          * fail:
251          */
252         six_unlock_write(&b->lock);
253 }
254
255 void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
256 {
257         struct btree_ob_ref ob = b->ob;
258
259         btree_update_drop_new_node(c, b);
260
261         b->ob.nr = 0;
262
263         clear_btree_node_dirty(b);
264
265         __btree_node_free(c, b, NULL);
266
267         bch2_open_bucket_put_refs(c, &ob.nr, ob.refs);
268 }
269
270 void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
271                                 struct btree_iter *iter)
272 {
273         /*
274          * Is this a node that isn't reachable on disk yet?
275          *
276          * Nodes that aren't reachable yet have writes blocked until they're
277          * reachable - now that we've cancelled any pending writes and moved
278          * things waiting on that write to wait on this update, we can drop this
279          * node from the list of nodes that the other update is making
280          * reachable, prior to freeing it:
281          */
282         btree_update_drop_new_node(c, b);
283
284         bch2_btree_iter_node_drop_linked(iter, b);
285
286         __btree_node_free(c, b, iter);
287
288         bch2_btree_iter_node_drop(iter, b);
289 }
290
291 static void bch2_btree_node_free_ondisk(struct bch_fs *c,
292                                         struct pending_btree_node_free *pending)
293 {
294         struct bch_fs_usage stats = { 0 };
295
296         BUG_ON(!pending->index_update_done);
297
298         bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
299                      -c->opts.btree_node_size, true,
300                      gc_phase(GC_PHASE_PENDING_DELETE),
301                      &stats, 0, 0);
302         /*
303          * Don't apply stats - pending deletes aren't tracked in
304          * bch_alloc_stats:
305          */
306 }
307
308 void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b)
309 {
310         bch2_open_bucket_put_refs(c, &b->ob.nr, b->ob.refs);
311 }
312
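/*
 * Allocate disk space for a new btree node: reuse a cached reservation if one
 * is available, otherwise allocate sectors from the btree write point, then
 * grab an in-memory node for it:
 */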
313 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
314                                              struct disk_reservation *res,
315                                              struct closure *cl,
316                                              unsigned flags)
317 {
318         struct write_point *wp;
319         struct btree *b;
320         BKEY_PADDED(k) tmp;
321         struct bkey_i_extent *e;
322         struct btree_ob_ref ob;
323         struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
324         unsigned nr_reserve;
325         enum alloc_reserve alloc_reserve;
326
327         if (flags & BTREE_INSERT_USE_ALLOC_RESERVE) {
328                 nr_reserve      = 0;
329                 alloc_reserve   = RESERVE_ALLOC;
330         } else if (flags & BTREE_INSERT_USE_RESERVE) {
331                 nr_reserve      = BTREE_NODE_RESERVE / 2;
332                 alloc_reserve   = RESERVE_BTREE;
333         } else {
334                 nr_reserve      = BTREE_NODE_RESERVE;
335                 alloc_reserve   = RESERVE_NONE;
336         }
337
338         mutex_lock(&c->btree_reserve_cache_lock);
339         if (c->btree_reserve_cache_nr > nr_reserve) {
340                 struct btree_alloc *a =
341                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
342
343                 ob = a->ob;
344                 bkey_copy(&tmp.k, &a->k);
345                 mutex_unlock(&c->btree_reserve_cache_lock);
346                 goto mem_alloc;
347         }
348         mutex_unlock(&c->btree_reserve_cache_lock);
349
350 retry:
351         wp = bch2_alloc_sectors_start(c, NULL,
352                                       writepoint_ptr(&c->btree_write_point),
353                                       &devs_have,
354                                       res->nr_replicas,
355                                       c->opts.metadata_replicas_required,
356                                       alloc_reserve, 0, cl);
357         if (IS_ERR(wp))
358                 return ERR_CAST(wp);
359
360         if (wp->sectors_free < c->opts.btree_node_size) {
361                 struct open_bucket *ob;
362                 unsigned i;
363
364                 writepoint_for_each_ptr(wp, ob, i)
365                         if (ob->sectors_free < c->opts.btree_node_size)
366                                 ob->sectors_free = 0;
367
368                 bch2_alloc_sectors_done(c, wp);
369                 goto retry;
370         }
371
372         e = bkey_extent_init(&tmp.k);
373         bch2_alloc_sectors_append_ptrs(c, wp, e, c->opts.btree_node_size);
374
375         ob.nr = 0;
376         bch2_open_bucket_get(c, wp, &ob.nr, ob.refs);
377         bch2_alloc_sectors_done(c, wp);
378 mem_alloc:
379         b = bch2_btree_node_mem_alloc(c);
380
381         /* we hold cannibalize_lock: */
382         BUG_ON(IS_ERR(b));
383         BUG_ON(b->ob.nr);
384
385         bkey_copy(&b->key, &tmp.k);
386         b->ob = ob;
387
388         return b;
389 }
390
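/*
 * Take a preallocated node from the update's reserve and initialize it (hash
 * it, set up its header and first bset) for the given level and btree ID:
 */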
391 static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned level)
392 {
393         struct bch_fs *c = as->c;
394         struct btree *b;
395
396         BUG_ON(level >= BTREE_MAX_DEPTH);
397         BUG_ON(!as->reserve->nr);
398
399         b = as->reserve->b[--as->reserve->nr];
400
401         BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id));
402
403         set_btree_node_accessed(b);
404         set_btree_node_dirty(b);
405
406         bch2_bset_init_first(b, &b->data->keys);
407         memset(&b->nr, 0, sizeof(b->nr));
408         b->data->magic = cpu_to_le64(bset_magic(c));
409         b->data->flags = 0;
410         SET_BTREE_NODE_ID(b->data, as->btree_id);
411         SET_BTREE_NODE_LEVEL(b->data, level);
412         b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
413
414         bch2_btree_build_aux_trees(b);
415
416         btree_node_will_make_reachable(as, b);
417
418         trace_btree_node_alloc(c, b);
419         return b;
420 }
421
422 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
423                                                   struct btree *b,
424                                                   struct bkey_format format)
425 {
426         struct btree *n;
427
428         n = bch2_btree_node_alloc(as, b->level);
429
430         n->data->min_key        = b->data->min_key;
431         n->data->max_key        = b->data->max_key;
432         n->data->format         = format;
433         SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
434
435         btree_node_set_format(n, format);
436
437         bch2_btree_sort_into(as->c, n, b);
438
439         btree_node_reset_sib_u64s(n);
440
441         n->key.k.p = b->key.k.p;
442         return n;
443 }
444
445 static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
446                                                        struct btree *b)
447 {
448         struct bkey_format new_f = bch2_btree_calc_format(b);
449
450         /*
451          * The keys might expand with the new format - if they wouldn't fit in
452          * the btree node anymore, use the old format for now:
453          */
454         if (!bch2_btree_node_format_fits(as->c, b, &new_f))
455                 new_f = b->format;
456
457         return __bch2_btree_node_alloc_replacement(as, b, new_f);
458 }
459
460 static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
461 {
462         struct btree *b = bch2_btree_node_alloc(as, level);
463
464         b->data->min_key = POS_MIN;
465         b->data->max_key = POS_MAX;
466         b->data->format = bch2_btree_calc_format(b);
467         b->key.k.p = POS_MAX;
468
469         btree_node_set_format(b, b->data->format);
470         bch2_btree_build_aux_trees(b);
471
472         six_unlock_write(&b->lock);
473
474         return b;
475 }
476
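/*
 * Release a reserve: unused nodes go back to the btree reserve cache if
 * there's room, otherwise their open buckets are dropped and the nodes freed:
 */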
477 static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
478 {
479         bch2_disk_reservation_put(c, &reserve->disk_res);
480
481         mutex_lock(&c->btree_reserve_cache_lock);
482
483         while (reserve->nr) {
484                 struct btree *b = reserve->b[--reserve->nr];
485
486                 six_unlock_write(&b->lock);
487
488                 if (c->btree_reserve_cache_nr <
489                     ARRAY_SIZE(c->btree_reserve_cache)) {
490                         struct btree_alloc *a =
491                                 &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
492
493                         a->ob = b->ob;
494                         b->ob.nr = 0;
495                         bkey_copy(&a->k, &b->key);
496                 } else {
497                         bch2_btree_open_bucket_put(c, b);
498                 }
499
500                 __btree_node_free(c, b, NULL);
501
502                 six_unlock_intent(&b->lock);
503         }
504
505         mutex_unlock(&c->btree_reserve_cache_lock);
506
507         mempool_free(reserve, &c->btree_reserve_pool);
508 }
509
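/*
 * Get a disk reservation and preallocate @nr_nodes btree nodes for an interior
 * node update:
 */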
510 static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
511                                                     unsigned nr_nodes,
512                                                     unsigned flags,
513                                                     struct closure *cl)
514 {
515         struct btree_reserve *reserve;
516         struct btree *b;
517         struct disk_reservation disk_res = { 0, 0 };
518         unsigned sectors = nr_nodes * c->opts.btree_node_size;
519         int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD;
520
521         if (flags & BTREE_INSERT_NOFAIL)
522                 disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
523
524         /*
525          * This check isn't necessary for correctness - it's just to potentially
526          * prevent us from doing a lot of work that'll end up being wasted:
527          */
528         ret = bch2_journal_error(&c->journal);
529         if (ret)
530                 return ERR_PTR(ret);
531
532         if (bch2_disk_reservation_get(c, &disk_res, sectors,
533                                       c->opts.metadata_replicas,
534                                       disk_res_flags))
535                 return ERR_PTR(-ENOSPC);
536
537         BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
538
539         /*
540          * Protects reaping from the btree node cache and using the btree node
541          * open bucket reserve:
542          */
543         ret = bch2_btree_cache_cannibalize_lock(c, cl);
544         if (ret) {
545                 bch2_disk_reservation_put(c, &disk_res);
546                 return ERR_PTR(ret);
547         }
548
549         reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
550
551         reserve->disk_res = disk_res;
552         reserve->nr = 0;
553
554         while (reserve->nr < nr_nodes) {
555                 b = __bch2_btree_node_alloc(c, &disk_res,
556                                             flags & BTREE_INSERT_NOWAIT
557                                             ? NULL : cl, flags);
558                 if (IS_ERR(b)) {
559                         ret = PTR_ERR(b);
560                         goto err_free;
561                 }
562
563                 ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE,
564                                               bkey_i_to_s_c(&b->key));
565                 if (ret)
566                         goto err_free;
567
568                 reserve->b[reserve->nr++] = b;
569         }
570
571         bch2_btree_cache_cannibalize_unlock(c);
572         return reserve;
573 err_free:
574         bch2_btree_reserve_put(c, reserve);
575         bch2_btree_cache_cannibalize_unlock(c);
576         trace_btree_reserve_get_fail(c, nr_nodes, cl);
577         return ERR_PTR(ret);
578 }
579
580 /* Asynchronous interior node update machinery */
581
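/*
 * Tear down a finished btree_update: release its reserve, drop it from the
 * list of interior updates in flight and wake up anyone waiting on it:
 */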
582 static void bch2_btree_update_free(struct btree_update *as)
583 {
584         struct bch_fs *c = as->c;
585
586         BUG_ON(as->nr_new_nodes);
587         BUG_ON(as->nr_pending);
588
589         if (as->reserve)
590                 bch2_btree_reserve_put(c, as->reserve);
591
592         mutex_lock(&c->btree_interior_update_lock);
593         list_del(&as->list);
594
595         closure_debug_destroy(&as->cl);
596         mempool_free(as, &c->btree_interior_update_pool);
597         percpu_ref_put(&c->writes);
598
599         closure_wake_up(&c->btree_interior_update_wait);
600         mutex_unlock(&c->btree_interior_update_lock);
601 }
602
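/*
 * Called once the new nodes are reachable on disk: finish writing out any new
 * nodes that still need it, free the old nodes' disk space and tear down the
 * update:
 */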
603 static void btree_update_nodes_reachable(struct closure *cl)
604 {
605         struct btree_update *as = container_of(cl, struct btree_update, cl);
606         struct bch_fs *c = as->c;
607
608         bch2_journal_pin_drop(&c->journal, &as->journal);
609
610         mutex_lock(&c->btree_interior_update_lock);
611
612         while (as->nr_new_nodes) {
613                 struct btree *b = as->new_nodes[--as->nr_new_nodes];
614
615                 BUG_ON(b->will_make_reachable != (unsigned long) as);
616                 b->will_make_reachable = 0;
617                 mutex_unlock(&c->btree_interior_update_lock);
618
619                 /*
620                  * b->will_make_reachable prevented it from being written, so
621                  * write it now if it needs to be written:
622                  */
623                 six_lock_read(&b->lock);
624                 bch2_btree_node_write_cond(c, b, btree_node_need_write(b));
625                 six_unlock_read(&b->lock);
626                 mutex_lock(&c->btree_interior_update_lock);
627         }
628
629         while (as->nr_pending)
630                 bch2_btree_node_free_ondisk(c, &as->pending[--as->nr_pending]);
631
632         mutex_unlock(&c->btree_interior_update_lock);
633
634         closure_wake_up(&as->wait);
635
636         bch2_btree_update_free(as);
637 }
638
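/*
 * Wait for the journal write containing the pointer to the new root before
 * declaring the old nodes reachable/freeable:
 */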
639 static void btree_update_wait_on_journal(struct closure *cl)
640 {
641         struct btree_update *as = container_of(cl, struct btree_update, cl);
642         struct bch_fs *c = as->c;
643         int ret;
644
645         ret = bch2_journal_open_seq_async(&c->journal, as->journal_seq, cl);
646         if (ret < 0)
647                 goto err;
648         if (!ret)
649                 continue_at(cl, btree_update_wait_on_journal, system_wq);
650
651         bch2_journal_flush_seq_async(&c->journal, as->journal_seq, cl);
652 err:
653         continue_at(cl, btree_update_nodes_reachable, system_wq);
654 }
655
656 static void btree_update_nodes_written(struct closure *cl)
657 {
658         struct btree_update *as = container_of(cl, struct btree_update, cl);
659         struct bch_fs *c = as->c;
660         struct btree *b;
661
662         /*
663          * We did an update to a parent node where the pointers we added pointed
664          * to child nodes that weren't written yet: now, the child nodes have
665          * been written so we can write out the update to the interior node.
666          */
667 retry:
668         mutex_lock(&c->btree_interior_update_lock);
669         as->nodes_written = true;
670
671         switch (as->mode) {
672         case BTREE_INTERIOR_NO_UPDATE:
673                 BUG();
674         case BTREE_INTERIOR_UPDATING_NODE:
675                 /* The usual case: */
676                 b = READ_ONCE(as->b);
677
678                 if (!six_trylock_read(&b->lock)) {
679                         mutex_unlock(&c->btree_interior_update_lock);
680                         six_lock_read(&b->lock);
681                         six_unlock_read(&b->lock);
682                         goto retry;
683                 }
684
685                 BUG_ON(!btree_node_dirty(b));
686                 closure_wait(&btree_current_write(b)->wait, cl);
687
688                 list_del(&as->write_blocked_list);
689                 mutex_unlock(&c->btree_interior_update_lock);
690
691                 /*
692                  * b->write_blocked prevented it from being written, so
693                  * write it now if it needs to be written:
694                  */
695                 bch2_btree_node_write_cond(c, b, true);
696                 six_unlock_read(&b->lock);
697                 break;
698
699         case BTREE_INTERIOR_UPDATING_AS:
700                 /*
701                  * The btree node we originally updated has been freed and is
702                  * being rewritten - so we don't need to write anything here, we just
703                  * need to signal to that btree_update that it's ok to make the
704                  * new replacement node visible:
705                  */
706                 closure_put(&as->parent_as->cl);
707
708                 /*
709                  * and then we have to wait on that btree_update to finish:
710                  */
711                 closure_wait(&as->parent_as->wait, cl);
712                 mutex_unlock(&c->btree_interior_update_lock);
713                 break;
714
715         case BTREE_INTERIOR_UPDATING_ROOT:
716                 /* b is the new btree root: */
717                 b = READ_ONCE(as->b);
718
719                 if (!six_trylock_read(&b->lock)) {
720                         mutex_unlock(&c->btree_interior_update_lock);
721                         six_lock_read(&b->lock);
722                         six_unlock_read(&b->lock);
723                         goto retry;
724                 }
725
726                 BUG_ON(c->btree_roots[b->btree_id].as != as);
727                 c->btree_roots[b->btree_id].as = NULL;
728
729                 bch2_btree_set_root_ondisk(c, b, WRITE);
730
731                 /*
732                  * We don't have to wait on anything here (before
733                  * btree_update_nodes_reachable frees the old nodes
734                  * ondisk) - we've ensured that the very next journal write will
735                  * have the pointer to the new root, and before the allocator
736                  * can reuse the old nodes it'll have to do a journal commit:
737                  */
738                 six_unlock_read(&b->lock);
739                 mutex_unlock(&c->btree_interior_update_lock);
740
741                 /*
742                  * Bit of funny circularity going on here we have to break:
743                  *
744                  * We have to drop our journal pin before writing the journal
745                  * entry that points to the new btree root: else, we could
746                  * deadlock if the journal currently happens to be full.
747                  *
748                  * This means we're dropping the journal pin _before_ the new
749                  * nodes are technically reachable - but this is safe, because
750                  * after the bch2_btree_set_root_ondisk() call above they will
751                  * be reachable as of the very next journal write:
752                  */
753                 bch2_journal_pin_drop(&c->journal, &as->journal);
754
755                 as->journal_seq = bch2_journal_last_unwritten_seq(&c->journal);
756
757                 btree_update_wait_on_journal(cl);
758                 return;
759         }
760
761         continue_at(cl, btree_update_nodes_reachable, system_wq);
762 }
763
764 /*
765  * We're updating @b with pointers to nodes that haven't finished writing yet:
766  * block @b from being written until @as completes
767  */
768 static void btree_update_updated_node(struct btree_update *as, struct btree *b)
769 {
770         struct bch_fs *c = as->c;
771
772         mutex_lock(&c->btree_interior_update_lock);
773
774         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
775         BUG_ON(!btree_node_dirty(b));
776
777         as->mode = BTREE_INTERIOR_UPDATING_NODE;
778         as->b = b;
779         list_add(&as->write_blocked_list, &b->write_blocked);
780
781         mutex_unlock(&c->btree_interior_update_lock);
782
783         /*
784          * In general, when you're staging things in a journal that will later
785          * be written elsewhere, and you also want to guarantee ordering: that
786          * is, if you have updates a, b, c, after a crash you should never see c
787          * without a and b - there's a problem:
788          *
789          * If the final destination of the update(s) (i.e. btree node) can be
790          * written/flushed _before_ the relevant journal entry - oops, that
791          * breaks ordering, since the various leaf nodes can be written in any
792          * order.
793          *
794          * Normally we use bset->journal_seq to deal with this - if during
795          * recovery we find a btree node write that's newer than the newest
796          * journal entry, we just ignore it - we don't need it, anything we're
797          * supposed to have (that we reported as completed via fsync()) will
798          * still be in the journal, and as far as the state of the journal is
799          * concerned that btree node write never happened.
800          *
801          * That breaks when we're rewriting/splitting/merging nodes, since we're
802          * mixing btree node writes that haven't happened yet with previously
803          * written data that has been reported as completed to the journal.
804          *
805          * Thus, before making the new nodes reachable, we have to wait for the
806          * newest journal sequence number we have data for to be written (if it
807          * hasn't been yet).
808          */
809         bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
810 }
811
812 static void interior_update_flush(struct journal *j,
813                         struct journal_entry_pin *pin, u64 seq)
814 {
815         struct btree_update *as =
816                 container_of(pin, struct btree_update, journal);
817
818         bch2_journal_flush_seq_async(j, as->journal_seq, NULL);
819 }
820
821 static void btree_update_reparent(struct btree_update *as,
822                                   struct btree_update *child)
823 {
824         struct bch_fs *c = as->c;
825
826         child->b = NULL;
827         child->mode = BTREE_INTERIOR_UPDATING_AS;
828         child->parent_as = as;
829         closure_get(&as->cl);
830
831         /*
832          * When we write a new btree root, we have to drop our journal pin
833          * _before_ the new nodes are technically reachable; see
834          * btree_update_nodes_written().
835          *
836          * This goes for journal pins that are recursively blocked on us - so,
837          * just transfer the journal pin to the new interior update so
838          * btree_update_nodes_written() can drop it.
839          */
840         bch2_journal_pin_add_if_older(&c->journal, &child->journal,
841                                       &as->journal, interior_update_flush);
842         bch2_journal_pin_drop(&c->journal, &child->journal);
843
844         as->journal_seq = max(as->journal_seq, child->journal_seq);
845 }
846
847 static void btree_update_updated_root(struct btree_update *as)
848 {
849         struct bch_fs *c = as->c;
850         struct btree_root *r = &c->btree_roots[as->btree_id];
851
852         mutex_lock(&c->btree_interior_update_lock);
853
854         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
855
856         /*
857          * Old root might not be persistent yet - if so, redirect its
858          * btree_update operation to point to us:
859          */
860         if (r->as)
861                 btree_update_reparent(as, r->as);
862
863         as->mode = BTREE_INTERIOR_UPDATING_ROOT;
864         as->b = r->b;
865         r->as = as;
866
867         mutex_unlock(&c->btree_interior_update_lock);
868
869         /*
870          * When we're rewriting nodes and updating interior nodes, there's an
871          * issue with updates that haven't been written in the journal getting
872          * mixed together with older data - see btree_update_updated_node()
873          * for the explanation.
874          *
875          * However, this doesn't affect us when we're writing a new btree root -
876          * because to make that new root reachable we have to write out a new
877          * journal entry, which must necessarily be newer than as->journal_seq.
878          */
879 }
880
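/*
 * Record that @as is the update that will make @b reachable; writes of @b are
 * held off until that happens:
 */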
881 static void btree_node_will_make_reachable(struct btree_update *as,
882                                            struct btree *b)
883 {
884         struct bch_fs *c = as->c;
885
886         mutex_lock(&c->btree_interior_update_lock);
887         BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
888         BUG_ON(b->will_make_reachable);
889
890         as->new_nodes[as->nr_new_nodes++] = b;
891         b->will_make_reachable = 1UL|(unsigned long) as;
892
893         closure_get(&as->cl);
894         mutex_unlock(&c->btree_interior_update_lock);
895 }
896
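/*
 * @b is being freed before it ever became reachable: drop it from its
 * btree_update's list of new nodes:
 */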
897 static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
898 {
899         struct btree_update *as;
900         unsigned long v;
901         unsigned i;
902
903         mutex_lock(&c->btree_interior_update_lock);
904         v = xchg(&b->will_make_reachable, 0);
905         as = (struct btree_update *) (v & ~1UL);
906
907         if (!as) {
908                 mutex_unlock(&c->btree_interior_update_lock);
909                 return;
910         }
911
912         for (i = 0; i < as->nr_new_nodes; i++)
913                 if (as->new_nodes[i] == b)
914                         goto found;
915
916         BUG();
917 found:
918         array_remove_item(as->new_nodes, as->nr_new_nodes, i);
919         mutex_unlock(&c->btree_interior_update_lock);
920
921         if (v & 1)
922                 closure_put(&as->cl);
923 }
924
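/*
 * Add @b to the update's pending-free list, so GC can still find it until the
 * index update making it unreachable is persistent:
 */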
925 static void btree_interior_update_add_node_reference(struct btree_update *as,
926                                                      struct btree *b)
927 {
928         struct bch_fs *c = as->c;
929         struct pending_btree_node_free *d;
930
931         mutex_lock(&c->btree_interior_update_lock);
932
933         /* Add this node to the list of nodes being freed: */
934         BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
935
936         d = &as->pending[as->nr_pending++];
937         d->index_update_done    = false;
938         d->seq                  = b->data->keys.seq;
939         d->btree_id             = b->btree_id;
940         d->level                = b->level;
941         bkey_copy(&d->key, &b->key);
942
943         mutex_unlock(&c->btree_interior_update_lock);
944 }
945
946 /*
947  * @b is being split/rewritten: it may have pointers to not-yet-written btree
948  * nodes and thus outstanding btree_updates - redirect @b's
949  * btree_updates to point to this btree_update:
950  */
951 void bch2_btree_interior_update_will_free_node(struct btree_update *as,
952                                                struct btree *b)
953 {
954         struct bch_fs *c = as->c;
955         struct closure *cl, *cl_n;
956         struct btree_update *p, *n;
957         struct btree_write *w;
958         struct bset_tree *t;
959
960         set_btree_node_dying(b);
961
962         if (btree_node_fake(b))
963                 return;
964
965         btree_interior_update_add_node_reference(as, b);
966
967         /*
968          * Does this node have data that hasn't been written in the journal?
969          *
970          * If so, we have to wait for the corresponding journal entry to be
971          * written before making the new nodes reachable - we can't just carry
972          * over the bset->journal_seq tracking, since we'll be mixing those keys
973          * in with keys that aren't in the journal anymore:
974          */
975         for_each_bset(b, t)
976                 as->journal_seq = max(as->journal_seq,
977                                       le64_to_cpu(bset(b, t)->journal_seq));
978
979         mutex_lock(&c->btree_interior_update_lock);
980
981         /*
982          * Does this node have any btree_update operations preventing
983          * it from being written?
984          *
985          * If so, redirect them to point to this btree_update: we can
986          * write out our new nodes, but we won't make them visible until those
987          * operations complete
988          */
989         list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
990                 list_del(&p->write_blocked_list);
991                 btree_update_reparent(as, p);
992         }
993
994         clear_btree_node_dirty(b);
995         clear_btree_node_need_write(b);
996         w = btree_current_write(b);
997
998         /*
999          * Does this node have any btree_update operations waiting on this node
1000          * to be written?
1001          *
1002          * If so, wake them up when this btree_update operation is reachable:
1003          */
1004         llist_for_each_entry_safe(cl, cl_n, llist_del_all(&w->wait.list), list)
1005                 llist_add(&cl->list, &as->wait.list);
1006
1007         /*
1008          * Does this node have unwritten data that has a pin on the journal?
1009          *
1010          * If so, transfer that pin to the btree_update operation -
1011          * note that if we're freeing multiple nodes, we only need to keep the
1012          * oldest pin of any of the nodes we're freeing. We'll release the pin
1013          * when the new nodes are persistent and reachable on disk:
1014          */
1015         bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1016                                       &as->journal, interior_update_flush);
1017         bch2_journal_pin_drop(&c->journal, &w->journal);
1018
1019         w = btree_prev_write(b);
1020         bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1021                                       &as->journal, interior_update_flush);
1022         bch2_journal_pin_drop(&c->journal, &w->journal);
1023
1024         mutex_unlock(&c->btree_interior_update_lock);
1025 }
1026
1027 void bch2_btree_update_done(struct btree_update *as)
1028 {
1029         BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
1030
1031         bch2_btree_reserve_put(as->c, as->reserve);
1032         as->reserve = NULL;
1033
1034         continue_at(&as->cl, btree_update_nodes_written, system_freezable_wq);
1035 }
1036
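/*
 * Start an interior node update: take a ref on c->writes and preallocate the
 * nodes and disk reservation the update may need:
 */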
1037 struct btree_update *
1038 bch2_btree_update_start(struct bch_fs *c, enum btree_id id,
1039                         unsigned nr_nodes, unsigned flags,
1040                         struct closure *cl)
1041 {
1042         struct btree_reserve *reserve;
1043         struct btree_update *as;
1044
1045         if (unlikely(!percpu_ref_tryget(&c->writes)))
1046                 return ERR_PTR(-EROFS);
1047
1048         reserve = bch2_btree_reserve_get(c, nr_nodes, flags, cl);
1049         if (IS_ERR(reserve)) {
1050                 percpu_ref_put(&c->writes);
1051                 return ERR_CAST(reserve);
1052         }
1053
1054         as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
1055         memset(as, 0, sizeof(*as));
1056         closure_init(&as->cl, NULL);
1057         as->c           = c;
1058         as->mode        = BTREE_INTERIOR_NO_UPDATE;
1059         as->btree_id    = id;
1060         as->reserve     = reserve;
1061         INIT_LIST_HEAD(&as->write_blocked_list);
1062
1063         bch2_keylist_init(&as->parent_keys, as->inline_keys);
1064
1065         mutex_lock(&c->btree_interior_update_lock);
1066         list_add_tail(&as->list, &c->btree_interior_update_list);
1067         mutex_unlock(&c->btree_interior_update_lock);
1068
1069         return as;
1070 }
1071
1072 /* Btree root updates: */
1073
1074 static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
1075 {
1076         /* Root nodes cannot be reaped */
1077         mutex_lock(&c->btree_cache.lock);
1078         list_del_init(&b->list);
1079         mutex_unlock(&c->btree_cache.lock);
1080
1081         mutex_lock(&c->btree_root_lock);
1082         BUG_ON(btree_node_root(c, b) &&
1083                (b->level < btree_node_root(c, b)->level ||
1084                 !btree_node_dying(btree_node_root(c, b))));
1085
1086         btree_node_root(c, b) = b;
1087         mutex_unlock(&c->btree_root_lock);
1088
1089         bch2_recalc_btree_reserve(c);
1090 }
1091
1092 static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
1093 {
1094         struct bch_fs *c = as->c;
1095         struct btree *old = btree_node_root(c, b);
1096         struct bch_fs_usage stats = { 0 };
1097
1098         __bch2_btree_set_root_inmem(c, b);
1099
1100         bch2_mark_key(c, bkey_i_to_s_c(&b->key),
1101                       c->opts.btree_node_size, true,
1102                       gc_pos_btree_root(b->btree_id),
1103                       &stats, 0, 0);
1104
1105         if (old && !btree_node_fake(old))
1106                 bch2_btree_node_free_index(as, NULL,
1107                                            bkey_i_to_s_c(&old->key),
1108                                            &stats);
1109         bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
1110                             gc_pos_btree_root(b->btree_id));
1111 }
1112
1113 static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b, int rw)
1114 {
1115         struct btree_root *r = &c->btree_roots[b->btree_id];
1116
1117         mutex_lock(&c->btree_root_lock);
1118
1119         BUG_ON(b != r->b);
1120         bkey_copy(&r->key, &b->key);
1121         r->level = b->level;
1122         r->alive = true;
1123         if (rw == WRITE)
1124                 c->btree_roots_dirty = true;
1125
1126         mutex_unlock(&c->btree_root_lock);
1127 }
1128
1129 /**
1130  * bch2_btree_set_root - update the root in memory and on disk
1131  *
1132  * To ensure forward progress, the current task must not be holding any
1133  * btree node write locks. However, you must hold an intent lock on the
1134  * old root.
1135  *
1136  * Note: This allocates a journal entry but doesn't add any keys to
1137  * it.  All the btree roots are part of every journal write, so there
1138  * is nothing new to be done.  This just guarantees that there is a
1139  * journal write.
1140  */
1141 static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
1142                                 struct btree_iter *iter)
1143 {
1144         struct bch_fs *c = as->c;
1145         struct btree *old;
1146
1147         trace_btree_set_root(c, b);
1148         BUG_ON(!b->written);
1149
1150         old = btree_node_root(c, b);
1151
1152         /*
1153          * Ensure no one is using the old root while we switch to the
1154          * new root:
1155          */
1156         bch2_btree_node_lock_write(old, iter);
1157
1158         bch2_btree_set_root_inmem(as, b);
1159
1160         btree_update_updated_root(as);
1161
1162         /*
1163          * Unlock old root after new root is visible:
1164          *
1165          * The new root isn't persistent, but that's ok: we still have
1166          * an intent lock on the new root, and any updates that would
1167          * depend on the new root would have to update the new root.
1168          */
1169         bch2_btree_node_unlock_write(old, iter);
1170 }
1171
1172 /* Interior node updates: */
1173
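/*
 * Insert a single btree pointer into an interior node, accounting for the key
 * being added and for any existing pointer it overwrites:
 */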
1174 static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
1175                                         struct btree_iter *iter,
1176                                         struct bkey_i *insert,
1177                                         struct btree_node_iter *node_iter)
1178 {
1179         struct bch_fs *c = as->c;
1180         struct bch_fs_usage stats = { 0 };
1181         struct bkey_packed *k;
1182         struct bkey tmp;
1183
1184         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
1185
1186         if (bkey_extent_is_data(&insert->k))
1187                 bch2_mark_key(c, bkey_i_to_s_c(insert),
1188                              c->opts.btree_node_size, true,
1189                              gc_pos_btree_node(b), &stats, 0, 0);
1190
1191         while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
1192                !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
1193                 bch2_btree_node_iter_advance(node_iter, b);
1194
1195         /*
1196          * If we're overwriting, look up pending delete and mark so that gc
1197          * marks it on the pending delete list:
1198          */
1199         if (k && !bkey_cmp_packed(b, k, &insert->k))
1200                 bch2_btree_node_free_index(as, b,
1201                                            bkey_disassemble(b, k, &tmp),
1202                                            &stats);
1203
1204         bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
1205                             gc_pos_btree_node(b));
1206
1207         bch2_btree_bset_insert_key(iter, b, node_iter, insert);
1208         set_btree_node_dirty(b);
1209         set_btree_node_need_write(b);
1210 }
1211
1212 /*
1213  * Move keys from n1 (original replacement node, now lower node) to n2 (higher
1214  * node)
1215  */
1216 static struct btree *__btree_split_node(struct btree_update *as,
1217                                         struct btree *n1,
1218                                         struct btree_iter *iter)
1219 {
1220         size_t nr_packed = 0, nr_unpacked = 0;
1221         struct btree *n2;
1222         struct bset *set1, *set2;
1223         struct bkey_packed *k, *prev = NULL;
1224
1225         n2 = bch2_btree_node_alloc(as, n1->level);
1226
1227         n2->data->max_key       = n1->data->max_key;
1228         n2->data->format        = n1->format;
1229         SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data));
1230         n2->key.k.p = n1->key.k.p;
1231
1232         btree_node_set_format(n2, n2->data->format);
1233
1234         set1 = btree_bset_first(n1);
1235         set2 = btree_bset_first(n2);
1236
1237         /*
1238          * Has to be a linear search because we don't have an auxiliary
1239          * search tree yet
1240          */
1241         k = set1->start;
1242         while (1) {
1243                 if (bkey_next(k) == vstruct_last(set1))
1244                         break;
1245                 if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
1246                         break;
1247
1248                 if (bkey_packed(k))
1249                         nr_packed++;
1250                 else
1251                         nr_unpacked++;
1252
1253                 prev = k;
1254                 k = bkey_next(k);
1255         }
1256
1257         BUG_ON(!prev);
1258
1259         n1->key.k.p = bkey_unpack_pos(n1, prev);
1260         n1->data->max_key = n1->key.k.p;
1261         n2->data->min_key =
1262                 btree_type_successor(n1->btree_id, n1->key.k.p);
1263
1264         set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
1265         set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
1266
1267         set_btree_bset_end(n1, n1->set);
1268         set_btree_bset_end(n2, n2->set);
1269
1270         n2->nr.live_u64s        = le16_to_cpu(set2->u64s);
1271         n2->nr.bset_u64s[0]     = le16_to_cpu(set2->u64s);
1272         n2->nr.packed_keys      = n1->nr.packed_keys - nr_packed;
1273         n2->nr.unpacked_keys    = n1->nr.unpacked_keys - nr_unpacked;
1274
1275         n1->nr.live_u64s        = le16_to_cpu(set1->u64s);
1276         n1->nr.bset_u64s[0]     = le16_to_cpu(set1->u64s);
1277         n1->nr.packed_keys      = nr_packed;
1278         n1->nr.unpacked_keys    = nr_unpacked;
1279
1280         BUG_ON(!set1->u64s);
1281         BUG_ON(!set2->u64s);
1282
1283         memcpy_u64s(set2->start,
1284                     vstruct_end(set1),
1285                     le16_to_cpu(set2->u64s));
1286
1287         btree_node_reset_sib_u64s(n1);
1288         btree_node_reset_sib_u64s(n2);
1289
1290         bch2_verify_btree_nr_keys(n1);
1291         bch2_verify_btree_nr_keys(n2);
1292
1293         if (n1->level) {
1294                 btree_node_interior_verify(n1);
1295                 btree_node_interior_verify(n2);
1296         }
1297
1298         return n2;
1299 }
1300
1301 /*
1302  * For updates to interior nodes, we've got to do the insert before we split
1303  * because the stuff we're inserting has to be inserted atomically. Post split,
1304  * the keys might have to go in different nodes and the split would no longer be
1305  * atomic.
1306  *
1307  * Worse, if the insert is from btree node coalescing, if we do the insert after
1308  * we do the split (and pick the pivot) - the pivot we pick might be between
1309  * nodes that were coalesced, and thus in the middle of a child node post
1310  * coalescing:
1311  */
1312 static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
1313                                     struct btree_iter *iter,
1314                                     struct keylist *keys)
1315 {
1316         struct btree_node_iter node_iter;
1317         struct bkey_i *k = bch2_keylist_front(keys);
1318         struct bkey_packed *p;
1319         struct bset *i;
1320
1321         BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
1322
1323         bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
1324
1325         while (!bch2_keylist_empty(keys)) {
1326                 k = bch2_keylist_front(keys);
1327
1328                 BUG_ON(bch_keylist_u64s(keys) >
1329                        bch_btree_keys_u64s_remaining(as->c, b));
1330                 BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
1331                 BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
1332
1333                 bch2_insert_fixup_btree_ptr(as, b, iter, k, &node_iter);
1334                 bch2_keylist_pop_front(keys);
1335         }
1336
1337         /*
1338          * We can't tolerate whiteouts here - with whiteouts there can be
1339          * duplicate keys, and it would be rather bad if we picked a duplicate
1340          * for the pivot:
1341          */
1342         i = btree_bset_first(b);
1343         p = i->start;
1344         while (p != vstruct_last(i))
1345                 if (bkey_deleted(p)) {
1346                         le16_add_cpu(&i->u64s, -p->u64s);
1347                         set_btree_bset_end(b, b->set);
1348                         memmove_u64s_down(p, bkey_next(p),
1349                                           (u64 *) vstruct_last(i) -
1350                                           (u64 *) p);
1351                 } else
1352                         p = bkey_next(p);
1353
1354         BUG_ON(b->nsets != 1 ||
1355                b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
1356
1357         btree_node_interior_verify(b);
1358 }
1359
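/*
 * Split (or just compact) @b: build replacement node(s), insert @keys into
 * them, write them out, then make them visible via the parent node or a new
 * root:
 */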
1360 static void btree_split(struct btree_update *as, struct btree *b,
1361                         struct btree_iter *iter, struct keylist *keys)
1362 {
1363         struct bch_fs *c = as->c;
1364         struct btree *parent = btree_node_parent(iter, b);
1365         struct btree *n1, *n2 = NULL, *n3 = NULL;
1366         u64 start_time = local_clock();
1367
1368         BUG_ON(!parent && (b != btree_node_root(c, b)));
1369         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1370
1371         bch2_btree_interior_update_will_free_node(as, b);
1372
1373         n1 = bch2_btree_node_alloc_replacement(as, b);
1374
1375         if (keys)
1376                 btree_split_insert_keys(as, n1, iter, keys);
1377
1378         if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
1379                 trace_btree_split(c, b);
1380
1381                 n2 = __btree_split_node(as, n1, iter);
1382
1383                 bch2_btree_build_aux_trees(n2);
1384                 bch2_btree_build_aux_trees(n1);
1385                 six_unlock_write(&n2->lock);
1386                 six_unlock_write(&n1->lock);
1387
1388                 bch2_btree_node_write(c, n2, SIX_LOCK_intent);
1389
1390                 /*
1391                  * Note that on recursive splits, parent_keys == keys, so we
1392                  * can't start adding new keys to parent_keys before emptying it
1393                  * out (which we did with btree_split_insert_keys() above)
1394                  */
1395                 bch2_keylist_add(&as->parent_keys, &n1->key);
1396                 bch2_keylist_add(&as->parent_keys, &n2->key);
1397
1398                 if (!parent) {
1399                         /* Depth increases, make a new root */
1400                         n3 = __btree_root_alloc(as, b->level + 1);
1401
1402                         n3->sib_u64s[0] = U16_MAX;
1403                         n3->sib_u64s[1] = U16_MAX;
1404
1405                         btree_split_insert_keys(as, n3, iter, &as->parent_keys);
1406
1407                         bch2_btree_node_write(c, n3, SIX_LOCK_intent);
1408                 }
1409         } else {
1410                 trace_btree_compact(c, b);
1411
1412                 bch2_btree_build_aux_trees(n1);
1413                 six_unlock_write(&n1->lock);
1414
1415                 bch2_keylist_add(&as->parent_keys, &n1->key);
1416         }
1417
1418         bch2_btree_node_write(c, n1, SIX_LOCK_intent);
1419
1420         /* New nodes all written, now make them visible: */
1421
1422         if (parent) {
1423                 /* Split a non root node */
1424                 bch2_btree_insert_node(as, parent, iter, &as->parent_keys);
1425         } else if (n3) {
1426                 bch2_btree_set_root(as, n3, iter);
1427         } else {
1428                 /* Root filled up but didn't need to be split */
1429                 bch2_btree_set_root(as, n1, iter);
1430         }
1431
1432         bch2_btree_open_bucket_put(c, n1);
1433         if (n2)
1434                 bch2_btree_open_bucket_put(c, n2);
1435         if (n3)
1436                 bch2_btree_open_bucket_put(c, n3);
1437
1438         /*
1439          * Note - at this point other linked iterators could still have @b read
1440          * locked; we're depending on the bch2_btree_iter_node_replace() calls
1441          * below removing all references to @b so we don't return with other
1442          * iterators pointing to a node they have locked that's been freed.
1443          *
1444          * We have to free the node first because the bch2_btree_iter_node_replace()
1445          * calls will drop _our_ iterator's reference - and intent lock - to @b.
1446          */
1447         bch2_btree_node_free_inmem(c, b, iter);
1448
1449         /* Successful split, update the iterator to point to the new nodes: */
1450
1451         if (n3)
1452                 bch2_btree_iter_node_replace(iter, n3);
1453         if (n2)
1454                 bch2_btree_iter_node_replace(iter, n2);
1455         bch2_btree_iter_node_replace(iter, n1);
1456
1457         bch2_time_stats_update(&c->btree_split_time, start_time);
1458 }
1459
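/*
 * Insert @keys into @b without splitting (the caller has checked they fit),
 * then fix up the node iterators of @iter and any linked iterators:
 */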
1460 static void
1461 bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
1462                                 struct btree_iter *iter, struct keylist *keys)
1463 {
1464         struct btree_iter *linked;
1465         struct btree_node_iter node_iter;
1466         struct bkey_i *insert = bch2_keylist_front(keys);
1467         struct bkey_packed *k;
1468
1469         /* Don't screw up @iter's position: */
1470         node_iter = iter->l[b->level].iter;
1471
1472         /*
1473          * btree_split() and btree_gc_coalesce() insert keys before the
1474          * iterator's current position - they know the keys go in the node
1475          * the iterator points to:
1476          */
1477         while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
1478                (bkey_cmp_packed(b, k, &insert->k) >= 0))
1479                 ;
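             /*
              * After this rewind, node_iter sits at or just before the slot
              * where the first key in @keys belongs, so the fixup calls below
              * start from the correct insert position.
              */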
1480
1481         while (!bch2_keylist_empty(keys)) {
1482                 insert = bch2_keylist_front(keys);
1483
1484                 bch2_insert_fixup_btree_ptr(as, b, iter, insert, &node_iter);
1485                 bch2_keylist_pop_front(keys);
1486         }
1487
1488         btree_update_updated_node(as, b);
1489
1490         for_each_linked_btree_node(iter, b, linked)
1491                 bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);
1492         bch2_btree_node_iter_peek(&iter->l[b->level].iter, b);
1493
1494         bch2_btree_iter_verify(iter, b);
1495 }
1496
1497 /**
1498  * bch2_btree_insert_node - insert bkeys into a given btree node
1499  *
1500  * @as:                 btree update in progress
1501  * @b:                  interior btree node to insert keys into
1502  * @iter:               btree iterator
1503  * @keys:               list of keys to insert
1504  *
1505  * Inserts as many keys as it can into a given btree node, splitting it if full.
1506  * Inserts into interior nodes have to be atomic: if @b has to be split, the
1507  * keys are inserted as part of the split rather than left for the caller.
1508  */
1509 void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
1510                             struct btree_iter *iter, struct keylist *keys)
1511 {
1512         struct bch_fs *c = as->c;
1513         int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
1514         int old_live_u64s = b->nr.live_u64s;
1515         int live_u64s_added, u64s_added;
1516
1517         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1518         BUG_ON(!b->level);
1519         BUG_ON(!as || as->b);
1520         bch2_verify_keylist_sorted(keys);
1521
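             /*
              * must_rewrite forces us down the split/rewrite path even when the
              * keys would fit - see __bch2_btree_node_update_key(), which sets
              * it when @b isn't reachable on disk yet.
              */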
1522         if (as->must_rewrite)
1523                 goto split;
1524
1525         bch2_btree_node_lock_for_insert(c, b, iter);
1526
1527         if (!bch2_btree_node_insert_fits(c, b, bch_keylist_u64s(keys))) {
1528                 bch2_btree_node_unlock_write(b, iter);
1529                 goto split;
1530         }
1531
1532         bch2_btree_insert_keys_interior(as, b, iter, keys);
1533
1534         live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
1535         u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
1536
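             /*
              * sib_u64s[] caches an estimate of this node's size were it merged
              * with its left/right sibling (U16_MAX serving as a "no merge
              * candidate" sentinel). If this insert shrank the node - e.g.
              * because existing keys were overwritten - shrink the cached
              * estimates to match:
              */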
1537         if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
1538                 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
1539         if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
1540                 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
1541
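             /*
              * If the bset grew by more u64s than the live key count did, the
              * difference is dead keys (overwrites/whiteouts) - try compacting
              * them out, and reinitialize iterators if the node's layout
              * changed:
              */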
1542         if (u64s_added > live_u64s_added &&
1543             bch2_maybe_compact_whiteouts(c, b))
1544                 bch2_btree_iter_reinit_node(iter, b);
1545
1546         bch2_btree_node_unlock_write(b, iter);
1547
1548         btree_node_interior_verify(b);
1549
1550         bch2_foreground_maybe_merge(c, iter, b->level);
1551         return;
1552 split:
1553         btree_split(as, b, iter, keys);
1554 }
1555
1556 int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
1557                           unsigned btree_reserve_flags)
1558 {
1559         struct btree *b = iter->l[0].b;
1560         struct btree_update *as;
1561         struct closure cl;
1562         int ret = 0;
1563
1564         /*
1565          * We already have a disk reservation and open buckets pinned; this
1566          * allocation must not block:
1567          */
1568         if (iter->btree_id == BTREE_ID_EXTENTS)
1569                 btree_reserve_flags |= BTREE_INSERT_USE_RESERVE;
1570
1571         closure_init_stack(&cl);
1572
1573         /* Hack, because gc and splitting nodes don't mix yet: */
1574         if (!down_read_trylock(&c->gc_lock)) {
1575                 bch2_btree_iter_unlock(iter);
1576                 down_read(&c->gc_lock);
1577
1578                 if (btree_iter_linked(iter))
1579                         ret = -EINTR;
1580         }
1581
1582         /*
1583          * XXX: figure out how far we might need to split,
1584          * instead of locking/reserving all the way to the root:
1585          */
1586         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1587                 ret = -EINTR;
1588                 goto out;
1589         }
1590
1591         as = bch2_btree_update_start(c, iter->btree_id,
1592                                      btree_update_reserve_required(c, b),
1593                                      btree_reserve_flags, &cl);
1594         if (IS_ERR(as)) {
1595                 ret = PTR_ERR(as);
1596                 if (ret == -EAGAIN) {
1597                         bch2_btree_iter_unlock(iter);
1598                         up_read(&c->gc_lock);
1599                         closure_sync(&cl);
1600                         return -EINTR;
1601                 }
1602                 goto out;
1603         }
1604
1605         btree_split(as, b, iter, NULL);
1606         bch2_btree_update_done(as);
1607
1608         bch2_btree_iter_set_locks_want(iter, 1);
1609 out:
1610         up_read(&c->gc_lock);
1611         closure_sync(&cl);
1612         return ret;
1613 }
1614
1615 int __bch2_foreground_maybe_merge(struct bch_fs *c,
1616                                   struct btree_iter *iter,
1617                                   unsigned level,
1618                                   enum btree_node_sibling sib)
1619 {
1620         struct btree_update *as;
1621         struct bkey_format_state new_s;
1622         struct bkey_format new_f;
1623         struct bkey_i delete;
1624         struct btree *b, *m, *n, *prev, *next, *parent;
1625         struct closure cl;
1626         size_t sib_u64s;
1627         int ret = 0;
1628
1629         closure_init_stack(&cl);
1630 retry:
1631         if (!bch2_btree_node_relock(iter, level))
1632                 return 0;
1633
1634         b = iter->l[level].b;
1635
1636         parent = btree_node_parent(iter, b);
1637         if (!parent)
1638                 return 0;
1639
1640         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1641                 return 0;
1642
1643         /* XXX: can't be holding read locks */
1644         m = bch2_btree_node_get_sibling(c, iter, b, sib);
1645         if (IS_ERR(m)) {
1646                 ret = PTR_ERR(m);
1647                 goto out;
1648         }
1649
1650         /* NULL means no sibling: */
1651         if (!m) {
1652                 b->sib_u64s[sib] = U16_MAX;
1653                 return 0;
1654         }
1655
1656         if (sib == btree_prev_sib) {
1657                 prev = m;
1658                 next = b;
1659         } else {
1660                 prev = b;
1661                 next = m;
1662         }
1663
1664         bch2_bkey_format_init(&new_s);
1665         __bch2_btree_calc_format(&new_s, b);
1666         __bch2_btree_calc_format(&new_s, m);
1667         new_f = bch2_bkey_format_done(&new_s);
1668
1669         sib_u64s = btree_node_u64s_with_format(b, &new_f) +
1670                 btree_node_u64s_with_format(m, &new_f);
1671
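             /*
              * sib_u64s estimates the size of @b and @m merged into a single
              * node. When it's above the hysteresis bound we only pull the
              * cached estimate halfway back toward that bound, presumably so a
              * borderline pair isn't re-evaluated on every insert:
              */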
1672         if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
1673                 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1674                 sib_u64s /= 2;
1675                 sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1676         }
1677
1678         sib_u64s = min(sib_u64s, btree_max_u64s(c));
1679         b->sib_u64s[sib] = sib_u64s;
1680
1681         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
1682                 six_unlock_intent(&m->lock);
1683                 return 0;
1684         }
1685
1686         /* We're changing btree topology, which doesn't mix with gc: */
1687         if (!down_read_trylock(&c->gc_lock)) {
1688                 six_unlock_intent(&m->lock);
1689                 bch2_btree_iter_unlock(iter);
1690
1691                 down_read(&c->gc_lock);
1692                 up_read(&c->gc_lock);
1693                 ret = -EINTR;
1694                 goto out;
1695         }
1696
1697         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1698                 ret = -EINTR;
1699                 goto out_unlock;
1700         }
1701
1702         as = bch2_btree_update_start(c, iter->btree_id,
1703                                      btree_update_reserve_required(c, b),
1704                                      BTREE_INSERT_NOFAIL|
1705                                      BTREE_INSERT_USE_RESERVE,
1706                                      &cl);
1707         if (IS_ERR(as)) {
1708                 ret = PTR_ERR(as);
1709                 goto out_unlock;
1710         }
1711
1712         bch2_btree_interior_update_will_free_node(as, b);
1713         bch2_btree_interior_update_will_free_node(as, m);
1714
1715         n = bch2_btree_node_alloc(as, b->level);
1716
1717         n->data->min_key        = prev->data->min_key;
1718         n->data->max_key        = next->data->max_key;
1719         n->data->format         = new_f;
1720         n->key.k.p              = next->key.k.p;
1721
1722         btree_node_set_format(n, new_f);
1723
1724         bch2_btree_sort_into(c, n, prev);
1725         bch2_btree_sort_into(c, n, next);
1726
1727         bch2_btree_build_aux_trees(n);
1728         six_unlock_write(&n->lock);
1729
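             /*
              * Update the parent: the whiteout at @prev's old max key deletes
              * the parent's pointer to @prev, while @n's key - which ends at
              * @next's max key - replaces the pointer to @next:
              */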
1730         bkey_init(&delete.k);
1731         delete.k.p = prev->key.k.p;
1732         bch2_keylist_add(&as->parent_keys, &delete);
1733         bch2_keylist_add(&as->parent_keys, &n->key);
1734
1735         bch2_btree_node_write(c, n, SIX_LOCK_intent);
1736
1737         bch2_btree_insert_node(as, parent, iter, &as->parent_keys);
1738
1739         bch2_btree_open_bucket_put(c, n);
1740         bch2_btree_node_free_inmem(c, b, iter);
1741         bch2_btree_node_free_inmem(c, m, iter);
1742         bch2_btree_iter_node_replace(iter, n);
1743
1744         bch2_btree_iter_verify(iter, n);
1745
1746         bch2_btree_update_done(as);
1747 out_unlock:
1748         if (ret != -EINTR && ret != -EAGAIN)
1749                 bch2_btree_iter_set_locks_want(iter, 1);
1750         six_unlock_intent(&m->lock);
1751         up_read(&c->gc_lock);
1752 out:
1753         if (ret == -EAGAIN || ret == -EINTR) {
1754                 bch2_btree_iter_unlock(iter);
1755                 ret = -EINTR;
1756         }
1757
1758         closure_sync(&cl);
1759
1760         if (ret == -EINTR) {
1761                 ret = bch2_btree_iter_traverse(iter);
1762                 if (!ret)
1763                         goto retry;
1764         }
1765
1766         return ret;
1767 }
1768
1769 static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
1770                                 struct btree *b, unsigned flags,
1771                                 struct closure *cl)
1772 {
1773         struct btree *n, *parent = btree_node_parent(iter, b);
1774         struct btree_update *as;
1775
1776         as = bch2_btree_update_start(c, iter->btree_id,
1777                                      btree_update_reserve_required(c, b),
1778                                      flags, cl);
1779         if (IS_ERR(as)) {
1780                 trace_btree_gc_rewrite_node_fail(c, b);
1781                 return PTR_ERR(as);
1782         }
1783
1784         bch2_btree_interior_update_will_free_node(as, b);
1785
1786         n = bch2_btree_node_alloc_replacement(as, b);
1787
1788         bch2_btree_build_aux_trees(n);
1789         six_unlock_write(&n->lock);
1790
1791         trace_btree_gc_rewrite_node(c, b);
1792
1793         bch2_btree_node_write(c, n, SIX_LOCK_intent);
1794
1795         if (parent) {
1796                 bch2_btree_insert_node(as, parent, iter,
1797                                        &keylist_single(&n->key));
1798         } else {
1799                 bch2_btree_set_root(as, n, iter);
1800         }
1801
1802         bch2_btree_open_bucket_put(c, n);
1803
1804         bch2_btree_node_free_inmem(c, b, iter);
1805
1806         BUG_ON(!bch2_btree_iter_node_replace(iter, n));
1807
1808         bch2_btree_update_done(as);
1809         return 0;
1810 }
1811
1812 /**
1813  * bch2_btree_node_rewrite - Rewrite/move a btree node
1814  *
1815  * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e. getting the
1816  * btree reserve had to wait)
1817  */
1818 int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
1819                             __le64 seq, unsigned flags)
1820 {
1821         unsigned locks_want = iter->locks_want;
1822         struct closure cl;
1823         struct btree *b;
1824         int ret;
1825
1826         flags |= BTREE_INSERT_NOFAIL;
1827
1828         closure_init_stack(&cl);
1829
1830         bch2_btree_iter_set_locks_want(iter, U8_MAX);
1831
1832         if (!(flags & BTREE_INSERT_GC_LOCK_HELD)) {
1833                 if (!down_read_trylock(&c->gc_lock)) {
1834                         bch2_btree_iter_unlock(iter);
1835                         down_read(&c->gc_lock);
1836                 }
1837         }
1838
1839         while (1) {
1840                 ret = bch2_btree_iter_traverse(iter);
1841                 if (ret)
1842                         break;
1843
1844                 b = bch2_btree_iter_peek_node(iter);
1845                 if (!b || b->data->keys.seq != seq)
1846                         break;
1847
1848                 ret = __btree_node_rewrite(c, iter, b, flags, &cl);
1849                 if (ret != -EAGAIN &&
1850                     ret != -EINTR)
1851                         break;
1852
1853                 bch2_btree_iter_unlock(iter);
1854                 closure_sync(&cl);
1855         }
1856
1857         bch2_btree_iter_set_locks_want(iter, locks_want);
1858
1859         if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
1860                 up_read(&c->gc_lock);
1861
1862         closure_sync(&cl);
1863         return ret;
1864 }
1865
1866 static void __bch2_btree_node_update_key(struct bch_fs *c,
1867                                          struct btree_update *as,
1868                                          struct btree_iter *iter,
1869                                          struct btree *b, struct btree *new_hash,
1870                                          struct bkey_i_extent *new_key)
1871 {
1872         struct btree *parent;
1873         int ret;
1874
1875         /*
1876          * Two corner cases that need to be thought about here:
1877          *
1878          * @b may not be reachable yet - there might be another interior update
1879          * operation waiting on @b to be written, and we're gonna deliver the
1880          * write completion to that interior update operation _before_
1881          * persisting the new_key update
1882          *
1883          * That ends up working without us having to do anything special here:
1884          * the reason is, we do kick off (and do the in memory updates) for the
1885          * update for @new_key before we return, creating a new interior_update
1886          * operation here.
1887          *
1888          * The new interior update operation here will in effect override the
1889          * previous one. The previous one was going to terminate - make @b
1890          * reachable - in one of two ways:
1891          * - updating the btree root pointer
1892          *   In that case,
1893          *   no, this doesn't work. argh.
1894          */
1895
1896         if (b->will_make_reachable)
1897                 as->must_rewrite = true;
1898
1899         btree_interior_update_add_node_reference(as, b);
1900
1901         parent = btree_node_parent(iter, b);
1902         if (parent) {
1903                 if (new_hash) {
1904                         bkey_copy(&new_hash->key, &new_key->k_i);
1905                         ret = bch2_btree_node_hash_insert(&c->btree_cache,
1906                                         new_hash, b->level, b->btree_id);
1907                         BUG_ON(ret);
1908                 }
1909
1910                 bch2_keylist_add(&as->parent_keys, &new_key->k_i);
1911                 bch2_btree_insert_node(as, parent, iter, &as->parent_keys);
1912
1913                 if (new_hash) {
1914                         mutex_lock(&c->btree_cache.lock);
1915                         bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
1916
1917                         bch2_btree_node_hash_remove(&c->btree_cache, b);
1918
1919                         bkey_copy(&b->key, &new_key->k_i);
1920                         ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
1921                         BUG_ON(ret);
1922                         mutex_unlock(&c->btree_cache.lock);
1923                 } else {
1924                         bkey_copy(&b->key, &new_key->k_i);
1925                 }
1926         } else {
1927                 struct bch_fs_usage stats = { 0 };
1928
1929                 BUG_ON(btree_node_root(c, b) != b);
1930
1931                 bch2_btree_node_lock_write(b, iter);
1932
1933                 bch2_mark_key(c, bkey_i_to_s_c(&new_key->k_i),
1934                               c->opts.btree_node_size, true,
1935                               gc_pos_btree_root(b->btree_id),
1936                               &stats, 0, 0);
1937                 bch2_btree_node_free_index(as, NULL,
1938                                            bkey_i_to_s_c(&b->key),
1939                                            &stats);
1940                 bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
1941                                     gc_pos_btree_root(b->btree_id));
1942
1943                 if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
1944                         mutex_lock(&c->btree_cache.lock);
1945                         bch2_btree_node_hash_remove(&c->btree_cache, b);
1946
1947                         bkey_copy(&b->key, &new_key->k_i);
1948                         ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
1949                         BUG_ON(ret);
1950                         mutex_unlock(&c->btree_cache.lock);
1951                 } else {
1952                         bkey_copy(&b->key, &new_key->k_i);
1953                 }
1954
1955                 btree_update_updated_root(as);
1956                 bch2_btree_node_unlock_write(b, iter);
1957         }
1958
1959         bch2_btree_update_done(as);
1960 }
1961
1962 int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
1963                                struct btree *b, struct bkey_i_extent *new_key)
1964 {
1965         struct btree_update *as = NULL;
1966         struct btree *new_hash = NULL;
1967         struct closure cl;
1968         int ret;
1969
1970         closure_init_stack(&cl);
1971
1972         if (!down_read_trylock(&c->gc_lock)) {
1973                 bch2_btree_iter_unlock(iter);
1974                 down_read(&c->gc_lock);
1975
1976                 if (!bch2_btree_iter_relock(iter)) {
1977                         ret = -EINTR;
1978                         goto err;
1979                 }
1980         }
1981
1982         /* check PTR_HASH() after @b is locked by btree_iter_traverse(): */
1983         if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
1984                 /* bch2_btree_reserve_get will unlock */
1985                 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1986                 if (ret) {
1987                         ret = -EINTR;
1988
1989                         bch2_btree_iter_unlock(iter);
1990                         up_read(&c->gc_lock);
1991                         closure_sync(&cl);
1992                         down_read(&c->gc_lock);
1993
1994                         if (!bch2_btree_iter_relock(iter))
1995                                 goto err;
1996                 }
1997
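                     /*
                      * The btree node cache is hashed by the node's pointer,
                      * which is exactly what we're changing: new_hash is a
                      * spare node that temporarily occupies the hash slot for
                      * the new key while the update is in flight (see
                      * __bch2_btree_node_update_key()).
                      */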
1998                 new_hash = bch2_btree_node_mem_alloc(c);
1999         }
2000
2001         as = bch2_btree_update_start(c, iter->btree_id,
2002                                      btree_update_reserve_required(c, b),
2003                                      BTREE_INSERT_NOFAIL|
2004                                      BTREE_INSERT_USE_RESERVE|
2005                                      BTREE_INSERT_USE_ALLOC_RESERVE,
2006                                      &cl);
2007         if (IS_ERR(as)) {
2008                 ret = PTR_ERR(as);
2009                 if (ret == -EAGAIN)
2010                         ret = -EINTR;
2011
2012                 if (ret != -EINTR)
2013                         goto err;
2014
2015                 bch2_btree_iter_unlock(iter);
2016                 up_read(&c->gc_lock);
2017                 closure_sync(&cl);
2018                 down_read(&c->gc_lock);
2019
2020                 if (!bch2_btree_iter_relock(iter))
2021                         goto err;
2022         }
2023
2024         ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE,
2025                                       extent_i_to_s_c(new_key).s_c);
2026         if (ret)
2027                 goto err_free_update;
2028
2029         __bch2_btree_node_update_key(c, as, iter, b, new_hash, new_key);
2030 err:
2031         if (new_hash) {
2032                 mutex_lock(&c->btree_cache.lock);
2033                 list_move(&new_hash->list, &c->btree_cache.freeable);
2034                 mutex_unlock(&c->btree_cache.lock);
2035
2036                 six_unlock_write(&new_hash->lock);
2037                 six_unlock_intent(&new_hash->lock);
2038         }
2039         up_read(&c->gc_lock);
2040         closure_sync(&cl);
2041         return ret;
2042 err_free_update:
2043         bch2_btree_update_free(as);
2044         goto err;
2045 }
2046
2047 /* Init code: */
2048
2049 /*
2050  * Only for filesystem bringup: when first reading the btree roots, or when
2051  * allocating btree roots while initializing a new filesystem:
2052  */
2053 void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
2054 {
2055         BUG_ON(btree_node_root(c, b));
2056
2057         __bch2_btree_set_root_inmem(c, b);
2058         bch2_btree_set_root_ondisk(c, b, READ);
2059 }
2060
2061 void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
2062 {
2063         struct closure cl;
2064         struct btree *b;
2065         int ret;
2066
2067         closure_init_stack(&cl);
2068
2069         do {
2070                 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
2071                 closure_sync(&cl);
2072         } while (ret);
2073
2074         b = bch2_btree_node_mem_alloc(c);
2075         bch2_btree_cache_cannibalize_unlock(c);
2076
2077         set_btree_node_fake(b);
2078         b->level        = 0;
2079         b->btree_id     = id;
2080
2081         bkey_extent_init(&b->key);
2082         b->key.k.p = POS_MAX;
2083         bkey_i_to_extent(&b->key)->v._data[0] = U64_MAX - id;
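             /*
              * This node is marked fake and never written out; the extent value
              * is just a distinct per-btree placeholder "pointer", presumably so
              * the node hashes uniquely in the btree node cache below.
              */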
2084
2085         bch2_bset_init_first(b, &b->data->keys);
2086         bch2_btree_build_aux_trees(b);
2087
2088         b->data->min_key = POS_MIN;
2089         b->data->max_key = POS_MAX;
2090         b->data->format = bch2_btree_calc_format(b);
2091         btree_node_set_format(b, b->data->format);
2092
2093         ret = bch2_btree_node_hash_insert(&c->btree_cache, b, b->level, b->btree_id);
2094         BUG_ON(ret);
2095
2096         __bch2_btree_set_root_inmem(c, b);
2097
2098         six_unlock_write(&b->lock);
2099         six_unlock_intent(&b->lock);
2100 }
2101
2102 ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf)
2103 {
2104         char *out = buf, *end = buf + PAGE_SIZE;
2105         struct btree_update *as;
2106
2107         mutex_lock(&c->btree_interior_update_lock);
2108         list_for_each_entry(as, &c->btree_interior_update_list, list)
2109                 out += scnprintf(out, end - out, "%p m %u w %u r %u j %llu\n",
2110                                  as,
2111                                  as->mode,
2112                                  as->nodes_written,
2113                                  atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK,
2114                                  bch2_journal_pin_seq(&c->journal, &as->journal));
2115         mutex_unlock(&c->btree_interior_update_lock);
2116
2117         return out - buf;
2118 }