1
2 #include "bcachefs.h"
3 #include "alloc.h"
4 #include "bkey_methods.h"
5 #include "btree_cache.h"
6 #include "btree_gc.h"
7 #include "btree_update.h"
8 #include "btree_update_interior.h"
9 #include "btree_io.h"
10 #include "btree_iter.h"
11 #include "btree_locking.h"
12 #include "buckets.h"
13 #include "extents.h"
14 #include "journal.h"
15 #include "keylist.h"
16 #include "super-io.h"
17
18 #include <linux/random.h>
19 #include <trace/events/bcachefs.h>
20
21 static void btree_node_will_make_reachable(struct btree_update *,
22                                            struct btree *);
23 static void btree_update_drop_new_node(struct bch_fs *, struct btree *);
24 static void bch2_btree_set_root_ondisk(struct bch_fs *, struct btree *, int);
25
26 /* Debug code: */
27
28 static void btree_node_interior_verify(struct btree *b)
29 {
30         struct btree_node_iter iter;
31         struct bkey_packed *k;
32
33         BUG_ON(!b->level);
34
35         bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
36 #if 1
37         BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
38                bkey_cmp_left_packed(b, k, &b->key.k.p));
39
40         BUG_ON((bch2_btree_node_iter_advance(&iter, b),
41                 !bch2_btree_node_iter_end(&iter)));
42 #else
43         const char *msg;
44
45         msg = "not found";
46         k = bch2_btree_node_iter_peek(&iter, b);
47         if (!k)
48                 goto err;
49
50         msg = "isn't what it should be";
51         if (bkey_cmp_left_packed(b, k, &b->key.k.p))
52                 goto err;
53
54         bch2_btree_node_iter_advance(&iter, b);
55
56         msg = "isn't last key";
57         if (!bch2_btree_node_iter_end(&iter))
58                 goto err;
59         return;
60 err:
61         bch2_dump_btree_node(b);
62         printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
63                b->key.k.p.offset, msg);
64         BUG();
65 #endif
66 }
67
68 /* Calculate ideal packed bkey format for new btree nodes: */
69
70 void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
71 {
72         struct bkey_packed *k;
73         struct bset_tree *t;
74         struct bkey uk;
75
76         bch2_bkey_format_add_pos(s, b->data->min_key);
77
78         for_each_bset(b, t)
79                 for (k = btree_bkey_first(b, t);
80                      k != btree_bkey_last(b, t);
81                      k = bkey_next(k))
82                         if (!bkey_whiteout(k)) {
83                                 uk = bkey_unpack_key(b, k);
84                                 bch2_bkey_format_add_key(s, &uk);
85                         }
86 }
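/*
 * Roughly speaking, bch2_bkey_format_add_pos()/bch2_bkey_format_add_key()
 * above accumulate the range of each key field over the node's live keys
 * (plus min_key), so the format returned by bch2_bkey_format_done() is sized
 * to the keys this node currently contains - that's what makes it the "ideal"
 * format referred to above.
 */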
87
88 static struct bkey_format bch2_btree_calc_format(struct btree *b)
89 {
90         struct bkey_format_state s;
91
92         bch2_bkey_format_init(&s);
93         __bch2_btree_calc_format(&s, b);
94
95         return bch2_bkey_format_done(&s);
96 }
97
98 static size_t btree_node_u64s_with_format(struct btree *b,
99                                           struct bkey_format *new_f)
100 {
101         struct bkey_format *old_f = &b->format;
102
103         /* stupid integer promotion rules */
104         ssize_t delta =
105             (((int) new_f->key_u64s - old_f->key_u64s) *
106              (int) b->nr.packed_keys) +
107             (((int) new_f->key_u64s - BKEY_U64s) *
108              (int) b->nr.unpacked_keys);
109
110         BUG_ON(delta + b->nr.live_u64s < 0);
111
112         return b->nr.live_u64s + delta;
113 }
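/*
 * Worked example for the delta computed above (illustrative numbers only,
 * not taken from a real tree): with 100 packed and 10 unpacked keys,
 * old key_u64s = 3, proposed key_u64s = 2 and BKEY_U64s = 3,
 *
 *      delta = (2 - 3) * 100 + (2 - 3) * 10 = -110
 *
 * i.e. repacking with the new format would shrink the node's live keys by
 * 110 u64s.
 */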
114
115 /**
116  * bch2_btree_node_format_fits - check if we could rewrite node with a new format
117  *
118  * This assumes all keys can pack with the new format -- it just checks if
119  * the re-packed keys would fit inside the node itself.
120  */
121 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
122                                  struct bkey_format *new_f)
123 {
124         size_t u64s = btree_node_u64s_with_format(b, new_f);
125
126         return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
127 }
128
129 /* Btree node freeing/allocation: */
130
131 static bool btree_key_matches(struct bch_fs *c,
132                               struct bkey_s_c_extent l,
133                               struct bkey_s_c_extent r)
134 {
135         const struct bch_extent_ptr *ptr1, *ptr2;
136
137         extent_for_each_ptr(l, ptr1)
138                 extent_for_each_ptr(r, ptr2)
139                         if (ptr1->dev == ptr2->dev &&
140                             ptr1->gen == ptr2->gen &&
141                             ptr1->offset == ptr2->offset)
142                                 return true;
143
144         return false;
145 }
146
147 /*
148  * We're doing the index update that makes @b unreachable; update the pending
149  * free list and accounting to reflect that:
150  *
151  * Must be called _before_ btree_update_updated_root() or
152  * btree_update_updated_node():
153  */
154 static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
155                                        struct bkey_s_c k,
156                                        struct bch_fs_usage *stats)
157 {
158         struct bch_fs *c = as->c;
159         struct pending_btree_node_free *d;
160         unsigned replicas;
161
162         /*
163          * btree_update lock is only needed here to avoid racing with
164          * gc:
165          */
166         mutex_lock(&c->btree_interior_update_lock);
167
168         for (d = as->pending; d < as->pending + as->nr_pending; d++)
169                 if (!bkey_cmp(k.k->p, d->key.k.p) &&
170                     btree_key_matches(c, bkey_s_c_to_extent(k),
171                                       bkey_i_to_s_c_extent(&d->key)))
172                         goto found;
173         BUG();
174 found:
175         BUG_ON(d->index_update_done);
176         d->index_update_done = true;
177
178         /*
179          * Btree nodes are accounted as freed in bch_alloc_stats when they're
180          * freed from the index:
181          */
182         replicas = bch2_extent_nr_dirty_ptrs(k);
183         if (replicas)
184                 stats->s[replicas - 1].data[S_META] -= c->opts.btree_node_size;
185
186         /*
187          * We're dropping @k from the btree, but it's still live until the
188          * index update is persistent so we need to keep a reference around for
189          * mark and sweep to find - that's primarily what the
190          * btree_node_pending_free list is for.
191          *
192          * So here (when we set index_update_done = true), we're moving an
193          * existing reference to a different part of the larger "gc keyspace" -
194          * and the new position comes after the old position, since GC marks
195          * the pending free list after it walks the btree.
196          *
197          * If we move the reference while mark and sweep is _between_ the old
198          * and the new position, mark and sweep will see the reference twice
199          * and it'll get double accounted - so check for that here and subtract
200          * to cancel out one of mark and sweep's markings if necessary:
201          */
202
203         /*
204          * bch2_mark_key() compares the current gc pos to the pos we're
205          * moving this reference from, hence one comparison here:
206          */
207         if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
208                 struct bch_fs_usage tmp = { 0 };
209
210                 bch2_mark_key(c, bkey_i_to_s_c(&d->key),
211                              -c->opts.btree_node_size, true, b
212                              ? gc_pos_btree_node(b)
213                              : gc_pos_btree_root(as->btree_id),
214                              &tmp, 0, 0);
215                 /*
216                  * Don't apply tmp - pending deletes aren't tracked in
217                  * bch_alloc_stats:
218                  */
219         }
220
221         mutex_unlock(&c->btree_interior_update_lock);
222 }
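/*
 * A rough timeline of the double-accounting case that the gc_pos check in
 * bch2_btree_node_free_index() guards against (a sketch following the comment
 * above, not an additional guarantee):
 *
 *   1. GC has already walked past @b's position in the btree and counted the
 *      node's old pointer once.
 *   2. We set index_update_done, moving the reference to the
 *      GC_PHASE_PENDING_DELETE part of the gc keyspace.
 *   3. GC reaches the pending-free list later in its pass and counts the same
 *      reference a second time.
 *
 * The negative bch2_mark_key() call cancels one of those two markings; the
 * explicit gc_pos comparison here plus the one inside bch2_mark_key() restrict
 * that to the window where gc sits between the two positions.
 */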
223
224 static void __btree_node_free(struct bch_fs *c, struct btree *b,
225                               struct btree_iter *iter)
226 {
227         trace_btree_node_free(c, b);
228
229         BUG_ON(btree_node_dirty(b));
230         BUG_ON(btree_node_need_write(b));
231         BUG_ON(b == btree_node_root(c, b));
232         BUG_ON(b->ob.nr);
233         BUG_ON(!list_empty(&b->write_blocked));
234         BUG_ON(b->will_make_reachable);
235
236         clear_btree_node_noevict(b);
237
238         six_lock_write(&b->lock);
239
240         bch2_btree_node_hash_remove(&c->btree_cache, b);
241
242         mutex_lock(&c->btree_cache.lock);
243         list_move(&b->list, &c->btree_cache.freeable);
244         mutex_unlock(&c->btree_cache.lock);
245
246         /*
247          * By using six_unlock_write() directly instead of
248          * bch2_btree_node_unlock_write(), we don't update the iterator's
249          * sequence numbers and cause future bch2_btree_node_relock() calls to
250          * fail:
251          */
252         six_unlock_write(&b->lock);
253 }
254
255 void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
256 {
257         struct btree_ob_ref ob = b->ob;
258
259         btree_update_drop_new_node(c, b);
260
261         b->ob.nr = 0;
262
263         clear_btree_node_dirty(b);
264
265         __btree_node_free(c, b, NULL);
266
267         bch2_open_bucket_put_refs(c, &ob.nr, ob.refs);
268 }
269
270 void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
271                                 struct btree_iter *iter)
272 {
273         /*
274          * Is this a node that isn't reachable on disk yet?
275          *
276          * Nodes that aren't reachable yet have writes blocked until they're
277          * reachable - now that we've cancelled any pending writes and moved
278          * things waiting on that write to wait on this update, we can drop this
279          * node from the list of nodes that the other update is making
280          * reachable, prior to freeing it:
281          */
282         btree_update_drop_new_node(c, b);
283
284         bch2_btree_iter_node_drop_linked(iter, b);
285
286         __btree_node_free(c, b, iter);
287
288         bch2_btree_iter_node_drop(iter, b);
289 }
290
291 static void bch2_btree_node_free_ondisk(struct bch_fs *c,
292                                         struct pending_btree_node_free *pending)
293 {
294         struct bch_fs_usage stats = { 0 };
295
296         BUG_ON(!pending->index_update_done);
297
298         bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
299                      -c->opts.btree_node_size, true,
300                      gc_phase(GC_PHASE_PENDING_DELETE),
301                      &stats, 0, 0);
302         /*
303          * Don't apply stats - pending deletes aren't tracked in
304          * bch_alloc_stats:
305          */
306 }
307
308 void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b)
309 {
310         bch2_open_bucket_put_refs(c, &b->ob.nr, b->ob.refs);
311 }
312
313 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
314                                              struct disk_reservation *res,
315                                              struct closure *cl,
316                                              unsigned flags)
317 {
318         struct write_point *wp;
319         struct btree *b;
320         BKEY_PADDED(k) tmp;
321         struct bkey_i_extent *e;
322         struct btree_ob_ref ob;
323         struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
324         unsigned nr_reserve;
325         enum alloc_reserve alloc_reserve;
326
327         if (flags & BTREE_INSERT_USE_ALLOC_RESERVE) {
328                 nr_reserve      = 0;
329                 alloc_reserve   = RESERVE_ALLOC;
330         } else if (flags & BTREE_INSERT_USE_RESERVE) {
331                 nr_reserve      = BTREE_NODE_RESERVE / 2;
332                 alloc_reserve   = RESERVE_BTREE;
333         } else {
334                 nr_reserve      = BTREE_NODE_RESERVE;
335                 alloc_reserve   = RESERVE_NONE;
336         }
337
338         mutex_lock(&c->btree_reserve_cache_lock);
339         if (c->btree_reserve_cache_nr > nr_reserve) {
340                 struct btree_alloc *a =
341                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
342
343                 ob = a->ob;
344                 bkey_copy(&tmp.k, &a->k);
345                 mutex_unlock(&c->btree_reserve_cache_lock);
346                 goto mem_alloc;
347         }
348         mutex_unlock(&c->btree_reserve_cache_lock);
349
350 retry:
351         wp = bch2_alloc_sectors_start(c, NULL,
352                                       writepoint_ptr(&c->btree_write_point),
353                                       &devs_have,
354                                       res->nr_replicas,
355                                       c->opts.metadata_replicas_required,
356                                       alloc_reserve, 0, cl);
357         if (IS_ERR(wp))
358                 return ERR_CAST(wp);
359
360         if (wp->sectors_free < c->opts.btree_node_size) {
361                 struct open_bucket *ob;
362                 unsigned i;
363
364                 writepoint_for_each_ptr(wp, ob, i)
365                         if (ob->sectors_free < c->opts.btree_node_size)
366                                 ob->sectors_free = 0;
367
368                 bch2_alloc_sectors_done(c, wp);
369                 goto retry;
370         }
371
372         e = bkey_extent_init(&tmp.k);
373         bch2_alloc_sectors_append_ptrs(c, wp, e, c->opts.btree_node_size);
374
375         ob.nr = 0;
376         bch2_open_bucket_get(c, wp, &ob.nr, ob.refs);
377         bch2_alloc_sectors_done(c, wp);
378 mem_alloc:
379         b = bch2_btree_node_mem_alloc(c);
380
381         /* we hold cannibalize_lock: */
382         BUG_ON(IS_ERR(b));
383         BUG_ON(b->ob.nr);
384
385         bkey_copy(&b->key, &tmp.k);
386         b->ob = ob;
387
388         return b;
389 }
390
391 static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned level)
392 {
393         struct bch_fs *c = as->c;
394         struct btree *b;
395
396         BUG_ON(level >= BTREE_MAX_DEPTH);
397         BUG_ON(!as->reserve->nr);
398
399         b = as->reserve->b[--as->reserve->nr];
400
401         BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id));
402
403         set_btree_node_accessed(b);
404         set_btree_node_dirty(b);
405
406         bch2_bset_init_first(b, &b->data->keys);
407         memset(&b->nr, 0, sizeof(b->nr));
408         b->data->magic = cpu_to_le64(bset_magic(c));
409         b->data->flags = 0;
410         SET_BTREE_NODE_ID(b->data, as->btree_id);
411         SET_BTREE_NODE_LEVEL(b->data, level);
412         b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
413
414         bch2_btree_build_aux_trees(b);
415
416         btree_node_will_make_reachable(as, b);
417
418         trace_btree_node_alloc(c, b);
419         return b;
420 }
421
422 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
423                                                   struct btree *b,
424                                                   struct bkey_format format)
425 {
426         struct btree *n;
427
428         n = bch2_btree_node_alloc(as, b->level);
429
430         n->data->min_key        = b->data->min_key;
431         n->data->max_key        = b->data->max_key;
432         n->data->format         = format;
433
434         btree_node_set_format(n, format);
435
436         bch2_btree_sort_into(as->c, n, b);
437
438         btree_node_reset_sib_u64s(n);
439
440         n->key.k.p = b->key.k.p;
441         return n;
442 }
443
444 static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
445                                                        struct btree *b)
446 {
447         struct bkey_format new_f = bch2_btree_calc_format(b);
448
449         /*
450          * The keys might expand with the new format - if they wouldn't fit in
451          * the btree node anymore, use the old format for now:
452          */
453         if (!bch2_btree_node_format_fits(as->c, b, &new_f))
454                 new_f = b->format;
455
456         return __bch2_btree_node_alloc_replacement(as, b, new_f);
457 }
458
459 static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
460 {
461         struct btree *b = bch2_btree_node_alloc(as, level);
462
463         b->data->min_key = POS_MIN;
464         b->data->max_key = POS_MAX;
465         b->data->format = bch2_btree_calc_format(b);
466         b->key.k.p = POS_MAX;
467
468         btree_node_set_format(b, b->data->format);
469         bch2_btree_build_aux_trees(b);
470
471         six_unlock_write(&b->lock);
472
473         return b;
474 }
475
476 static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
477 {
478         bch2_disk_reservation_put(c, &reserve->disk_res);
479
480         mutex_lock(&c->btree_reserve_cache_lock);
481
482         while (reserve->nr) {
483                 struct btree *b = reserve->b[--reserve->nr];
484
485                 six_unlock_write(&b->lock);
486
487                 if (c->btree_reserve_cache_nr <
488                     ARRAY_SIZE(c->btree_reserve_cache)) {
489                         struct btree_alloc *a =
490                                 &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
491
492                         a->ob = b->ob;
493                         b->ob.nr = 0;
494                         bkey_copy(&a->k, &b->key);
495                 } else {
496                         bch2_btree_open_bucket_put(c, b);
497                 }
498
499                 __btree_node_free(c, b, NULL);
500
501                 six_unlock_intent(&b->lock);
502         }
503
504         mutex_unlock(&c->btree_reserve_cache_lock);
505
506         mempool_free(reserve, &c->btree_reserve_pool);
507 }
508
509 static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
510                                                     unsigned nr_nodes,
511                                                     unsigned flags,
512                                                     struct closure *cl)
513 {
514         struct btree_reserve *reserve;
515         struct btree *b;
516         struct disk_reservation disk_res = { 0, 0 };
517         unsigned sectors = nr_nodes * c->opts.btree_node_size;
518         int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD;
519
520         if (flags & BTREE_INSERT_NOFAIL)
521                 disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
522
523         /*
524          * This check isn't necessary for correctness - it's just to potentially
525          * prevent us from doing a lot of work that'll end up being wasted:
526          */
527         ret = bch2_journal_error(&c->journal);
528         if (ret)
529                 return ERR_PTR(ret);
530
531         if (bch2_disk_reservation_get(c, &disk_res, sectors,
532                                       c->opts.metadata_replicas,
533                                       disk_res_flags))
534                 return ERR_PTR(-ENOSPC);
535
536         BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
537
538         /*
539          * Protects reaping from the btree node cache and using the btree node
540          * open bucket reserve:
541          */
542         ret = bch2_btree_cache_cannibalize_lock(c, cl);
543         if (ret) {
544                 bch2_disk_reservation_put(c, &disk_res);
545                 return ERR_PTR(ret);
546         }
547
548         reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
549
550         reserve->disk_res = disk_res;
551         reserve->nr = 0;
552
553         while (reserve->nr < nr_nodes) {
554                 b = __bch2_btree_node_alloc(c, &disk_res,
555                                             flags & BTREE_INSERT_NOWAIT
556                                             ? NULL : cl, flags);
557                 if (IS_ERR(b)) {
558                         ret = PTR_ERR(b);
559                         goto err_free;
560                 }
561
562                 ret = bch2_check_mark_super(c, BCH_DATA_BTREE,
563                                         bch2_bkey_devs(bkey_i_to_s_c(&b->key)));
564                 if (ret)
565                         goto err_free;
566
567                 reserve->b[reserve->nr++] = b;
568         }
569
570         bch2_btree_cache_cannibalize_unlock(c);
571         return reserve;
572 err_free:
573         bch2_btree_reserve_put(c, reserve);
574         bch2_btree_cache_cannibalize_unlock(c);
575         trace_btree_reserve_get_fail(c, nr_nodes, cl);
576         return ERR_PTR(ret);
577 }
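/*
 * Reserve lifecycle, as wired up in this file: bch2_btree_update_start()
 * obtains a reserve of pre-allocated nodes via bch2_btree_reserve_get(),
 * bch2_btree_node_alloc() hands them out one at a time during the update, and
 * whatever is left over goes back to the btree_reserve_cache (or has its open
 * buckets released) in bch2_btree_reserve_put().
 */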
578
579 /* Asynchronous interior node update machinery */
580
581 static void bch2_btree_update_free(struct btree_update *as)
582 {
583         struct bch_fs *c = as->c;
584
585         BUG_ON(as->nr_new_nodes);
586         BUG_ON(as->nr_pending);
587
588         if (as->reserve)
589                 bch2_btree_reserve_put(c, as->reserve);
590
591         mutex_lock(&c->btree_interior_update_lock);
592         list_del(&as->list);
593
594         closure_debug_destroy(&as->cl);
595         mempool_free(as, &c->btree_interior_update_pool);
596         percpu_ref_put(&c->writes);
597
598         closure_wake_up(&c->btree_interior_update_wait);
599         mutex_unlock(&c->btree_interior_update_lock);
600 }
601
602 static void btree_update_nodes_reachable(struct closure *cl)
603 {
604         struct btree_update *as = container_of(cl, struct btree_update, cl);
605         struct bch_fs *c = as->c;
606
607         bch2_journal_pin_drop(&c->journal, &as->journal);
608
609         mutex_lock(&c->btree_interior_update_lock);
610
611         while (as->nr_new_nodes) {
612                 struct btree *b = as->new_nodes[--as->nr_new_nodes];
613
614                 BUG_ON(b->will_make_reachable != (unsigned long) as);
615                 b->will_make_reachable = 0;
616                 mutex_unlock(&c->btree_interior_update_lock);
617
618                 /*
619                  * b->will_make_reachable prevented it from being written, so
620                  * write it now if it needs to be written:
621                  */
622                 six_lock_read(&b->lock);
623                 bch2_btree_node_write_cond(c, b, btree_node_need_write(b));
624                 six_unlock_read(&b->lock);
625                 mutex_lock(&c->btree_interior_update_lock);
626         }
627
628         while (as->nr_pending)
629                 bch2_btree_node_free_ondisk(c, &as->pending[--as->nr_pending]);
630
631         mutex_unlock(&c->btree_interior_update_lock);
632
633         closure_wake_up(&as->wait);
634
635         bch2_btree_update_free(as);
636 }
637
638 static void btree_update_wait_on_journal(struct closure *cl)
639 {
640         struct btree_update *as = container_of(cl, struct btree_update, cl);
641         struct bch_fs *c = as->c;
642         int ret;
643
644         ret = bch2_journal_open_seq_async(&c->journal, as->journal_seq, cl);
645         if (ret < 0)
646                 goto err;
647         if (!ret)
648                 continue_at(cl, btree_update_wait_on_journal, system_wq);
649
650         bch2_journal_flush_seq_async(&c->journal, as->journal_seq, cl);
651 err:
652         continue_at(cl, btree_update_nodes_reachable, system_wq);
653 }
654
655 static void btree_update_nodes_written(struct closure *cl)
656 {
657         struct btree_update *as = container_of(cl, struct btree_update, cl);
658         struct bch_fs *c = as->c;
659         struct btree *b;
660
661         /*
662          * We did an update to a parent node where the pointers we added pointed
663          * to child nodes that weren't written yet: now, the child nodes have
664          * been written so we can write out the update to the interior node.
665          */
666 retry:
667         mutex_lock(&c->btree_interior_update_lock);
668         as->nodes_written = true;
669
670         switch (as->mode) {
671         case BTREE_INTERIOR_NO_UPDATE:
672                 BUG();
673         case BTREE_INTERIOR_UPDATING_NODE:
674                 /* The usual case: */
675                 b = READ_ONCE(as->b);
676
677                 if (!six_trylock_read(&b->lock)) {
678                         mutex_unlock(&c->btree_interior_update_lock);
679                         six_lock_read(&b->lock);
680                         six_unlock_read(&b->lock);
681                         goto retry;
682                 }
683
684                 BUG_ON(!btree_node_dirty(b));
685                 closure_wait(&btree_current_write(b)->wait, cl);
686
687                 list_del(&as->write_blocked_list);
688                 mutex_unlock(&c->btree_interior_update_lock);
689
690                 /*
691                  * b->write_blocked prevented it from being written, so
692                  * write it now if it needs to be written:
693                  */
694                 bch2_btree_node_write_cond(c, b, true);
695                 six_unlock_read(&b->lock);
696                 break;
697
698         case BTREE_INTERIOR_UPDATING_AS:
699                 /*
700                  * The btree node we originally updated has been freed and is
701                  * being rewritten - so we don't need to write anything here, we just
702                  * need to signal to that btree_update that it's ok to make the
703                  * new replacement node visible:
704                  */
705                 closure_put(&as->parent_as->cl);
706
707                 /*
708                  * and then we have to wait on that btree_update to finish:
709                  */
710                 closure_wait(&as->parent_as->wait, cl);
711                 mutex_unlock(&c->btree_interior_update_lock);
712                 break;
713
714         case BTREE_INTERIOR_UPDATING_ROOT:
715                 /* b is the new btree root: */
716                 b = READ_ONCE(as->b);
717
718                 if (!six_trylock_read(&b->lock)) {
719                         mutex_unlock(&c->btree_interior_update_lock);
720                         six_lock_read(&b->lock);
721                         six_unlock_read(&b->lock);
722                         goto retry;
723                 }
724
725                 BUG_ON(c->btree_roots[b->btree_id].as != as);
726                 c->btree_roots[b->btree_id].as = NULL;
727
728                 bch2_btree_set_root_ondisk(c, b, WRITE);
729
730                 /*
731                  * We don't have to wait on anything here (before
732                  * btree_update_nodes_reachable frees the old nodes
733                  * ondisk) - we've ensured that the very next journal write will
734                  * have the pointer to the new root, and before the allocator
735                  * can reuse the old nodes it'll have to do a journal commit:
736                  */
737                 six_unlock_read(&b->lock);
738                 mutex_unlock(&c->btree_interior_update_lock);
739
740                 /*
741                  * Bit of funny circularity going on here that we have to break:
742                  *
743                  * We have to drop our journal pin before writing the journal
744                  * entry that points to the new btree root: else, we could
745                  * deadlock if the journal currently happens to be full.
746                  *
747                  * This means we're dropping the journal pin _before_ the new
748                  * nodes are technically reachable - but this is safe, because
749                  * after the bch2_btree_set_root_ondisk() call above they will
750                  * be reachable as of the very next journal write:
751                  */
752                 bch2_journal_pin_drop(&c->journal, &as->journal);
753
754                 as->journal_seq = bch2_journal_last_unwritten_seq(&c->journal);
755
756                 btree_update_wait_on_journal(cl);
757                 return;
758         }
759
760         continue_at(cl, btree_update_nodes_reachable, system_wq);
761 }
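/*
 * Recap of the three modes handled above: UPDATING_NODE writes out the parent
 * node whose write was blocked on us; UPDATING_AS signals the btree_update
 * that is rewriting our parent and waits for it to finish; UPDATING_ROOT
 * points the on-disk root at the new node and then waits on a journal write
 * before the new nodes are considered reachable.
 */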
762
763 /*
764  * We're updating @b with pointers to nodes that haven't finished writing yet:
765  * block @b from being written until @as completes
766  */
767 static void btree_update_updated_node(struct btree_update *as, struct btree *b)
768 {
769         struct bch_fs *c = as->c;
770
771         mutex_lock(&c->btree_interior_update_lock);
772
773         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
774         BUG_ON(!btree_node_dirty(b));
775
776         as->mode = BTREE_INTERIOR_UPDATING_NODE;
777         as->b = b;
778         list_add(&as->write_blocked_list, &b->write_blocked);
779
780         mutex_unlock(&c->btree_interior_update_lock);
781
782         /*
783          * In general, when you're staging things in a journal that will later
784          * be written elsewhere, and you also want to guarantee ordering - that
785          * is, if you have updates a, b, c, then after a crash you should never
786          * see c without a and b - there's a problem:
787          *
788          * If the final destination of the update(s) (i.e. btree node) can be
789          * written/flushed _before_ the relevant journal entry - oops, that
790          * breaks ordering, since the various leaf nodes can be written in any
791          * order.
792          *
793          * Normally we use bset->journal_seq to deal with this - if during
794          * recovery we find a btree node write that's newer than the newest
795          * journal entry, we just ignore it - we don't need it, anything we're
796          * supposed to have (that we reported as completed via fsync()) will
797          * still be in the journal, and as far as the state of the journal is
798          * concerned that btree node write never happened.
799          *
800          * That breaks when we're rewriting/splitting/merging nodes, since we're
801          * mixing btree node writes that haven't happened yet with previously
802          * written data that has been reported as completed to the journal.
803          *
804          * Thus, before making the new nodes reachable, we have to wait for the
805          * newest journal sequence number we have data for to be written (if it
806          * hasn't been yet).
807          */
808         bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
809 }
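/*
 * Hypothetical example of the ordering hazard described above: keys journalled
 * at seq N live in a node we're rewriting, and the replacement nodes (which
 * carry those keys) get written before seq N is persistent.  If the
 * replacements became reachable and we crashed, those keys would be visible on
 * disk even though seq N never made it to the journal, and the
 * bset->journal_seq trick can't filter them out because the replacements are
 * brand new writes - hence the bch2_journal_wait_on_seq() call above.
 */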
810
811 static void interior_update_flush(struct journal *j,
812                         struct journal_entry_pin *pin, u64 seq)
813 {
814         struct btree_update *as =
815                 container_of(pin, struct btree_update, journal);
816
817         bch2_journal_flush_seq_async(j, as->journal_seq, NULL);
818 }
819
820 static void btree_update_reparent(struct btree_update *as,
821                                   struct btree_update *child)
822 {
823         struct bch_fs *c = as->c;
824
825         child->b = NULL;
826         child->mode = BTREE_INTERIOR_UPDATING_AS;
827         child->parent_as = as;
828         closure_get(&as->cl);
829
830         /*
831          * When we write a new btree root, we have to drop our journal pin
832          * _before_ the new nodes are technically reachable; see
833          * btree_update_nodes_written().
834          *
835          * This goes for journal pins that are recursively blocked on us - so,
836          * just transfer the journal pin to the new interior update so
837          * btree_update_nodes_written() can drop it.
838          */
839         bch2_journal_pin_add_if_older(&c->journal, &child->journal,
840                                       &as->journal, interior_update_flush);
841         bch2_journal_pin_drop(&c->journal, &child->journal);
842
843         as->journal_seq = max(as->journal_seq, child->journal_seq);
844 }
845
846 static void btree_update_updated_root(struct btree_update *as)
847 {
848         struct bch_fs *c = as->c;
849         struct btree_root *r = &c->btree_roots[as->btree_id];
850
851         mutex_lock(&c->btree_interior_update_lock);
852
853         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
854
855         /*
856          * Old root might not be persistent yet - if so, redirect its
857          * btree_update operation to point to us:
858          */
859         if (r->as)
860                 btree_update_reparent(as, r->as);
861
862         as->mode = BTREE_INTERIOR_UPDATING_ROOT;
863         as->b = r->b;
864         r->as = as;
865
866         mutex_unlock(&c->btree_interior_update_lock);
867
868         /*
869          * When we're rewriting nodes and updating interior nodes, there's an
870          * issue with updates that haven't been written in the journal getting
871          * mixed together with older data - see btree_update_updated_node()
872          * for the explanation.
873          *
874          * However, this doesn't affect us when we're writing a new btree root -
875          * because to make that new root reachable we have to write out a new
876          * journal entry, which must necessarily be newer than as->journal_seq.
877          */
878 }
879
880 static void btree_node_will_make_reachable(struct btree_update *as,
881                                            struct btree *b)
882 {
883         struct bch_fs *c = as->c;
884
885         mutex_lock(&c->btree_interior_update_lock);
886         BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
887         BUG_ON(b->will_make_reachable);
888
889         as->new_nodes[as->nr_new_nodes++] = b;
890         b->will_make_reachable = 1UL|(unsigned long) as;
891
892         closure_get(&as->cl);
893         mutex_unlock(&c->btree_interior_update_lock);
894 }
895
896 static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
897 {
898         struct btree_update *as;
899         unsigned long v;
900         unsigned i;
901
902         mutex_lock(&c->btree_interior_update_lock);
903         v = xchg(&b->will_make_reachable, 0);
904         as = (struct btree_update *) (v & ~1UL);
905
906         if (!as) {
907                 mutex_unlock(&c->btree_interior_update_lock);
908                 return;
909         }
910
911         for (i = 0; i < as->nr_new_nodes; i++)
912                 if (as->new_nodes[i] == b)
913                         goto found;
914
915         BUG();
916 found:
917         array_remove_item(as->new_nodes, as->nr_new_nodes, i);
918         mutex_unlock(&c->btree_interior_update_lock);
919
920         if (v & 1)
921                 closure_put(&as->cl);
922 }
923
924 static void btree_interior_update_add_node_reference(struct btree_update *as,
925                                                      struct btree *b)
926 {
927         struct bch_fs *c = as->c;
928         struct pending_btree_node_free *d;
929
930         mutex_lock(&c->btree_interior_update_lock);
931
932         /* Add this node to the list of nodes being freed: */
933         BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
934
935         d = &as->pending[as->nr_pending++];
936         d->index_update_done    = false;
937         d->seq                  = b->data->keys.seq;
938         d->btree_id             = b->btree_id;
939         d->level                = b->level;
940         bkey_copy(&d->key, &b->key);
941
942         mutex_unlock(&c->btree_interior_update_lock);
943 }
944
945 /*
946  * @b is being split/rewritten: it may have pointers to not-yet-written btree
947  * nodes and thus outstanding btree_updates - redirect @b's
948  * btree_updates to point to this btree_update:
949  */
950 void bch2_btree_interior_update_will_free_node(struct btree_update *as,
951                                                struct btree *b)
952 {
953         struct bch_fs *c = as->c;
954         struct closure *cl, *cl_n;
955         struct btree_update *p, *n;
956         struct btree_write *w;
957         struct bset_tree *t;
958
959         set_btree_node_dying(b);
960
961         if (btree_node_fake(b))
962                 return;
963
964         btree_interior_update_add_node_reference(as, b);
965
966         /*
967          * Does this node have data that hasn't been written in the journal?
968          *
969          * If so, we have to wait for the corresponding journal entry to be
970          * written before making the new nodes reachable - we can't just carry
971          * over the bset->journal_seq tracking, since we'll be mixing those keys
972          * in with keys that aren't in the journal anymore:
973          */
974         for_each_bset(b, t)
975                 as->journal_seq = max(as->journal_seq,
976                                       le64_to_cpu(bset(b, t)->journal_seq));
977
978         mutex_lock(&c->btree_interior_update_lock);
979
980         /*
981          * Does this node have any btree_update operations preventing
982          * it from being written?
983          *
984          * If so, redirect them to point to this btree_update: we can
985          * write out our new nodes, but we won't make them visible until those
986          * operations complete
987          */
988         list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
989                 list_del(&p->write_blocked_list);
990                 btree_update_reparent(as, p);
991         }
992
993         clear_btree_node_dirty(b);
994         clear_btree_node_need_write(b);
995         w = btree_current_write(b);
996
997         /*
998          * Does this node have any btree_update operations waiting on this node
999          * to be written?
1000          *
1001          * If so, wake them up when this btree_update operation is reachable:
1002          */
1003         llist_for_each_entry_safe(cl, cl_n, llist_del_all(&w->wait.list), list)
1004                 llist_add(&cl->list, &as->wait.list);
1005
1006         /*
1007          * Does this node have unwritten data that has a pin on the journal?
1008          *
1009          * If so, transfer that pin to the btree_update operation -
1010          * note that if we're freeing multiple nodes, we only need to keep the
1011          * oldest pin of any of the nodes we're freeing. We'll release the pin
1012          * when the new nodes are persistent and reachable on disk:
1013          */
1014         bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1015                                       &as->journal, interior_update_flush);
1016         bch2_journal_pin_drop(&c->journal, &w->journal);
1017
1018         w = btree_prev_write(b);
1019         bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1020                                       &as->journal, interior_update_flush);
1021         bch2_journal_pin_drop(&c->journal, &w->journal);
1022
1023         mutex_unlock(&c->btree_interior_update_lock);
1024 }
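/*
 * In short, bch2_btree_interior_update_will_free_node() transfers everything
 * still hanging off @b over to @as: a pending-free entry, the newest bset
 * journal_seq, any btree_updates blocked on @b's write, any closures waiting
 * on that write, and the journal pins of both of @b's btree_writes.
 */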
1025
1026 void bch2_btree_update_done(struct btree_update *as)
1027 {
1028         BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
1029
1030         bch2_btree_reserve_put(as->c, as->reserve);
1031         as->reserve = NULL;
1032
1033         continue_at(&as->cl, btree_update_nodes_written, system_freezable_wq);
1034 }
1035
1036 struct btree_update *
1037 bch2_btree_update_start(struct bch_fs *c, enum btree_id id,
1038                         unsigned nr_nodes, unsigned flags,
1039                         struct closure *cl)
1040 {
1041         struct btree_reserve *reserve;
1042         struct btree_update *as;
1043
1044         if (unlikely(!percpu_ref_tryget(&c->writes)))
1045                 return ERR_PTR(-EROFS);
1046
1047         reserve = bch2_btree_reserve_get(c, nr_nodes, flags, cl);
1048         if (IS_ERR(reserve)) {
1049                 percpu_ref_put(&c->writes);
1050                 return ERR_CAST(reserve);
1051         }
1052
1053         as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
1054         memset(as, 0, sizeof(*as));
1055         closure_init(&as->cl, NULL);
1056         as->c           = c;
1057         as->mode        = BTREE_INTERIOR_NO_UPDATE;
1058         as->btree_id    = id;
1059         as->reserve     = reserve;
1060         INIT_LIST_HEAD(&as->write_blocked_list);
1061
1062         bch2_keylist_init(&as->parent_keys, as->inline_keys);
1063
1064         mutex_lock(&c->btree_interior_update_lock);
1065         list_add_tail(&as->list, &c->btree_interior_update_list);
1066         mutex_unlock(&c->btree_interior_update_lock);
1067
1068         return as;
1069 }
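/*
 * Lifecycle of a btree_update, as implemented in this file:
 *
 *   bch2_btree_update_start()                   allocate the update + node reserve
 *   bch2_btree_interior_update_will_free_node() record nodes being freed
 *   btree_node_will_make_reachable()            record nodes being added
 *   btree_update_updated_node()/_updated_root() decide how the index update lands
 *   bch2_btree_update_done()                    kick off btree_update_nodes_written()
 *   btree_update_nodes_written()                flush the parent/root update
 *   btree_update_nodes_reachable()              write new nodes, free old ones,
 *                                               then bch2_btree_update_free()
 */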
1070
1071 /* Btree root updates: */
1072
1073 static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
1074 {
1075         /* Root nodes cannot be reaped */
1076         mutex_lock(&c->btree_cache.lock);
1077         list_del_init(&b->list);
1078         mutex_unlock(&c->btree_cache.lock);
1079
1080         mutex_lock(&c->btree_root_lock);
1081         BUG_ON(btree_node_root(c, b) &&
1082                (b->level < btree_node_root(c, b)->level ||
1083                 !btree_node_dying(btree_node_root(c, b))));
1084
1085         btree_node_root(c, b) = b;
1086         mutex_unlock(&c->btree_root_lock);
1087
1088         bch2_recalc_btree_reserve(c);
1089 }
1090
1091 static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
1092 {
1093         struct bch_fs *c = as->c;
1094         struct btree *old = btree_node_root(c, b);
1095         struct bch_fs_usage stats = { 0 };
1096
1097         __bch2_btree_set_root_inmem(c, b);
1098
1099         bch2_mark_key(c, bkey_i_to_s_c(&b->key),
1100                       c->opts.btree_node_size, true,
1101                       gc_pos_btree_root(b->btree_id),
1102                       &stats, 0, 0);
1103
1104         if (old && !btree_node_fake(old))
1105                 bch2_btree_node_free_index(as, NULL,
1106                                            bkey_i_to_s_c(&old->key),
1107                                            &stats);
1108         bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
1109                             gc_pos_btree_root(b->btree_id));
1110 }
1111
1112 static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b, int rw)
1113 {
1114         struct btree_root *r = &c->btree_roots[b->btree_id];
1115
1116         mutex_lock(&c->btree_root_lock);
1117
1118         BUG_ON(b != r->b);
1119         bkey_copy(&r->key, &b->key);
1120         r->level = b->level;
1121         r->alive = true;
1122         if (rw == WRITE)
1123                 c->btree_roots_dirty = true;
1124
1125         mutex_unlock(&c->btree_root_lock);
1126 }
1127
1128 /**
1129  * bch2_btree_set_root - update the root in memory and on disk
1130  *
1131  * To ensure forward progress, the current task must not be holding any
1132  * btree node write locks. However, you must hold an intent lock on the
1133  * old root.
1134  *
1135  * Note: This allocates a journal entry but doesn't add any keys to
1136  * it.  All the btree roots are part of every journal write, so there
1137  * is nothing new to be done.  This just guarantees that there is a
1138  * journal write.
1139  */
1140 static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
1141                                 struct btree_iter *iter)
1142 {
1143         struct bch_fs *c = as->c;
1144         struct btree *old;
1145
1146         trace_btree_set_root(c, b);
1147         BUG_ON(!b->written);
1148
1149         old = btree_node_root(c, b);
1150
1151         /*
1152          * Ensure no one is using the old root while we switch to the
1153          * new root:
1154          */
1155         bch2_btree_node_lock_write(old, iter);
1156
1157         bch2_btree_set_root_inmem(as, b);
1158
1159         btree_update_updated_root(as);
1160
1161         /*
1162          * Unlock old root after new root is visible:
1163          *
1164          * The new root isn't persistent, but that's ok: we still have
1165          * an intent lock on the new root, and any updates that would
1166          * depend on the new root would have to update the new root.
1167          */
1168         bch2_btree_node_unlock_write(old, iter);
1169 }
1170
1171 /* Interior node updates: */
1172
1173 static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
1174                                         struct btree_iter *iter,
1175                                         struct bkey_i *insert,
1176                                         struct btree_node_iter *node_iter)
1177 {
1178         struct bch_fs *c = as->c;
1179         struct bch_fs_usage stats = { 0 };
1180         struct bkey_packed *k;
1181         struct bkey tmp;
1182
1183         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
1184
1185         if (bkey_extent_is_data(&insert->k))
1186                 bch2_mark_key(c, bkey_i_to_s_c(insert),
1187                              c->opts.btree_node_size, true,
1188                              gc_pos_btree_node(b), &stats, 0, 0);
1189
1190         while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
1191                !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
1192                 bch2_btree_node_iter_advance(node_iter, b);
1193
1194         /*
1195          * If we're overwriting, look up the pending delete and mark it, so that
1196          * gc marks it on the pending delete list:
1197          */
1198         if (k && !bkey_cmp_packed(b, k, &insert->k))
1199                 bch2_btree_node_free_index(as, b,
1200                                            bkey_disassemble(b, k, &tmp),
1201                                            &stats);
1202
1203         bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
1204                             gc_pos_btree_node(b));
1205
1206         bch2_btree_bset_insert_key(iter, b, node_iter, insert);
1207         set_btree_node_dirty(b);
1208         set_btree_node_need_write(b);
1209 }
1210
1211 /*
1212  * Move keys from n1 (original replacement node, now lower node) to n2 (higher
1213  * node)
1214  */
1215 static struct btree *__btree_split_node(struct btree_update *as,
1216                                         struct btree *n1,
1217                                         struct btree_iter *iter)
1218 {
1219         size_t nr_packed = 0, nr_unpacked = 0;
1220         struct btree *n2;
1221         struct bset *set1, *set2;
1222         struct bkey_packed *k, *prev = NULL;
1223
1224         n2 = bch2_btree_node_alloc(as, n1->level);
1225
1226         n2->data->max_key       = n1->data->max_key;
1227         n2->data->format        = n1->format;
1228         n2->key.k.p = n1->key.k.p;
1229
1230         btree_node_set_format(n2, n2->data->format);
1231
1232         set1 = btree_bset_first(n1);
1233         set2 = btree_bset_first(n2);
1234
1235         /*
1236          * Has to be a linear search because we don't have an auxiliary
1237          * search tree yet
1238          */
1239         k = set1->start;
1240         while (1) {
1241                 if (bkey_next(k) == vstruct_last(set1))
1242                         break;
1243                 if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
1244                         break;
1245
1246                 if (bkey_packed(k))
1247                         nr_packed++;
1248                 else
1249                         nr_unpacked++;
1250
1251                 prev = k;
1252                 k = bkey_next(k);
1253         }
1254
1255         BUG_ON(!prev);
1256
1257         n1->key.k.p = bkey_unpack_pos(n1, prev);
1258         n1->data->max_key = n1->key.k.p;
1259         n2->data->min_key =
1260                 btree_type_successor(n1->btree_id, n1->key.k.p);
1261
1262         set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
1263         set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
1264
1265         set_btree_bset_end(n1, n1->set);
1266         set_btree_bset_end(n2, n2->set);
1267
1268         n2->nr.live_u64s        = le16_to_cpu(set2->u64s);
1269         n2->nr.bset_u64s[0]     = le16_to_cpu(set2->u64s);
1270         n2->nr.packed_keys      = n1->nr.packed_keys - nr_packed;
1271         n2->nr.unpacked_keys    = n1->nr.unpacked_keys - nr_unpacked;
1272
1273         n1->nr.live_u64s        = le16_to_cpu(set1->u64s);
1274         n1->nr.bset_u64s[0]     = le16_to_cpu(set1->u64s);
1275         n1->nr.packed_keys      = nr_packed;
1276         n1->nr.unpacked_keys    = nr_unpacked;
1277
1278         BUG_ON(!set1->u64s);
1279         BUG_ON(!set2->u64s);
1280
1281         memcpy_u64s(set2->start,
1282                     vstruct_end(set1),
1283                     le16_to_cpu(set2->u64s));
1284
1285         btree_node_reset_sib_u64s(n1);
1286         btree_node_reset_sib_u64s(n2);
1287
1288         bch2_verify_btree_nr_keys(n1);
1289         bch2_verify_btree_nr_keys(n2);
1290
1291         if (n1->level) {
1292                 btree_node_interior_verify(n1);
1293                 btree_node_interior_verify(n2);
1294         }
1295
1296         return n2;
1297 }
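/*
 * Note on the pivot chosen above: the scan stops at the first key whose offset
 * is at or past 3/5 of set1's u64s (or at the last key), so n1 keeps roughly
 * the first 60% of the original node's u64s and n2 receives the remainder.
 */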
1298
1299 /*
1300  * For updates to interior nodes, we've got to do the insert before we split
1301  * because the stuff we're inserting has to be inserted atomically. Post split,
1302  * the keys might have to go in different nodes and the split would no longer be
1303  * atomic.
1304  *
1305  * Worse, if the insert is from btree node coalescing and we do the insert after
1306  * we do the split (and pick the pivot), the pivot we pick might be between
1307  * nodes that were coalesced, and thus in the middle of a child node post
1308  * coalescing:
1309  */
1310 static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
1311                                     struct btree_iter *iter,
1312                                     struct keylist *keys)
1313 {
1314         struct btree_node_iter node_iter;
1315         struct bkey_i *k = bch2_keylist_front(keys);
1316         struct bkey_packed *p;
1317         struct bset *i;
1318
1319         BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
1320
1321         bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
1322
1323         while (!bch2_keylist_empty(keys)) {
1324                 k = bch2_keylist_front(keys);
1325
1326                 BUG_ON(bch_keylist_u64s(keys) >
1327                        bch_btree_keys_u64s_remaining(as->c, b));
1328                 BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
1329                 BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
1330
1331                 bch2_insert_fixup_btree_ptr(as, b, iter, k, &node_iter);
1332                 bch2_keylist_pop_front(keys);
1333         }
1334
1335         /*
1336          * We can't tolerate whiteouts here - with whiteouts there can be
1337          * duplicate keys, and it would be rather bad if we picked a duplicate
1338          * for the pivot:
1339          */
1340         i = btree_bset_first(b);
1341         p = i->start;
1342         while (p != vstruct_last(i))
1343                 if (bkey_deleted(p)) {
1344                         le16_add_cpu(&i->u64s, -p->u64s);
1345                         set_btree_bset_end(b, b->set);
1346                         memmove_u64s_down(p, bkey_next(p),
1347                                           (u64 *) vstruct_last(i) -
1348                                           (u64 *) p);
1349                 } else
1350                         p = bkey_next(p);
1351
1352         BUG_ON(b->nsets != 1 ||
1353                b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
1354
1355         btree_node_interior_verify(b);
1356 }
1357
1358 static void btree_split(struct btree_update *as, struct btree *b,
1359                         struct btree_iter *iter, struct keylist *keys)
1360 {
1361         struct bch_fs *c = as->c;
1362         struct btree *parent = btree_node_parent(iter, b);
1363         struct btree *n1, *n2 = NULL, *n3 = NULL;
1364         u64 start_time = local_clock();
1365
1366         BUG_ON(!parent && (b != btree_node_root(c, b)));
1367         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1368
1369         bch2_btree_interior_update_will_free_node(as, b);
1370
1371         n1 = bch2_btree_node_alloc_replacement(as, b);
1372
1373         if (keys)
1374                 btree_split_insert_keys(as, n1, iter, keys);
1375
1376         if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
1377                 trace_btree_split(c, b);
1378
1379                 n2 = __btree_split_node(as, n1, iter);
1380
1381                 bch2_btree_build_aux_trees(n2);
1382                 bch2_btree_build_aux_trees(n1);
1383                 six_unlock_write(&n2->lock);
1384                 six_unlock_write(&n1->lock);
1385
1386                 bch2_btree_node_write(c, n2, SIX_LOCK_intent);
1387
1388                 /*
1389                  * Note that on recursive splits, parent_keys == keys, so we
1390                  * can't start adding new keys to parent_keys before emptying it
1391                  * out (which we did with btree_split_insert_keys() above)
1392                  */
1393                 bch2_keylist_add(&as->parent_keys, &n1->key);
1394                 bch2_keylist_add(&as->parent_keys, &n2->key);
1395
1396                 if (!parent) {
1397                         /* Depth increases, make a new root */
1398                         n3 = __btree_root_alloc(as, b->level + 1);
1399
1400                         n3->sib_u64s[0] = U16_MAX;
1401                         n3->sib_u64s[1] = U16_MAX;
1402
1403                         btree_split_insert_keys(as, n3, iter, &as->parent_keys);
1404
1405                         bch2_btree_node_write(c, n3, SIX_LOCK_intent);
1406                 }
1407         } else {
1408                 trace_btree_compact(c, b);
1409
1410                 bch2_btree_build_aux_trees(n1);
1411                 six_unlock_write(&n1->lock);
1412
1413                 bch2_keylist_add(&as->parent_keys, &n1->key);
1414         }
1415
1416         bch2_btree_node_write(c, n1, SIX_LOCK_intent);
1417
1418         /* New nodes all written, now make them visible: */
1419
1420         if (parent) {
1421                 /* Split a non root node */
1422                 bch2_btree_insert_node(as, parent, iter, &as->parent_keys);
1423         } else if (n3) {
1424                 bch2_btree_set_root(as, n3, iter);
1425         } else {
1426                 /* Root filled up but didn't need to be split */
1427                 bch2_btree_set_root(as, n1, iter);
1428         }
1429
1430         bch2_btree_open_bucket_put(c, n1);
1431         if (n2)
1432                 bch2_btree_open_bucket_put(c, n2);
1433         if (n3)
1434                 bch2_btree_open_bucket_put(c, n3);
1435
1436         /*
1437          * Note - at this point other linked iterators could still have @b read
1438          * locked; we're depending on the bch2_btree_iter_node_replace() calls
1439          * below removing all references to @b so we don't return with other
1440          * iterators pointing to a node they have locked that's been freed.
1441          *
1442          * We have to free the node first because the bch2_btree_iter_node_replace()
1443          * calls will drop _our_ iterator's reference - and intent lock - to @b.
1444          */
1445         bch2_btree_node_free_inmem(c, b, iter);
1446
1447         /* Successful split, update the iterator to point to the new nodes: */
1448
1449         if (n3)
1450                 bch2_btree_iter_node_replace(iter, n3);
1451         if (n2)
1452                 bch2_btree_iter_node_replace(iter, n2);
1453         bch2_btree_iter_node_replace(iter, n1);
1454
1455         bch2_time_stats_update(&c->btree_split_time, start_time);
1456 }
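/*
 * Summary of the paths through btree_split() above:
 *
 *  - the (possibly compacted) replacement still fits: only n1 is written and
 *    its key is added to parent_keys (the trace_btree_compact() path);
 *  - the node is over BTREE_SPLIT_THRESHOLD: it's split into n1/n2, and if
 *    there was no parent a new root n3 is allocated one level up;
 *  - finally, parent_keys is inserted into the parent via
 *    bch2_btree_insert_node() (recursing into btree_split() if the parent is
 *    also full), or the new root is installed with bch2_btree_set_root().
 */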
1457
1458 static void
1459 bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
1460                                 struct btree_iter *iter, struct keylist *keys)
1461 {
1462         struct btree_iter *linked;
1463         struct btree_node_iter node_iter;
1464         struct bkey_i *insert = bch2_keylist_front(keys);
1465         struct bkey_packed *k;
1466
1467         /* Don't screw up @iter's position: */
1468         node_iter = iter->l[b->level].iter;
1469
1470         /*
1471          * btree_split() and btree_gc_coalesce() will insert keys before
1472          * the iterator's current position - they know the keys go in
1473          * the node the iterator points to:
1474          */
1475         while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
1476                (bkey_cmp_packed(b, k, &insert->k) >= 0))
1477                 ;
1478
1479         while (!bch2_keylist_empty(keys)) {
1480                 insert = bch2_keylist_front(keys);
1481
1482                 bch2_insert_fixup_btree_ptr(as, b, iter, insert, &node_iter);
1483                 bch2_keylist_pop_front(keys);
1484         }
1485
1486         btree_update_updated_node(as, b);
1487
1488         for_each_linked_btree_node(iter, b, linked)
1489                 bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);
1490         bch2_btree_node_iter_peek(&iter->l[b->level].iter, b);
1491
1492         bch2_btree_iter_verify(iter, b);
1493 }
1494
1495 /**
1496  * bch2_btree_insert_node - insert bkeys into a given btree node
1497  *
1498  * @as:                 btree_update this insert is part of
1499  * @b:                  interior btree node to insert into
1500  * @iter:               btree iterator
1501  * @keys:               list of keys to insert
1502  *
1503  * Inserts as many keys as it can into a given btree node, splitting it if the
1504  * keys don't all fit. @b must be an interior node: the keys either go in
1505  * atomically, or @b is split and the keys are inserted into the new nodes.
1506  */
1507 void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
1508                             struct btree_iter *iter, struct keylist *keys)
1509 {
1510         struct bch_fs *c = as->c;
1511         int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
1512         int old_live_u64s = b->nr.live_u64s;
1513         int live_u64s_added, u64s_added;
1514
1515         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1516         BUG_ON(!b->level);
1517         BUG_ON(!as || as->b);
1518         bch2_verify_keylist_sorted(keys);
1519
1520         if (as->must_rewrite)
1521                 goto split;
1522
1523         bch2_btree_node_lock_for_insert(c, b, iter);
1524
1525         if (!bch2_btree_node_insert_fits(c, b, bch_keylist_u64s(keys))) {
1526                 bch2_btree_node_unlock_write(b, iter);
1527                 goto split;
1528         }
1529
1530         bch2_btree_insert_keys_interior(as, b, iter, keys);
1531
1532         live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
1533         u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
1534
1535         if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
1536                 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
1537         if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
1538                 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
1539
1540         if (u64s_added > live_u64s_added &&
1541             bch2_maybe_compact_whiteouts(c, b))
1542                 bch2_btree_iter_reinit_node(iter, b);
1543
1544         bch2_btree_node_unlock_write(b, iter);
1545
1546         btree_node_interior_verify(b);
1547
1548         bch2_foreground_maybe_merge(c, iter, b->level);
1549         return;
1550 split:
1551         btree_split(as, b, iter, keys);
1552 }
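     /*
      * (Editorial usage note:) in this file the keys passed to
      * bch2_btree_insert_node() are accumulated in as->parent_keys - see
      * btree_split() above, and __bch2_foreground_maybe_merge() and
      * __bch2_btree_node_update_key() below; __btree_node_rewrite() instead
      * passes a single-key list built with keylist_single().
      */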
1553
1554 int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
1555                           unsigned btree_reserve_flags)
1556 {
1557         struct btree *b = iter->l[0].b;
1558         struct btree_update *as;
1559         struct closure cl;
1560         int ret = 0;
1561
1562         /*
1563          * We already have a disk reservation and open buckets pinned; this
1564          * allocation must not block:
1565          */
1566         if (iter->btree_id == BTREE_ID_EXTENTS)
1567                 btree_reserve_flags |= BTREE_INSERT_USE_RESERVE;
1568
1569         closure_init_stack(&cl);
1570
1571         /* Hack, because gc and splitting nodes don't mix yet: */
1572         if (!down_read_trylock(&c->gc_lock)) {
1573                 bch2_btree_iter_unlock(iter);
1574                 down_read(&c->gc_lock);
1575
1576                 if (btree_iter_linked(iter))
1577                         ret = -EINTR;
1578         }
1579
1580         /*
1581          * XXX: figure out how far we might need to split,
1582          * instead of locking/reserving all the way to the root:
1583          */
1584         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1585                 ret = -EINTR;
1586                 goto out;
1587         }
1588
1589         as = bch2_btree_update_start(c, iter->btree_id,
1590                                      btree_update_reserve_required(c, b),
1591                                      btree_reserve_flags, &cl);
1592         if (IS_ERR(as)) {
1593                 ret = PTR_ERR(as);
1594                 if (ret == -EAGAIN) {
1595                         bch2_btree_iter_unlock(iter);
1596                         up_read(&c->gc_lock);
1597                         closure_sync(&cl);
1598                         return -EINTR;
1599                 }
1600                 goto out;
1601         }
1602
1603         btree_split(as, b, iter, NULL);
1604         bch2_btree_update_done(as);
1605
1606         bch2_btree_iter_set_locks_want(iter, 1);
1607 out:
1608         up_read(&c->gc_lock);
1609         closure_sync(&cl);
1610         return ret;
1611 }
1612
1613 int __bch2_foreground_maybe_merge(struct bch_fs *c,
1614                                   struct btree_iter *iter,
1615                                   unsigned level,
1616                                   enum btree_node_sibling sib)
1617 {
1618         struct btree_update *as;
1619         struct bkey_format_state new_s;
1620         struct bkey_format new_f;
1621         struct bkey_i delete;
1622         struct btree *b, *m, *n, *prev, *next, *parent;
1623         struct closure cl;
1624         size_t sib_u64s;
1625         int ret = 0;
1626
1627         closure_init_stack(&cl);
1628 retry:
1629         if (!bch2_btree_node_relock(iter, level))
1630                 return 0;
1631
1632         b = iter->l[level].b;
1633
1634         parent = btree_node_parent(iter, b);
1635         if (!parent)
1636                 return 0;
1637
1638         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1639                 return 0;
1640
1641         /* XXX: can't be holding read locks */
1642         m = bch2_btree_node_get_sibling(c, iter, b, sib);
1643         if (IS_ERR(m)) {
1644                 ret = PTR_ERR(m);
1645                 goto out;
1646         }
1647
1648         /* NULL means no sibling: */
1649         if (!m) {
1650                 b->sib_u64s[sib] = U16_MAX;
1651                 return 0;
1652         }
1653
1654         if (sib == btree_prev_sib) {
1655                 prev = m;
1656                 next = b;
1657         } else {
1658                 prev = b;
1659                 next = m;
1660         }
1661
1662         bch2_bkey_format_init(&new_s);
1663         __bch2_btree_calc_format(&new_s, b);
1664         __bch2_btree_calc_format(&new_s, m);
1665         new_f = bch2_bkey_format_done(&new_s);
1666
1667         sib_u64s = btree_node_u64s_with_format(b, &new_f) +
1668                 btree_node_u64s_with_format(m, &new_f);
1669
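             /*
              * (Editorial note; H and the numbers are purely illustrative:)
              * when sib_u64s exceeds the hysteresis threshold, the branch
              * below pulls it halfway back towards that threshold:
              *
              *   sib_u64s = H + (sib_u64s - H) / 2,
              *   where H = BTREE_FOREGROUND_MERGE_HYSTERESIS(c)
              *
              * e.g. with H = 1000 and sib_u64s = 3000 this yields
              * 1000 + 2000 / 2 = 2000.
              */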
1670         if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
1671                 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1672                 sib_u64s /= 2;
1673                 sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1674         }
1675
1676         sib_u64s = min(sib_u64s, btree_max_u64s(c));
1677         b->sib_u64s[sib] = sib_u64s;
1678
1679         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
1680                 six_unlock_intent(&m->lock);
1681                 return 0;
1682         }
1683
1684         /* We're changing btree topology - this doesn't mix with gc: */
1685         if (!down_read_trylock(&c->gc_lock)) {
1686                 six_unlock_intent(&m->lock);
1687                 bch2_btree_iter_unlock(iter);
1688
1689                 down_read(&c->gc_lock);
1690                 up_read(&c->gc_lock);
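                     /*
                      * (Editorial note:) taking gc_lock for read and dropping
                      * it again presumably just waits for the in-progress gc
                      * (which holds gc_lock) to finish; the -EINTR below then
                      * sends us through the retry path at the bottom of this
                      * function, which re-traverses the iterator and starts
                      * over from retry:
                      */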
1691                 ret = -EINTR;
1692                 goto out;
1693         }
1694
1695         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1696                 ret = -EINTR;
1697                 goto out_unlock;
1698         }
1699
1700         as = bch2_btree_update_start(c, iter->btree_id,
1701                                      btree_update_reserve_required(c, b),
1702                                      BTREE_INSERT_NOFAIL|
1703                                      BTREE_INSERT_USE_RESERVE,
1704                                      &cl);
1705         if (IS_ERR(as)) {
1706                 ret = PTR_ERR(as);
1707                 goto out_unlock;
1708         }
1709
1710         bch2_btree_interior_update_will_free_node(as, b);
1711         bch2_btree_interior_update_will_free_node(as, m);
1712
1713         n = bch2_btree_node_alloc(as, b->level);
1714
1715         n->data->min_key        = prev->data->min_key;
1716         n->data->max_key        = next->data->max_key;
1717         n->data->format         = new_f;
1718         n->key.k.p              = next->key.k.p;
1719
1720         btree_node_set_format(n, new_f);
1721
1722         bch2_btree_sort_into(c, n, prev);
1723         bch2_btree_sort_into(c, n, next);
1724
1725         bch2_btree_build_aux_trees(n);
1726         six_unlock_write(&n->lock);
1727
1728         bkey_init(&delete.k);
1729         delete.k.p = prev->key.k.p;
1730         bch2_keylist_add(&as->parent_keys, &delete);
1731         bch2_keylist_add(&as->parent_keys, &n->key);
1732
1733         bch2_btree_node_write(c, n, SIX_LOCK_intent);
1734
1735         bch2_btree_insert_node(as, parent, iter, &as->parent_keys);
1736
1737         bch2_btree_open_bucket_put(c, n);
1738         bch2_btree_node_free_inmem(c, b, iter);
1739         bch2_btree_node_free_inmem(c, m, iter);
1740         bch2_btree_iter_node_replace(iter, n);
1741
1742         bch2_btree_iter_verify(iter, n);
1743
1744         bch2_btree_update_done(as);
1745 out_unlock:
1746         if (ret != -EINTR && ret != -EAGAIN)
1747                 bch2_btree_iter_set_locks_want(iter, 1);
1748         six_unlock_intent(&m->lock);
1749         up_read(&c->gc_lock);
1750 out:
1751         if (ret == -EAGAIN || ret == -EINTR) {
1752                 bch2_btree_iter_unlock(iter);
1753                 ret = -EINTR;
1754         }
1755
1756         closure_sync(&cl);
1757
1758         if (ret == -EINTR) {
1759                 ret = bch2_btree_iter_traverse(iter);
1760                 if (!ret)
1761                         goto retry;
1762         }
1763
1764         return ret;
1765 }
1766
1767 static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
1768                                 struct btree *b, unsigned flags,
1769                                 struct closure *cl)
1770 {
1771         struct btree *n, *parent = btree_node_parent(iter, b);
1772         struct btree_update *as;
1773
1774         as = bch2_btree_update_start(c, iter->btree_id,
1775                                      btree_update_reserve_required(c, b),
1776                                      flags, cl);
1777         if (IS_ERR(as)) {
1778                 trace_btree_gc_rewrite_node_fail(c, b);
1779                 return PTR_ERR(as);
1780         }
1781
1782         bch2_btree_interior_update_will_free_node(as, b);
1783
1784         n = bch2_btree_node_alloc_replacement(as, b);
1785
1786         bch2_btree_build_aux_trees(n);
1787         six_unlock_write(&n->lock);
1788
1789         trace_btree_gc_rewrite_node(c, b);
1790
1791         bch2_btree_node_write(c, n, SIX_LOCK_intent);
1792
1793         if (parent) {
1794                 bch2_btree_insert_node(as, parent, iter,
1795                                        &keylist_single(&n->key));
1796         } else {
1797                 bch2_btree_set_root(as, n, iter);
1798         }
1799
1800         bch2_btree_open_bucket_put(c, n);
1801
1802         bch2_btree_node_free_inmem(c, b, iter);
1803
1804         BUG_ON(!bch2_btree_iter_node_replace(iter, n));
1805
1806         bch2_btree_update_done(as);
1807         return 0;
1808 }
1809
1810 /**
1811  * bch2_btree_node_rewrite - Rewrite/move a btree node
1812  *
1813  * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e.
1814  * bch2_btree_update_start() had to wait)
1815  */
1816 int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
1817                             __le64 seq, unsigned flags)
1818 {
1819         unsigned locks_want = iter->locks_want;
1820         struct closure cl;
1821         struct btree *b;
1822         int ret;
1823
1824         flags |= BTREE_INSERT_NOFAIL;
1825
1826         closure_init_stack(&cl);
1827
1828         bch2_btree_iter_set_locks_want(iter, U8_MAX);
1829
1830         if (!(flags & BTREE_INSERT_GC_LOCK_HELD)) {
1831                 if (!down_read_trylock(&c->gc_lock)) {
1832                         bch2_btree_iter_unlock(iter);
1833                         down_read(&c->gc_lock);
1834                 }
1835         }
1836
1837         while (1) {
1838                 ret = bch2_btree_iter_traverse(iter);
1839                 if (ret)
1840                         break;
1841
1842                 b = bch2_btree_iter_peek_node(iter);
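                     /*
                      * (Editorial note:) the seq check below presumably
                      * catches the case where the node we were asked to
                      * rewrite has already been freed or replaced since the
                      * caller sampled @seq - in that case there is nothing
                      * left to rewrite.
                      */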
1843                 if (!b || b->data->keys.seq != seq)
1844                         break;
1845
1846                 ret = __btree_node_rewrite(c, iter, b, flags, &cl);
1847                 if (ret != -EAGAIN &&
1848                     ret != -EINTR)
1849                         break;
1850
1851                 bch2_btree_iter_unlock(iter);
1852                 closure_sync(&cl);
1853         }
1854
1855         bch2_btree_iter_set_locks_want(iter, locks_want);
1856
1857         if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
1858                 up_read(&c->gc_lock);
1859
1860         closure_sync(&cl);
1861         return ret;
1862 }
1863
1864 static void __bch2_btree_node_update_key(struct bch_fs *c,
1865                                          struct btree_update *as,
1866                                          struct btree_iter *iter,
1867                                          struct btree *b, struct btree *new_hash,
1868                                          struct bkey_i_extent *new_key)
1869 {
1870         struct btree *parent;
1871         int ret;
1872
1873         /*
1874          * Two corner cases that need to be thought about here:
1875          *
1876          * @b may not be reachable yet - there might be another interior update
1877          * operation waiting on @b to be written, and we're gonna deliver the
1878          * write completion to that interior update operation _before_
1879          * persisting the new_key update
1880          *
1881          * That ends up working without us having to do anything special here:
1882          * the reason is, we do kick off (and do the in memory updates) for the
1883          * update for @new_key before we return, creating a new interior_update
1884          * operation here.
1885          *
1886          * The new interior update operation here will in effect override the
1887          * previous one. The previous one was going to terminate - make @b
1888          * reachable - in one of two ways:
1889          * - updating the btree root pointer
1890          *   In that case,
1891          *   no, this doesn't work. argh.
1892          */
1893
1894         if (b->will_make_reachable)
1895                 as->must_rewrite = true;
1896
1897         btree_interior_update_add_node_reference(as, b);
1898
1899         parent = btree_node_parent(iter, b);
1900         if (parent) {
1901                 if (new_hash) {
1902                         bkey_copy(&new_hash->key, &new_key->k_i);
1903                         ret = bch2_btree_node_hash_insert(&c->btree_cache,
1904                                         new_hash, b->level, b->btree_id);
1905                         BUG_ON(ret);
1906                 }
1907
1908                 bch2_keylist_add(&as->parent_keys, &new_key->k_i);
1909                 bch2_btree_insert_node(as, parent, iter, &as->parent_keys);
1910
1911                 if (new_hash) {
1912                         mutex_lock(&c->btree_cache.lock);
1913                         bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
1914
1915                         bch2_btree_node_hash_remove(&c->btree_cache, b);
1916
1917                         bkey_copy(&b->key, &new_key->k_i);
1918                         ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
1919                         BUG_ON(ret);
1920                         mutex_unlock(&c->btree_cache.lock);
1921                 } else {
1922                         bkey_copy(&b->key, &new_key->k_i);
1923                 }
1924         } else {
1925                 struct bch_fs_usage stats = { 0 };
1926
1927                 BUG_ON(btree_node_root(c, b) != b);
1928
1929                 bch2_btree_node_lock_write(b, iter);
1930
1931                 bch2_mark_key(c, bkey_i_to_s_c(&new_key->k_i),
1932                               c->opts.btree_node_size, true,
1933                               gc_pos_btree_root(b->btree_id),
1934                               &stats, 0, 0);
1935                 bch2_btree_node_free_index(as, NULL,
1936                                            bkey_i_to_s_c(&b->key),
1937                                            &stats);
1938                 bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
1939                                     gc_pos_btree_root(b->btree_id));
1940
1941                 if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
1942                         mutex_lock(&c->btree_cache.lock);
1943                         bch2_btree_node_hash_remove(&c->btree_cache, b);
1944
1945                         bkey_copy(&b->key, &new_key->k_i);
1946                         ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
1947                         BUG_ON(ret);
1948                         mutex_unlock(&c->btree_cache.lock);
1949                 } else {
1950                         bkey_copy(&b->key, &new_key->k_i);
1951                 }
1952
1953                 btree_update_updated_root(as);
1954                 bch2_btree_node_unlock_write(b, iter);
1955         }
1956
1957         bch2_btree_update_done(as);
1958 }
1959
1960 int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
1961                                struct btree *b, struct bkey_i_extent *new_key)
1962 {
1963         struct btree_update *as = NULL;
1964         struct btree *new_hash = NULL;
1965         struct closure cl;
1966         int ret;
1967
1968         closure_init_stack(&cl);
1969
1970         if (!down_read_trylock(&c->gc_lock)) {
1971                 bch2_btree_iter_unlock(iter);
1972                 down_read(&c->gc_lock);
1973
1974                 if (!bch2_btree_iter_relock(iter)) {
1975                         ret = -EINTR;
1976                         goto err;
1977                 }
1978         }
1979
1980         /* check PTR_HASH() after @b is locked by btree_iter_traverse(): */
1981         if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
1982                 /* bch2_btree_reserve_get will unlock */
1983                 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1984                 if (ret) {
1985                         ret = -EINTR;
1986
1987                         bch2_btree_iter_unlock(iter);
1988                         up_read(&c->gc_lock);
1989                         closure_sync(&cl);
1990                         down_read(&c->gc_lock);
1991
1992                         if (!bch2_btree_iter_relock(iter))
1993                                 goto err;
1994                 }
1995
1996                 new_hash = bch2_btree_node_mem_alloc(c);
1997         }
1998
1999         as = bch2_btree_update_start(c, iter->btree_id,
2000                                      btree_update_reserve_required(c, b),
2001                                      BTREE_INSERT_NOFAIL|
2002                                      BTREE_INSERT_USE_RESERVE|
2003                                      BTREE_INSERT_USE_ALLOC_RESERVE,
2004                                      &cl);
2005         if (IS_ERR(as)) {
2006                 ret = PTR_ERR(as);
2007                 if (ret == -EAGAIN)
2008                         ret = -EINTR;
2009
2010                 if (ret != -EINTR)
2011                         goto err;
2012
2013                 bch2_btree_iter_unlock(iter);
2014                 up_read(&c->gc_lock);
2015                 closure_sync(&cl);
2016                 down_read(&c->gc_lock);
2017
2018                 if (!bch2_btree_iter_relock(iter))
2019                         goto err;
2020         }
2021
2022         ret = bch2_check_mark_super(c, BCH_DATA_BTREE,
2023                                     bch2_extent_devs(extent_i_to_s_c(new_key)));
2024         if (ret)
2025                 goto err_free_update;
2026
2027         __bch2_btree_node_update_key(c, as, iter, b, new_hash, new_key);
2028 err:
2029         if (new_hash) {
2030                 mutex_lock(&c->btree_cache.lock);
2031                 list_move(&new_hash->list, &c->btree_cache.freeable);
2032                 mutex_unlock(&c->btree_cache.lock);
2033
2034                 six_unlock_write(&new_hash->lock);
2035                 six_unlock_intent(&new_hash->lock);
2036         }
2037         up_read(&c->gc_lock);
2038         closure_sync(&cl);
2039         return ret;
2040 err_free_update:
2041         bch2_btree_update_free(as);
2042         goto err;
2043 }
2044
2045 /* Init code: */
2046
2047 /*
2048  * Only for filesystem bringup: when first reading the btree roots, or when
2049  * allocating them while initializing a new filesystem:
2050  */
2051 void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
2052 {
2053         BUG_ON(btree_node_root(c, b));
2054
2055         __bch2_btree_set_root_inmem(c, b);
2056         bch2_btree_set_root_ondisk(c, b, READ);
2057 }
2058
2059 void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
2060 {
2061         struct closure cl;
2062         struct btree *b;
2063         int ret;
2064
2065         closure_init_stack(&cl);
2066
2067         do {
2068                 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
2069                 closure_sync(&cl);
2070         } while (ret);
2071
2072         b = bch2_btree_node_mem_alloc(c);
2073         bch2_btree_cache_cannibalize_unlock(c);
2074
2075         set_btree_node_fake(b);
2076         b->level        = 0;
2077         b->btree_id     = id;
2078
2079         bkey_extent_init(&b->key);
2080         b->key.k.p = POS_MAX;
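             /*
              * (Editorial note:) the fake pointer value written below appears
              * to be just a unique per-btree-id value, so that this fake root
              * can live in the btree node hash table without colliding with
              * the fake roots of the other btrees.
              */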
2081         bkey_i_to_extent(&b->key)->v._data[0] = U64_MAX - id;
2082
2083         bch2_bset_init_first(b, &b->data->keys);
2084         bch2_btree_build_aux_trees(b);
2085
2086         b->data->min_key = POS_MIN;
2087         b->data->max_key = POS_MAX;
2088         b->data->format = bch2_btree_calc_format(b);
2089         btree_node_set_format(b, b->data->format);
2090
2091         ret = bch2_btree_node_hash_insert(&c->btree_cache, b, b->level, b->btree_id);
2092         BUG_ON(ret);
2093
2094         __bch2_btree_set_root_inmem(c, b);
2095
2096         six_unlock_write(&b->lock);
2097         six_unlock_intent(&b->lock);
2098 }
2099
2100 ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf)
2101 {
2102         char *out = buf, *end = buf + PAGE_SIZE;
2103         struct btree_update *as;
2104
2105         mutex_lock(&c->btree_interior_update_lock);
2106         list_for_each_entry(as, &c->btree_interior_update_list, list)
2107                 out += scnprintf(out, end - out, "%p m %u w %u r %u j %llu\n",
2108                                  as,
2109                                  as->mode,
2110                                  as->nodes_written,
2111                                  atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK,
2112                                  bch2_journal_pin_seq(&c->journal, &as->journal));
2113         mutex_unlock(&c->btree_interior_update_lock);
2114
2115         return out - buf;
2116 }