1
2 #include "bcachefs.h"
3 #include "alloc.h"
4 #include "bkey_methods.h"
5 #include "btree_cache.h"
6 #include "btree_gc.h"
7 #include "btree_update.h"
8 #include "btree_io.h"
9 #include "btree_iter.h"
10 #include "btree_locking.h"
11 #include "buckets.h"
12 #include "extents.h"
13 #include "journal.h"
14 #include "keylist.h"
15 #include "super-io.h"
16
17 #include <linux/random.h>
18 #include <linux/sort.h>
19 #include <trace/events/bcachefs.h>
20
21 static void btree_interior_update_updated_root(struct bch_fs *,
22                                                struct btree_interior_update *,
23                                                enum btree_id);
24 static void btree_interior_update_will_make_reachable(struct bch_fs *,
25                                 struct btree_interior_update *,
26                                 struct btree *);
27 static void btree_interior_update_drop_new_node(struct bch_fs *,
28                                                 struct btree *);
29
30 /* Calculate ideal packed bkey format for new btree nodes: */
31
32 void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
33 {
34         struct bkey_packed *k;
35         struct bset_tree *t;
36         struct bkey uk;
37
38         bch2_bkey_format_add_pos(s, b->data->min_key);
39
40         for_each_bset(b, t)
41                 for (k = btree_bkey_first(b, t);
42                      k != btree_bkey_last(b, t);
43                      k = bkey_next(k))
44                         if (!bkey_whiteout(k)) {
45                                 uk = bkey_unpack_key(b, k);
46                                 bch2_bkey_format_add_key(s, &uk);
47                         }
48 }
49
50 static struct bkey_format bch2_btree_calc_format(struct btree *b)
51 {
52         struct bkey_format_state s;
53
54         bch2_bkey_format_init(&s);
55         __bch2_btree_calc_format(&s, b);
56
57         return bch2_bkey_format_done(&s);
58 }
59
60 static size_t btree_node_u64s_with_format(struct btree *b,
61                                           struct bkey_format *new_f)
62 {
63         struct bkey_format *old_f = &b->format;
64
65         /* stupid integer promotion rules */
66         ssize_t delta =
67             (((int) new_f->key_u64s - old_f->key_u64s) *
68              (int) b->nr.packed_keys) +
69             (((int) new_f->key_u64s - BKEY_U64s) *
70              (int) b->nr.unpacked_keys);
71
72         BUG_ON(delta + b->nr.live_u64s < 0);
73
74         return b->nr.live_u64s + delta;
75 }
76
77 /**
78  * bch2_btree_node_format_fits - check if we could rewrite node with a new format
79  *
80  * This assumes all keys can pack with the new format -- it just checks if
81  * the re-packed keys would fit inside the node itself.
82  */
83 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
84                                 struct bkey_format *new_f)
85 {
86         size_t u64s = btree_node_u64s_with_format(b, new_f);
87
88         return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
89 }
90
91 /* Btree node freeing/allocation: */
92
93 /*
94  * We're doing the index update that makes @b unreachable, update stuff to
95  * reflect that:
96  *
97  * Must be called _before_ btree_interior_update_updated_root() or
98  * btree_interior_update_updated_btree():
99  */
100 static void bch2_btree_node_free_index(struct bch_fs *c, struct btree *b,
101                                       enum btree_id id, struct bkey_s_c k,
102                                       struct bch_fs_usage *stats)
103 {
104         struct btree_interior_update *as;
105         struct pending_btree_node_free *d;
106
107         mutex_lock(&c->btree_interior_update_lock);
108
109         for_each_pending_btree_node_free(c, as, d)
110                 if (!bkey_cmp(k.k->p, d->key.k.p) &&
111                     bkey_val_bytes(k.k) == bkey_val_bytes(&d->key.k) &&
112                     !memcmp(k.v, &d->key.v, bkey_val_bytes(k.k)))
113                         goto found;
114
115         BUG();
116 found:
117         d->index_update_done = true;
118
119         /*
120          * Btree nodes are accounted as freed in bch_alloc_stats when they're
121          * freed from the index:
122          */
123         stats->s[S_COMPRESSED][S_META]   -= c->sb.btree_node_size;
124         stats->s[S_UNCOMPRESSED][S_META] -= c->sb.btree_node_size;
125
126         /*
127          * We're dropping @k from the btree, but it's still live until the
128          * index update is persistent so we need to keep a reference around for
129          * mark and sweep to find - that's primarily what the
130          * btree_node_pending_free list is for.
131          *
132          * So here (when we set index_update_done = true), we're moving an
133          * existing reference to a different part of the larger "gc keyspace" -
134          * and the new position comes after the old position, since GC marks
135          * the pending free list after it walks the btree.
136          *
137          * If we move the reference while mark and sweep is _between_ the old
138          * and the new position, mark and sweep will see the reference twice
139          * and it'll get double accounted - so check for that here and subtract
140          * to cancel out one of mark and sweep's markings if necessary:
141          */
142
143         /*
144          * bch2_mark_key() compares the current gc pos to the pos we're
145          * moving this reference from, hence one comparison here:
146          */
147         if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
148                 struct bch_fs_usage tmp = { 0 };
149
150                 bch2_mark_key(c, bkey_i_to_s_c(&d->key),
151                              -c->sb.btree_node_size, true, b
152                              ? gc_pos_btree_node(b)
153                              : gc_pos_btree_root(id),
154                              &tmp, 0);
155                 /*
156                  * Don't apply tmp - pending deletes aren't tracked in
157                  * bch_alloc_stats:
158                  */
159         }
160
161         mutex_unlock(&c->btree_interior_update_lock);
162 }
163
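/*
 * Common tail of freeing an in-memory btree node: remove it from the hash
 * table and move it to the freeable list. Expects @b to already be clean -
 * not dirty, not blocking any writes, not about to be made reachable.
 */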
164 static void __btree_node_free(struct bch_fs *c, struct btree *b,
165                               struct btree_iter *iter)
166 {
167         trace_btree_node_free(c, b);
168
169         BUG_ON(btree_node_dirty(b));
170         BUG_ON(btree_node_need_write(b));
171         BUG_ON(b == btree_node_root(c, b));
172         BUG_ON(b->ob);
173         BUG_ON(!list_empty(&b->write_blocked));
174         BUG_ON(b->will_make_reachable);
175
176         clear_btree_node_noevict(b);
177
178         six_lock_write(&b->lock);
179
180         bch2_btree_node_hash_remove(c, b);
181
182         mutex_lock(&c->btree_cache_lock);
183         list_move(&b->list, &c->btree_cache_freeable);
184         mutex_unlock(&c->btree_cache_lock);
185
186         /*
187          * By using six_unlock_write() directly instead of
188          * bch2_btree_node_unlock_write(), we don't update the iterator's
189          * sequence numbers and cause future bch2_btree_node_relock() calls to
190          * fail:
191          */
192         six_unlock_write(&b->lock);
193 }
194
195 void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
196 {
197         struct open_bucket *ob = b->ob;
198
199         btree_interior_update_drop_new_node(c, b);
200
201         b->ob = NULL;
202
203         clear_btree_node_dirty(b);
204
205         __btree_node_free(c, b, NULL);
206
207         bch2_open_bucket_put(c, ob);
208 }
209
210 void bch2_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
211 {
212         bch2_btree_iter_node_drop_linked(iter, b);
213
214         __btree_node_free(iter->c, b, iter);
215
216         bch2_btree_iter_node_drop(iter, b);
217 }
218
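/*
 * Drop the gc mark on a node whose deletion from the index is now complete,
 * so its space can eventually be reused:
 */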
219 static void bch2_btree_node_free_ondisk(struct bch_fs *c,
220                                        struct pending_btree_node_free *pending)
221 {
222         struct bch_fs_usage stats = { 0 };
223
224         BUG_ON(!pending->index_update_done);
225
226         bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
227                      -c->sb.btree_node_size, true,
228                      gc_phase(GC_PHASE_PENDING_DELETE),
229                      &stats, 0);
230         /*
231          * Don't apply stats - pending deletes aren't tracked in
232          * bch_alloc_stats:
233          */
234 }
235
236 void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b)
237 {
238         bch2_open_bucket_put(c, b->ob);
239         b->ob = NULL;
240 }
241
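/*
 * Allocate on-disk space for a new btree node - from the btree reserve
 * cache if possible, otherwise from the allocator - and pair it with an
 * in-memory struct btree. The caller holds the cannibalize lock, so the
 * in-memory allocation can't fail.
 */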
242 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
243                                              struct disk_reservation *res,
244                                              struct closure *cl,
245                                              unsigned flags)
246 {
247         BKEY_PADDED(k) tmp;
248         struct open_bucket *ob;
249         struct btree *b;
250         unsigned nr_reserve;
251         enum alloc_reserve alloc_reserve;
252
253         if (flags & BTREE_INSERT_USE_ALLOC_RESERVE) {
254                 nr_reserve      = 0;
255                 alloc_reserve   = RESERVE_ALLOC;
256         } else if (flags & BTREE_INSERT_USE_RESERVE) {
257                 nr_reserve      = BTREE_NODE_RESERVE / 2;
258                 alloc_reserve   = RESERVE_BTREE;
259         } else {
260                 nr_reserve      = BTREE_NODE_RESERVE;
261                 alloc_reserve   = RESERVE_NONE;
262         }
263
264         mutex_lock(&c->btree_reserve_cache_lock);
265         if (c->btree_reserve_cache_nr > nr_reserve) {
266                 struct btree_alloc *a =
267                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
268
269                 ob = a->ob;
270                 bkey_copy(&tmp.k, &a->k);
271                 mutex_unlock(&c->btree_reserve_cache_lock);
272                 goto mem_alloc;
273         }
274         mutex_unlock(&c->btree_reserve_cache_lock);
275
276 retry:
277         /* alloc_sectors is weird, I suppose */
278         bkey_extent_init(&tmp.k);
279         tmp.k.k.size = c->sb.btree_node_size;
280
281         ob = bch2_alloc_sectors(c, &c->btree_write_point,
282                                bkey_i_to_extent(&tmp.k),
283                                res->nr_replicas,
284                                c->opts.metadata_replicas_required,
285                                alloc_reserve, cl);
286         if (IS_ERR(ob))
287                 return ERR_CAST(ob);
288
289         if (tmp.k.k.size < c->sb.btree_node_size) {
290                 bch2_open_bucket_put(c, ob);
291                 goto retry;
292         }
293 mem_alloc:
294         b = bch2_btree_node_mem_alloc(c);
295
296         /* we hold cannibalize_lock: */
297         BUG_ON(IS_ERR(b));
298         BUG_ON(b->ob);
299
300         bkey_copy(&b->key, &tmp.k);
301         b->key.k.size = 0;
302         b->ob = ob;
303
304         return b;
305 }
306
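/*
 * Take a preallocated node from @reserve and initialize it as an empty
 * btree node at @level in btree @id:
 */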
307 static struct btree *bch2_btree_node_alloc(struct bch_fs *c,
308                                           unsigned level, enum btree_id id,
309                                           struct btree_interior_update *as,
310                                           struct btree_reserve *reserve)
311 {
312         struct btree *b;
313
314         BUG_ON(!reserve->nr);
315
316         b = reserve->b[--reserve->nr];
317
318         BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
319
320         set_btree_node_accessed(b);
321         set_btree_node_dirty(b);
322
323         bch2_bset_init_first(b, &b->data->keys);
324         memset(&b->nr, 0, sizeof(b->nr));
325         b->data->magic = cpu_to_le64(bset_magic(c));
326         b->data->flags = 0;
327         SET_BTREE_NODE_ID(b->data, id);
328         SET_BTREE_NODE_LEVEL(b->data, level);
329         b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
330
331         bch2_btree_build_aux_trees(b);
332
333         btree_interior_update_will_make_reachable(c, as, b);
334
335         trace_btree_node_alloc(c, b);
336         return b;
337 }
338
339 struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *c,
340                                                   struct btree *b,
341                                                   struct bkey_format format,
342                                                   struct btree_interior_update *as,
343                                                   struct btree_reserve *reserve)
344 {
345         struct btree *n;
346
347         n = bch2_btree_node_alloc(c, b->level, b->btree_id, as, reserve);
348
349         n->data->min_key        = b->data->min_key;
350         n->data->max_key        = b->data->max_key;
351         n->data->format         = format;
352
353         btree_node_set_format(n, format);
354
355         bch2_btree_sort_into(c, n, b);
356
357         btree_node_reset_sib_u64s(n);
358
359         n->key.k.p = b->key.k.p;
360         return n;
361 }
362
363 static struct btree *bch2_btree_node_alloc_replacement(struct bch_fs *c,
364                                                 struct btree *b,
365                                                 struct btree_interior_update *as,
366                                                 struct btree_reserve *reserve)
367 {
368         struct bkey_format new_f = bch2_btree_calc_format(b);
369
370         /*
371          * The keys might expand with the new format - if they wouldn't fit in
372          * the btree node anymore, use the old format for now:
373          */
374         if (!bch2_btree_node_format_fits(c, b, &new_f))
375                 new_f = b->format;
376
377         return __bch2_btree_node_alloc_replacement(c, b, new_f, as, reserve);
378 }
379
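/*
 * Make @b the in-memory root of its btree: take it off the btree cache LRU
 * so it can't be reaped, update the in-memory root pointer, and - when this
 * is a newly allocated root rather than one read at startup - account the
 * new root and mark the old one as freed from the index.
 */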
380 static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
381                                      struct btree_reserve *btree_reserve)
382 {
383         struct btree *old = btree_node_root(c, b);
384
385         /* Root nodes cannot be reaped */
386         mutex_lock(&c->btree_cache_lock);
387         list_del_init(&b->list);
388         mutex_unlock(&c->btree_cache_lock);
389
390         mutex_lock(&c->btree_root_lock);
391         btree_node_root(c, b) = b;
392         mutex_unlock(&c->btree_root_lock);
393
394         if (btree_reserve) {
395                 /*
396                  * New allocation (we're not being called because we're in
397                  * bch2_btree_root_read()) - do marking while holding
398                  * btree_root_lock:
399                  */
400                 struct bch_fs_usage stats = { 0 };
401
402                 bch2_mark_key(c, bkey_i_to_s_c(&b->key),
403                              c->sb.btree_node_size, true,
404                              gc_pos_btree_root(b->btree_id),
405                              &stats, 0);
406
407                 if (old)
408                         bch2_btree_node_free_index(c, NULL, old->btree_id,
409                                                   bkey_i_to_s_c(&old->key),
410                                                   &stats);
411                 bch2_fs_usage_apply(c, &stats, &btree_reserve->disk_res,
412                                    gc_pos_btree_root(b->btree_id));
413         }
414
415         bch2_recalc_btree_reserve(c);
416 }
417
418 static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
419 {
420         struct btree_root *r = &c->btree_roots[b->btree_id];
421
422         mutex_lock(&c->btree_root_lock);
423
424         BUG_ON(b != r->b);
425         bkey_copy(&r->key, &b->key);
426         r->level = b->level;
427         r->alive = true;
428
429         mutex_unlock(&c->btree_root_lock);
430 }
431
432 /*
433  * Only for filesystem bringup, when first reading the btree roots or allocating
434  * btree roots when initializing a new filesystem:
435  */
436 void bch2_btree_set_root_initial(struct bch_fs *c, struct btree *b,
437                                 struct btree_reserve *btree_reserve)
438 {
439         BUG_ON(btree_node_root(c, b));
440
441         bch2_btree_set_root_inmem(c, b, btree_reserve);
442         bch2_btree_set_root_ondisk(c, b);
443 }
444
445 /**
446  * bch2_btree_set_root - update the root in memory and on disk
447  *
448  * To ensure forward progress, the current task must not be holding any
449  * btree node write locks. However, you must hold an intent lock on the
450  * old root.
451  *
452  * Note: This allocates a journal entry but doesn't add any keys to
453  * it.  All the btree roots are part of every journal write, so there
454  * is nothing new to be done.  This just guarantees that there is a
455  * journal write.
456  */
457 static void bch2_btree_set_root(struct btree_iter *iter, struct btree *b,
458                                struct btree_interior_update *as,
459                                struct btree_reserve *btree_reserve)
460 {
461         struct bch_fs *c = iter->c;
462         struct btree *old;
463
464         trace_btree_set_root(c, b);
465         BUG_ON(!b->written);
466
467         old = btree_node_root(c, b);
468
469         /*
470          * Ensure no one is using the old root while we switch to the
471          * new root:
472          */
473         bch2_btree_node_lock_write(old, iter);
474
475         bch2_btree_set_root_inmem(c, b, btree_reserve);
476
477         btree_interior_update_updated_root(c, as, iter->btree_id);
478
479         /*
480          * Unlock old root after new root is visible:
481          *
482          * The new root isn't persistent, but that's ok: we still have
483          * an intent lock on the new root, and any updates that would
484          * depend on the new root would have to update the new root.
485          */
486         bch2_btree_node_unlock_write(old, iter);
487 }
488
489 static struct btree *__btree_root_alloc(struct bch_fs *c, unsigned level,
490                                         enum btree_id id,
491                                         struct btree_interior_update *as,
492                                         struct btree_reserve *reserve)
493 {
494         struct btree *b = bch2_btree_node_alloc(c, level, id, as, reserve);
495
496         b->data->min_key = POS_MIN;
497         b->data->max_key = POS_MAX;
498         b->data->format = bch2_btree_calc_format(b);
499         b->key.k.p = POS_MAX;
500
501         btree_node_set_format(b, b->data->format);
502         bch2_btree_build_aux_trees(b);
503
504         six_unlock_write(&b->lock);
505
506         return b;
507 }
508
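/*
 * Release a btree node reserve: unused preallocated nodes go back to the
 * btree_reserve_cache while there's room, the rest are freed along with
 * their open buckets, and the disk reservation is dropped.
 */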
509 void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
510 {
511         bch2_disk_reservation_put(c, &reserve->disk_res);
512
513         mutex_lock(&c->btree_reserve_cache_lock);
514
515         while (reserve->nr) {
516                 struct btree *b = reserve->b[--reserve->nr];
517
518                 six_unlock_write(&b->lock);
519
520                 if (c->btree_reserve_cache_nr <
521                     ARRAY_SIZE(c->btree_reserve_cache)) {
522                         struct btree_alloc *a =
523                                 &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
524
525                         a->ob = b->ob;
526                         b->ob = NULL;
527                         bkey_copy(&a->k, &b->key);
528                 } else {
529                         bch2_open_bucket_put(c, b->ob);
530                         b->ob = NULL;
531                 }
532
533                 __btree_node_free(c, b, NULL);
534
535                 six_unlock_intent(&b->lock);
536         }
537
538         mutex_unlock(&c->btree_reserve_cache_lock);
539
540         mempool_free(reserve, &c->btree_reserve_pool);
541 }
542
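/*
 * Reserve disk space and preallocate in-memory nodes for @nr_nodes new
 * btree nodes up front, so that the interior update that uses them can't
 * fail partway through:
 */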
543 static struct btree_reserve *__bch2_btree_reserve_get(struct bch_fs *c,
544                                                      unsigned nr_nodes,
545                                                      unsigned flags,
546                                                      struct closure *cl)
547 {
548         struct btree_reserve *reserve;
549         struct btree *b;
550         struct disk_reservation disk_res = { 0, 0 };
551         unsigned sectors = nr_nodes * c->sb.btree_node_size;
552         int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD|
553                 BCH_DISK_RESERVATION_METADATA;
554
555         if (flags & BTREE_INSERT_NOFAIL)
556                 disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
557
558         /*
559          * This check isn't necessary for correctness - it's just to potentially
560          * prevent us from doing a lot of work that'll end up being wasted:
561          */
562         ret = bch2_journal_error(&c->journal);
563         if (ret)
564                 return ERR_PTR(ret);
565
566         if (bch2_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
567                 return ERR_PTR(-ENOSPC);
568
569         BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
570
571         /*
572          * Protects reaping from the btree node cache and using the btree node
573          * open bucket reserve:
574          */
575         ret = bch2_btree_node_cannibalize_lock(c, cl);
576         if (ret) {
577                 bch2_disk_reservation_put(c, &disk_res);
578                 return ERR_PTR(ret);
579         }
580
581         reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
582
583         reserve->disk_res = disk_res;
584         reserve->nr = 0;
585
586         while (reserve->nr < nr_nodes) {
587                 b = __bch2_btree_node_alloc(c, &disk_res,
588                                             flags & BTREE_INSERT_NOWAIT
589                                             ? NULL : cl, flags);
590                 if (IS_ERR(b)) {
591                         ret = PTR_ERR(b);
592                         goto err_free;
593                 }
594
595                 ret = bch2_check_mark_super(c, bkey_i_to_s_c_extent(&b->key),
596                                             BCH_DATA_BTREE);
597                 if (ret)
598                         goto err_free;
599
600                 reserve->b[reserve->nr++] = b;
601         }
602
603         bch2_btree_node_cannibalize_unlock(c);
604         return reserve;
605 err_free:
606         bch2_btree_reserve_put(c, reserve);
607         bch2_btree_node_cannibalize_unlock(c);
608         trace_btree_reserve_get_fail(c, nr_nodes, cl);
609         return ERR_PTR(ret);
610 }
611
612 struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
613                                             struct btree *b,
614                                             unsigned extra_nodes,
615                                             unsigned flags,
616                                             struct closure *cl)
617 {
618         unsigned depth = btree_node_root(c, b)->level - b->level;
619         unsigned nr_nodes = btree_reserve_required_nodes(depth) + extra_nodes;
620
621         return __bch2_btree_reserve_get(c, nr_nodes, flags, cl);
622 }
623
624 int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id,
625                          struct closure *writes)
626 {
627         struct btree_interior_update as;
628         struct btree_reserve *reserve;
629         struct closure cl;
630         struct btree *b;
631
632         memset(&as, 0, sizeof(as));
633         closure_init_stack(&cl);
634
635         while (1) {
636                 /* XXX haven't calculated capacity yet :/ */
637                 reserve = __bch2_btree_reserve_get(c, 1, 0, &cl);
638                 if (!IS_ERR(reserve))
639                         break;
640
641                 if (PTR_ERR(reserve) == -ENOSPC)
642                         return PTR_ERR(reserve);
643
644                 closure_sync(&cl);
645         }
646
647         b = __btree_root_alloc(c, 0, id, &as, reserve);
648
649         bch2_btree_node_write(c, b, writes, SIX_LOCK_intent);
650
651         bch2_btree_set_root_initial(c, b, reserve);
652
653         btree_interior_update_drop_new_node(c, b);
654         bch2_btree_open_bucket_put(c, b);
655         six_unlock_intent(&b->lock);
656
657         bch2_btree_reserve_put(c, reserve);
658
659         return 0;
660 }
661
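/*
 * Insert a btree node pointer into interior node @b at @node_iter,
 * accounting the new key and marking any key it overwrites as a pending
 * delete:
 */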
662 static void bch2_insert_fixup_btree_ptr(struct btree_iter *iter,
663                                        struct btree *b,
664                                        struct bkey_i *insert,
665                                        struct btree_node_iter *node_iter,
666                                        struct disk_reservation *disk_res)
667 {
668         struct bch_fs *c = iter->c;
669         struct bch_fs_usage stats = { 0 };
670         struct bkey_packed *k;
671         struct bkey tmp;
672
673         if (bkey_extent_is_data(&insert->k))
674                 bch2_mark_key(c, bkey_i_to_s_c(insert),
675                              c->sb.btree_node_size, true,
676                              gc_pos_btree_node(b), &stats, 0);
677
678         while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
679                !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
680                 bch2_btree_node_iter_advance(node_iter, b);
681
682         /*
683          * If we're overwriting, look up pending delete and mark so that gc
684          * marks it on the pending delete list:
685          */
686         if (k && !bkey_cmp_packed(b, k, &insert->k))
687                 bch2_btree_node_free_index(c, b, iter->btree_id,
688                                           bkey_disassemble(b, k, &tmp),
689                                           &stats);
690
691         bch2_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b));
692
693         bch2_btree_bset_insert_key(iter, b, node_iter, insert);
694         set_btree_node_dirty(b);
695         set_btree_node_need_write(b);
696 }
697
698 /* Inserting into a given leaf node (last stage of insert): */
699
700 /* Handle overwrites and do insert, for non extents: */
701 bool bch2_btree_bset_insert_key(struct btree_iter *iter,
702                                struct btree *b,
703                                struct btree_node_iter *node_iter,
704                                struct bkey_i *insert)
705 {
706         const struct bkey_format *f = &b->format;
707         struct bkey_packed *k;
708         struct bset_tree *t;
709         unsigned clobber_u64s;
710
711         EBUG_ON(btree_node_just_written(b));
712         EBUG_ON(bset_written(b, btree_bset_last(b)));
713         EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
714         EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
715                 bkey_cmp(insert->k.p, b->data->max_key) > 0);
716         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b));
717
718         k = bch2_btree_node_iter_peek_all(node_iter, b);
719         if (k && !bkey_cmp_packed(b, k, &insert->k)) {
720                 BUG_ON(bkey_whiteout(k));
721
722                 t = bch2_bkey_to_bset(b, k);
723
724                 if (bset_unwritten(b, bset(b, t)) &&
725                     bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
726                         BUG_ON(bkey_whiteout(k) != bkey_whiteout(&insert->k));
727
728                         k->type = insert->k.type;
729                         memcpy_u64s(bkeyp_val(f, k), &insert->v,
730                                     bkey_val_u64s(&insert->k));
731                         return true;
732                 }
733
734                 insert->k.needs_whiteout = k->needs_whiteout;
735
736                 btree_keys_account_key_drop(&b->nr, t - b->set, k);
737
738                 if (t == bset_tree_last(b)) {
739                         clobber_u64s = k->u64s;
740
741                         /*
742                          * If we're deleting, and the key we're deleting doesn't
743                          * need a whiteout (it wasn't overwriting a key that had
744                          * been written to disk) - just delete it:
745                          */
746                         if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
747                                 bch2_bset_delete(b, k, clobber_u64s);
748                                 bch2_btree_node_iter_fix(iter, b, node_iter, t,
749                                                         k, clobber_u64s, 0);
750                                 return true;
751                         }
752
753                         goto overwrite;
754                 }
755
756                 k->type = KEY_TYPE_DELETED;
757                 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
758                                         k->u64s, k->u64s);
759
760                 if (bkey_whiteout(&insert->k)) {
761                         reserve_whiteout(b, t, k);
762                         return true;
763                 } else {
764                         k->needs_whiteout = false;
765                 }
766         } else {
767                 /*
768                  * Deleting, but the key to delete wasn't found - nothing to do:
769                  */
770                 if (bkey_whiteout(&insert->k))
771                         return false;
772
773                 insert->k.needs_whiteout = false;
774         }
775
776         t = bset_tree_last(b);
777         k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
778         clobber_u64s = 0;
779 overwrite:
780         bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
781         if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
782                 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
783                                         clobber_u64s, k->u64s);
784         return true;
785 }
786
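/*
 * Journal pin flush callbacks: the journal wants to reclaim an entry that
 * this btree node still has unwritten keys from, so write the node out:
 */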
787 static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
788                                unsigned i, u64 seq)
789 {
790         struct bch_fs *c = container_of(j, struct bch_fs, journal);
791         struct btree_write *w = container_of(pin, struct btree_write, journal);
792         struct btree *b = container_of(w, struct btree, writes[i]);
793
794         six_lock_read(&b->lock);
795         bch2_btree_node_write_dirty(c, b, NULL,
796                         (btree_current_write(b) == w &&
797                          w->journal.pin_list == journal_seq_pin(j, seq)));
798         six_unlock_read(&b->lock);
799 }
800
801 static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
802 {
803         return __btree_node_flush(j, pin, 0, seq);
804 }
805
806 static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
807 {
808         return __btree_node_flush(j, pin, 1, seq);
809 }
810
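/*
 * Add @insert to the journal, and take a journal pin on behalf of the leaf
 * node's current write so the entry isn't reclaimed before the node itself
 * has been written:
 */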
811 void bch2_btree_journal_key(struct btree_insert *trans,
812                            struct btree_iter *iter,
813                            struct bkey_i *insert)
814 {
815         struct bch_fs *c = trans->c;
816         struct journal *j = &c->journal;
817         struct btree *b = iter->nodes[0];
818         struct btree_write *w = btree_current_write(b);
819
820         EBUG_ON(iter->level || b->level);
821         EBUG_ON(trans->journal_res.ref !=
822                 !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
823
824         if (!journal_pin_active(&w->journal))
825                 bch2_journal_pin_add(j, &trans->journal_res,
826                                      &w->journal,
827                                      btree_node_write_idx(b) == 0
828                                      ? btree_node_flush0
829                                      : btree_node_flush1);
830
831         if (trans->journal_res.ref) {
832                 u64 seq = trans->journal_res.seq;
833                 bool needs_whiteout = insert->k.needs_whiteout;
834
835                 /* ick */
836                 insert->k.needs_whiteout = false;
837                 bch2_journal_add_keys(j, &trans->journal_res,
838                                       b->btree_id, insert);
839                 insert->k.needs_whiteout = needs_whiteout;
840
841                 bch2_journal_set_has_inode(j, &trans->journal_res,
842                                            insert->k.p.inode);
843
844                 if (trans->journal_seq)
845                         *trans->journal_seq = seq;
846                 btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
847         }
848
849         if (!btree_node_dirty(b))
850                 set_btree_node_dirty(b);
851 }
852
853 static enum btree_insert_ret
854 bch2_insert_fixup_key(struct btree_insert *trans,
855                      struct btree_insert_entry *insert)
856 {
857         struct btree_iter *iter = insert->iter;
858
859         BUG_ON(iter->level);
860
861         if (bch2_btree_bset_insert_key(iter,
862                                       iter->nodes[0],
863                                       &iter->node_iters[0],
864                                       insert->k))
865                 bch2_btree_journal_key(trans, iter, insert->k);
866
867         trans->did_work = true;
868         return BTREE_INSERT_OK;
869 }
870
871 static void verify_keys_sorted(struct keylist *l)
872 {
873 #ifdef CONFIG_BCACHEFS_DEBUG
874         struct bkey_i *k;
875
876         for_each_keylist_key(l, k)
877                 BUG_ON(bkey_next(k) != l->top &&
878                        bkey_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
879 #endif
880 }
881
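/*
 * Take the write lock on @b and get its last bset into a state we can
 * insert into - finishing up post-write cleanup and starting a new bset
 * if the current one has been written or is getting too big:
 */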
882 static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter)
883 {
884         struct bch_fs *c = iter->c;
885
886         bch2_btree_node_lock_write(b, iter);
887
888         if (btree_node_just_written(b) &&
889             bch2_btree_post_write_cleanup(c, b))
890                 bch2_btree_iter_reinit_node(iter, b);
891
892         /*
893          * If the last bset has been written, or if it's gotten too big - start
894          * a new bset to insert into:
895          */
896         if (want_new_bset(c, b))
897                 bch2_btree_init_next(c, b, iter);
898 }
899
900 /* Asynchronous interior node update machinery */
901
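/*
 * Rough lifecycle of a btree_interior_update, as used in this file:
 *
 *   as = bch2_btree_interior_update_alloc(c);
 *   - new nodes are registered with btree_interior_update_will_make_reachable()
 *   - nodes being replaced go through bch2_btree_interior_update_will_free_node()
 *   - the update then completes via btree_interior_update_updated_btree() or
 *     btree_interior_update_updated_root(), which continue (asynchronously)
 *     into btree_interior_update_nodes_written() and, once everything is
 *     persistent, btree_interior_update_nodes_reachable().
 */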
902 struct btree_interior_update *
903 bch2_btree_interior_update_alloc(struct bch_fs *c)
904 {
905         struct btree_interior_update *as;
906
907         as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
908         memset(as, 0, sizeof(*as));
909         closure_init(&as->cl, &c->cl);
910         as->c           = c;
911         as->mode        = BTREE_INTERIOR_NO_UPDATE;
912         INIT_LIST_HEAD(&as->write_blocked_list);
913
914         bch2_keylist_init(&as->parent_keys, as->inline_keys,
915                          ARRAY_SIZE(as->inline_keys));
916
917         mutex_lock(&c->btree_interior_update_lock);
918         list_add(&as->list, &c->btree_interior_update_list);
919         mutex_unlock(&c->btree_interior_update_lock);
920
921         return as;
922 }
923
924 static void btree_interior_update_free(struct closure *cl)
925 {
926         struct btree_interior_update *as =
927                 container_of(cl, struct btree_interior_update, cl);
928
929         mempool_free(as, &as->c->btree_interior_update_pool);
930 }
931
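/*
 * Final stage of an interior update: the new nodes are now reachable on
 * disk, so drop our journal pin, write out any new nodes that are still
 * dirty, and free the nodes we replaced:
 */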
932 static void btree_interior_update_nodes_reachable(struct closure *cl)
933 {
934         struct btree_interior_update *as =
935                 container_of(cl, struct btree_interior_update, cl);
936         struct bch_fs *c = as->c;
937
938         bch2_journal_pin_drop(&c->journal, &as->journal);
939
940         mutex_lock(&c->btree_interior_update_lock);
941
942         while (as->nr_new_nodes) {
943                 struct btree *b = as->new_nodes[--as->nr_new_nodes];
944
945                 BUG_ON(b->will_make_reachable != as);
946                 b->will_make_reachable = NULL;
947                 mutex_unlock(&c->btree_interior_update_lock);
948
949                 six_lock_read(&b->lock);
950                 bch2_btree_node_write_dirty(c, b, NULL, btree_node_need_write(b));
951                 six_unlock_read(&b->lock);
952                 mutex_lock(&c->btree_interior_update_lock);
953         }
954
955         while (as->nr_pending)
956                 bch2_btree_node_free_ondisk(c, &as->pending[--as->nr_pending]);
957
958         list_del(&as->list);
959         mutex_unlock(&c->btree_interior_update_lock);
960
961         closure_wake_up(&as->wait);
962
963         closure_return_with_destructor(cl, btree_interior_update_free);
964 }
965
966 static void btree_interior_update_nodes_written(struct closure *cl)
967 {
968         struct btree_interior_update *as =
969                 container_of(cl, struct btree_interior_update, cl);
970         struct bch_fs *c = as->c;
971         struct btree *b;
972
973         if (bch2_journal_error(&c->journal)) {
974                 /* XXX what? */
975                 /* we don't want to free the nodes on disk, that's what */
976         }
977
978         /* XXX: missing error handling, damnit */
979
980         /* check for journal error, bail out if we flushed */
981
982         /*
983          * We did an update to a parent node where the pointers we added pointed
984          * to child nodes that weren't written yet: now, the child nodes have
985          * been written so we can write out the update to the interior node.
986          */
987 retry:
988         mutex_lock(&c->btree_interior_update_lock);
989         switch (as->mode) {
990         case BTREE_INTERIOR_NO_UPDATE:
991                 BUG();
992         case BTREE_INTERIOR_UPDATING_NODE:
993                 /* The usual case: */
994                 b = READ_ONCE(as->b);
995
996                 if (!six_trylock_read(&b->lock)) {
997                         mutex_unlock(&c->btree_interior_update_lock);
998                         six_lock_read(&b->lock);
999                         six_unlock_read(&b->lock);
1000                         goto retry;
1001                 }
1002
1003                 BUG_ON(!btree_node_dirty(b));
1004                 closure_wait(&btree_current_write(b)->wait, cl);
1005
1006                 list_del(&as->write_blocked_list);
1007                 mutex_unlock(&c->btree_interior_update_lock);
1008
1009                 bch2_btree_node_write_dirty(c, b, NULL,
1010                                             btree_node_need_write(b));
1011                 six_unlock_read(&b->lock);
1012                 break;
1013
1014         case BTREE_INTERIOR_UPDATING_AS:
1015                 /*
1016                  * The btree node we originally updated has been freed and is
1017                  * being rewritten - so we don't need to write anything here, we just
1018                  * need to signal to that btree_interior_update that it's ok to make the
1019                  * new replacement node visible:
1020                  */
1021                 closure_put(&as->parent_as->cl);
1022
1023                 /*
1024                  * and then we have to wait on that btree_interior_update to finish:
1025                  */
1026                 closure_wait(&as->parent_as->wait, cl);
1027                 mutex_unlock(&c->btree_interior_update_lock);
1028                 break;
1029
1030         case BTREE_INTERIOR_UPDATING_ROOT:
1031                 /* b is the new btree root: */
1032                 b = READ_ONCE(as->b);
1033
1034                 if (!six_trylock_read(&b->lock)) {
1035                         mutex_unlock(&c->btree_interior_update_lock);
1036                         six_lock_read(&b->lock);
1037                         six_unlock_read(&b->lock);
1038                         goto retry;
1039                 }
1040
1041                 BUG_ON(c->btree_roots[b->btree_id].as != as);
1042                 c->btree_roots[b->btree_id].as = NULL;
1043
1044                 bch2_btree_set_root_ondisk(c, b);
1045
1046                 /*
1047                  * We don't have to wait on anything here (before
1048                  * btree_interior_update_nodes_reachable frees the old nodes
1049                  * ondisk) - we've ensured that the very next journal write will
1050                  * have the pointer to the new root, and before the allocator
1051                  * can reuse the old nodes it'll have to do a journal commit:
1052                  */
1053                 six_unlock_read(&b->lock);
1054                 mutex_unlock(&c->btree_interior_update_lock);
1055
1056                 /*
1057                  * Bit of funny circularity going on here we have to break:
1058                  *
1059                  * We have to drop our journal pin before writing the journal
1060                  * entry that points to the new btree root: else, we could
1061                  * deadlock if the journal currently happens to be full.
1062                  *
1063                  * This means we're dropping the journal pin _before_ the new
1064                  * nodes are technically reachable - but this is safe, because
1065                  * after the bch2_btree_set_root_ondisk() call above they will
1066                  * be reachable as of the very next journal write:
1067                  */
1068                 bch2_journal_pin_drop(&c->journal, &as->journal);
1069
1070                 /*
1071                  * And, do a journal write to write the pointer to the new root,
1072                  * then wait for it to complete before freeing the nodes we
1073                  * replaced:
1074                  */
1075                 bch2_journal_meta_async(&c->journal, cl);
1076                 break;
1077         }
1078
1079         continue_at(cl, btree_interior_update_nodes_reachable, system_wq);
1080 }
1081
1082 /*
1083  * We're updating @b with pointers to nodes that haven't finished writing yet:
1084  * block @b from being written until @as completes
1085  */
1086 static void btree_interior_update_updated_btree(struct bch_fs *c,
1087                                                 struct btree_interior_update *as,
1088                                                 struct btree *b)
1089 {
1090         mutex_lock(&c->btree_interior_update_lock);
1091
1092         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1093         BUG_ON(!btree_node_dirty(b));
1094
1095         as->mode = BTREE_INTERIOR_UPDATING_NODE;
1096         as->b = b;
1097         list_add(&as->write_blocked_list, &b->write_blocked);
1098
1099         mutex_unlock(&c->btree_interior_update_lock);
1100
1101         /*
1102          * In general, when you're staging things in a journal that will later
1103          * be written elsewhere, and you also want to guarantee ordering: that
1104          * is, if you have updates a, b, c, after a crash you should never see c
1105          * and not a or b - there's a problem:
1106          *
1107          * If the final destination of the update(s) (i.e. btree node) can be
1108          * written/flushed _before_ the relevant journal entry - oops, that
1109          * breaks ordering, since the various leaf nodes can be written in any
1110          * order.
1111          *
1112          * Normally we use bset->journal_seq to deal with this - if during
1113          * recovery we find a btree node write that's newer than the newest
1114          * journal entry, we just ignore it - we don't need it, anything we're
1115          * supposed to have (that we reported as completed via fsync()) will
1116          * still be in the journal, and as far as the state of the journal is
1117          * concerned that btree node write never happened.
1118          *
1119          * That breaks when we're rewriting/splitting/merging nodes, since we're
1120          * mixing btree node writes that haven't happened yet with previously
1121          * written data that has been reported as completed to the journal.
1122          *
1123          * Thus, before making the new nodes reachable, we have to wait for the
1124          * newest journal sequence number we have data for to be written (if it
1125          * hasn't been yet).
1126          */
1127         bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
1128
1129         continue_at(&as->cl, btree_interior_update_nodes_written,
1130                     system_freezable_wq);
1131 }
1132
1133 static void interior_update_flush(struct journal *j,
1134                         struct journal_entry_pin *pin, u64 seq)
1135 {
1136         struct btree_interior_update *as =
1137                 container_of(pin, struct btree_interior_update, journal);
1138
1139         bch2_journal_flush_seq_async(j, as->journal_seq, NULL);
1140 }
1141
1142 static void btree_interior_update_reparent(struct bch_fs *c,
1143                                            struct btree_interior_update *as,
1144                                            struct btree_interior_update *child)
1145 {
1146         child->b = NULL;
1147         child->mode = BTREE_INTERIOR_UPDATING_AS;
1148         child->parent_as = as;
1149         closure_get(&as->cl);
1150
1151         /*
1152          * When we write a new btree root, we have to drop our journal pin
1153          * _before_ the new nodes are technically reachable; see
1154          * btree_interior_update_nodes_written().
1155          *
1156          * This goes for journal pins that are recursively blocked on us - so,
1157          * just transfer the journal pin to the new interior update so
1158          * btree_interior_update_nodes_written() can drop it.
1159          */
1160         bch2_journal_pin_add_if_older(&c->journal, &child->journal,
1161                                       &as->journal, interior_update_flush);
1162         bch2_journal_pin_drop(&c->journal, &child->journal);
1163
1164         as->journal_seq = max(as->journal_seq, child->journal_seq);
1165 }
1166
1167 static void btree_interior_update_updated_root(struct bch_fs *c,
1168                                                struct btree_interior_update *as,
1169                                                enum btree_id btree_id)
1170 {
1171         struct btree_root *r = &c->btree_roots[btree_id];
1172
1173         mutex_lock(&c->btree_interior_update_lock);
1174
1175         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1176
1177         /*
1178          * Old root might not be persistent yet - if so, redirect its
1179          * btree_interior_update operation to point to us:
1180          */
1181         if (r->as)
1182                 btree_interior_update_reparent(c, as, r->as);
1183
1184         as->mode = BTREE_INTERIOR_UPDATING_ROOT;
1185         as->b = r->b;
1186         r->as = as;
1187
1188         mutex_unlock(&c->btree_interior_update_lock);
1189
1190         /*
1191          * When we're rewriting nodes and updating interior nodes, there's an
1192          * issue with updates that haven't been written in the journal getting
1193          * mixed together with older data - see btree_interior_update_updated_btree()
1194          * for the explanation.
1195          *
1196          * However, this doesn't affect us when we're writing a new btree root -
1197          * because to make that new root reachable we have to write out a new
1198          * journal entry, which must necessarily be newer than as->journal_seq.
1199          */
1200
1201         continue_at(&as->cl, btree_interior_update_nodes_written,
1202                     system_freezable_wq);
1203 }
1204
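/*
 * Track @b as a node created by @as: it isn't considered reachable until
 * @as completes, and gets written out at that point if still dirty:
 */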
1205 static void btree_interior_update_will_make_reachable(struct bch_fs *c,
1206                                 struct btree_interior_update *as,
1207                                 struct btree *b)
1208 {
1209         mutex_lock(&c->btree_interior_update_lock);
1210         BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
1211         BUG_ON(b->will_make_reachable);
1212
1213         as->new_nodes[as->nr_new_nodes++] = b;
1214         b->will_make_reachable = as;
1215         mutex_unlock(&c->btree_interior_update_lock);
1216 }
1217
1218 static void __btree_interior_update_drop_new_node(struct btree *b)
1219 {
1220         struct btree_interior_update *as = b->will_make_reachable;
1221         unsigned i;
1222
1223         BUG_ON(!as);
1224
1225         for (i = 0; i < as->nr_new_nodes; i++)
1226                 if (as->new_nodes[i] == b)
1227                         goto found;
1228
1229         BUG();
1230 found:
1231         as->nr_new_nodes--;
1232         memmove(&as->new_nodes[i],
1233                 &as->new_nodes[i + 1],
1234                 sizeof(struct btree *) * (as->nr_new_nodes - i));
1235         b->will_make_reachable = NULL;
1236 }
1237
1238 static void btree_interior_update_drop_new_node(struct bch_fs *c,
1239                                                 struct btree *b)
1240 {
1241         mutex_lock(&c->btree_interior_update_lock);
1242         __btree_interior_update_drop_new_node(b);
1243         mutex_unlock(&c->btree_interior_update_lock);
1244 }
1245
1246 static void bch2_btree_interior_update_add_node_reference(struct bch_fs *c,
1247                                                    struct btree_interior_update *as,
1248                                                    struct btree *b)
1249 {
1250         struct pending_btree_node_free *d;
1251
1252         mutex_lock(&c->btree_interior_update_lock);
1253
1254         /* Add this node to the list of nodes being freed: */
1255         BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
1256
1257         d = &as->pending[as->nr_pending++];
1258         d->index_update_done    = false;
1259         d->seq                  = b->data->keys.seq;
1260         d->btree_id             = b->btree_id;
1261         d->level                = b->level;
1262         bkey_copy(&d->key, &b->key);
1263
1264         mutex_unlock(&c->btree_interior_update_lock);
1265 }
1266
1267 /*
1268  * @b is being split/rewritten: it may have pointers to not-yet-written btree
1269  * nodes and thus outstanding btree_interior_updates - redirect @b's
1270  * btree_interior_updates to point to this btree_interior_update:
1271  */
1272 void bch2_btree_interior_update_will_free_node(struct bch_fs *c,
1273                                               struct btree_interior_update *as,
1274                                               struct btree *b)
1275 {
1276         struct closure *cl, *cl_n;
1277         struct btree_interior_update *p, *n;
1278         struct btree_write *w;
1279         struct bset_tree *t;
1280
1281         bch2_btree_interior_update_add_node_reference(c, as, b);
1282
1283         /*
1284          * Does this node have data that hasn't been written in the journal?
1285          *
1286          * If so, we have to wait for the corresponding journal entry to be
1287          * written before making the new nodes reachable - we can't just carry
1288          * over the bset->journal_seq tracking, since we'll be mixing those keys
1289          * in with keys that aren't in the journal anymore:
1290          */
1291         for_each_bset(b, t)
1292                 as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
1293
1294         mutex_lock(&c->btree_interior_update_lock);
1295
1296         /*
1297          * Does this node have any btree_interior_update operations preventing
1298          * it from being written?
1299          *
1300          * If so, redirect them to point to this btree_interior_update: we can
1301          * write out our new nodes, but we won't make them visible until those
1302          * operations complete
1303          */
1304         list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
1305                 list_del(&p->write_blocked_list);
1306                 btree_interior_update_reparent(c, as, p);
1307         }
1308
1309         clear_btree_node_dirty(b);
1310         clear_btree_node_need_write(b);
1311         w = btree_current_write(b);
1312
1313         llist_for_each_entry_safe(cl, cl_n, llist_del_all(&w->wait.list), list)
1314                 llist_add(&cl->list, &as->wait.list);
1315
1316         /*
1317          * Does this node have unwritten data that has a pin on the journal?
1318          *
1319          * If so, transfer that pin to the btree_interior_update operation -
1320          * note that if we're freeing multiple nodes, we only need to keep the
1321          * oldest pin of any of the nodes we're freeing. We'll release the pin
1322          * when the new nodes are persistent and reachable on disk:
1323          */
1324         bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1325                                       &as->journal, interior_update_flush);
1326         bch2_journal_pin_drop(&c->journal, &w->journal);
1327
1328         w = btree_prev_write(b);
1329         bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1330                                       &as->journal, interior_update_flush);
1331         bch2_journal_pin_drop(&c->journal, &w->journal);
1332
1333         if (b->will_make_reachable)
1334                 __btree_interior_update_drop_new_node(b);
1335
1336         mutex_unlock(&c->btree_interior_update_lock);
1337 }
1338
1339 static void btree_node_interior_verify(struct btree *b)
1340 {
1341         struct btree_node_iter iter;
1342         struct bkey_packed *k;
1343
1344         BUG_ON(!b->level);
1345
1346         bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
1347 #if 1
1348         BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
1349                bkey_cmp_left_packed(b, k, &b->key.k.p));
1350
1351         BUG_ON((bch2_btree_node_iter_advance(&iter, b),
1352                 !bch2_btree_node_iter_end(&iter)));
1353 #else
1354         const char *msg;
1355
1356         msg = "not found";
1357         k = bch2_btree_node_iter_peek(&iter, b);
1358         if (!k)
1359                 goto err;
1360
1361         msg = "isn't what it should be";
1362         if (bkey_cmp_left_packed(b, k, &b->key.k.p))
1363                 goto err;
1364
1365         bch2_btree_node_iter_advance(&iter, b);
1366
1367         msg = "isn't last key";
1368         if (!bch2_btree_node_iter_end(&iter))
1369                 goto err;
1370         return;
1371 err:
1372         bch2_dump_btree_node(b);
1373         printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
1374                b->key.k.p.offset, msg);
1375         BUG();
1376 #endif
1377 }
1378
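/*
 * Insert @insert_keys into interior node @b as part of @as; returns -1
 * without inserting anything if the keys don't fit, in which case the
 * caller has to make room (typically by splitting @b) first:
 */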
1379 static int
1380 bch2_btree_insert_keys_interior(struct btree *b,
1381                                struct btree_iter *iter,
1382                                struct keylist *insert_keys,
1383                                struct btree_interior_update *as,
1384                                struct btree_reserve *res)
1385 {
1386         struct bch_fs *c = iter->c;
1387         struct btree_iter *linked;
1388         struct btree_node_iter node_iter;
1389         struct bkey_i *insert = bch2_keylist_front(insert_keys);
1390         struct bkey_packed *k;
1391
1392         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1393         BUG_ON(!b->level);
1394         BUG_ON(!as || as->b);
1395         verify_keys_sorted(insert_keys);
1396
1397         btree_node_lock_for_insert(b, iter);
1398
1399         if (bch_keylist_u64s(insert_keys) >
1400             bch_btree_keys_u64s_remaining(c, b)) {
1401                 bch2_btree_node_unlock_write(b, iter);
1402                 return -1;
1403         }
1404
1405         /* Don't screw up @iter's position: */
1406         node_iter = iter->node_iters[b->level];
1407
1408         /*
1409          * btree_split(), btree_gc_coalesce() will insert keys before
1410          * the iterator's current position - they know the keys go in
1411          * the node the iterator points to:
1412          */
1413         while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
1414                (bkey_cmp_packed(b, k, &insert->k) >= 0))
1415                 ;
1416
1417         while (!bch2_keylist_empty(insert_keys)) {
1418                 insert = bch2_keylist_front(insert_keys);
1419
1420                 bch2_insert_fixup_btree_ptr(iter, b, insert,
1421                                            &node_iter, &res->disk_res);
1422                 bch2_keylist_pop_front(insert_keys);
1423         }
1424
1425         btree_interior_update_updated_btree(c, as, b);
1426
1427         for_each_linked_btree_node(iter, b, linked)
1428                 bch2_btree_node_iter_peek(&linked->node_iters[b->level],
1429                                          b);
1430         bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
1431
1432         bch2_btree_iter_verify(iter, b);
1433
1434         if (bch2_maybe_compact_whiteouts(c, b))
1435                 bch2_btree_iter_reinit_node(iter, b);
1436
1437         bch2_btree_node_unlock_write(b, iter);
1438
1439         btree_node_interior_verify(b);
1440         return 0;
1441 }
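
/*
 * Illustrative sketch (not bcachefs code): the loop above backs the node
 * iterator up past any keys that sort >= the first key being inserted, so
 * keys that belong before the iterator's current position still land in the
 * right place. The same idea on a plain sorted array, with a hypothetical
 * "iterator" that is just an index:
 */
static unsigned long rewind_insert_pos(const unsigned long long *sorted_keys,
                                       unsigned long pos,
                                       unsigned long long insert)
{
        /* back up while the key before @pos would sort after @insert */
        while (pos && sorted_keys[pos - 1] >= insert)
                pos--;

        return pos;
}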
1442
1443 /*
1444  * Move keys from n1 (original replacement node, now lower node) to n2 (higher
1445  * node)
1446  */
1447 static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n1,
1448                                         struct btree_reserve *reserve,
1449                                         struct btree_interior_update *as)
1450 {
1451         struct bch_fs *c = iter->c;
1452         size_t nr_packed = 0, nr_unpacked = 0;
1453         struct btree *n2;
1454         struct bset *set1, *set2;
1455         struct bkey_packed *k, *prev = NULL;
1456
1457         n2 = bch2_btree_node_alloc(c, n1->level, iter->btree_id, as, reserve);
1458
1459         n2->data->max_key       = n1->data->max_key;
1460         n2->data->format        = n1->format;
1461         n2->key.k.p = n1->key.k.p;
1462
1463         btree_node_set_format(n2, n2->data->format);
1464
1465         set1 = btree_bset_first(n1);
1466         set2 = btree_bset_first(n2);
1467
1468         /*
1469          * Has to be a linear search because we don't have an auxiliary
1470          * search tree yet
1471          */
1472         k = set1->start;
1473         while (1) {
1474                 if (bkey_next(k) == vstruct_last(set1))
1475                         break;
1476                 if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
1477                         break;
1478
1479                 if (bkey_packed(k))
1480                         nr_packed++;
1481                 else
1482                         nr_unpacked++;
1483
1484                 prev = k;
1485                 k = bkey_next(k);
1486         }
1487
1488         BUG_ON(!prev);
1489
1490         n1->key.k.p = bkey_unpack_pos(n1, prev);
1491         n1->data->max_key = n1->key.k.p;
1492         n2->data->min_key =
1493                 btree_type_successor(n1->btree_id, n1->key.k.p);
1494
1495         set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
1496         set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
1497
1498         set_btree_bset_end(n1, n1->set);
1499         set_btree_bset_end(n2, n2->set);
1500
1501         n2->nr.live_u64s        = le16_to_cpu(set2->u64s);
1502         n2->nr.bset_u64s[0]     = le16_to_cpu(set2->u64s);
1503         n2->nr.packed_keys      = n1->nr.packed_keys - nr_packed;
1504         n2->nr.unpacked_keys    = n1->nr.unpacked_keys - nr_unpacked;
1505
1506         n1->nr.live_u64s        = le16_to_cpu(set1->u64s);
1507         n1->nr.bset_u64s[0]     = le16_to_cpu(set1->u64s);
1508         n1->nr.packed_keys      = nr_packed;
1509         n1->nr.unpacked_keys    = nr_unpacked;
1510
1511         BUG_ON(!set1->u64s);
1512         BUG_ON(!set2->u64s);
1513
1514         memcpy_u64s(set2->start,
1515                     vstruct_end(set1),
1516                     le16_to_cpu(set2->u64s));
1517
1518         btree_node_reset_sib_u64s(n1);
1519         btree_node_reset_sib_u64s(n2);
1520
1521         bch2_verify_btree_nr_keys(n1);
1522         bch2_verify_btree_nr_keys(n2);
1523
1524         if (n1->level) {
1525                 btree_node_interior_verify(n1);
1526                 btree_node_interior_verify(n2);
1527         }
1528
1529         return n2;
1530 }
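
/*
 * Illustrative sketch (not bcachefs code): the split point above is chosen by
 * a linear scan that stops once roughly 3/5 of the set's u64s have been
 * assigned to the lower node, keeping the previous key as the pivot. The same
 * scan over a hypothetical array of variable-length records:
 */
struct fake_rec {
        unsigned long   u64s;   /* size of this record, in u64s */
};

/* Returns the index of the last record kept in the lower node (the pivot) */
static unsigned long pick_split_pivot(const struct fake_rec *recs,
                                      unsigned long nr,
                                      unsigned long total_u64s)
{
        unsigned long i, offset = 0, pivot = 0;

        /* assumes nr >= 2, mirroring the BUG_ON(!prev) above */
        for (i = 0; i + 1 < nr; i++) {
                if (offset >= total_u64s * 3 / 5)
                        break;

                pivot = i;
                offset += recs[i].u64s;
        }

        return pivot;
}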
1531
1532 /*
1533  * For updates to interior nodes, we have to do the insert before we split
1534  * because the keys we're inserting have to go in atomically. Post split,
1535  * the keys might have to go into different nodes and the split would no longer
1536  * be atomic.
1537  *
1538  * Worse, if the insert comes from btree node coalescing and we do the insert
1539  * after the split (and pick the pivot), the pivot we pick might fall between
1540  * nodes that were coalesced, and thus in the middle of a child node post
1541  * coalescing:
1542  */
1543 static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
1544                                     struct keylist *keys,
1545                                     struct btree_reserve *res)
1546 {
1547         struct btree_node_iter node_iter;
1548         struct bkey_i *k = bch2_keylist_front(keys);
1549         struct bkey_packed *p;
1550         struct bset *i;
1551
1552         BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
1553
1554         bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
1555
1556         while (!bch2_keylist_empty(keys)) {
1557                 k = bch2_keylist_front(keys);
1558
1559                 BUG_ON(bch_keylist_u64s(keys) >
1560                        bch_btree_keys_u64s_remaining(iter->c, b));
1561                 BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
1562                 BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
1563
1564                 bch2_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res);
1565                 bch2_keylist_pop_front(keys);
1566         }
1567
1568         /*
1569          * We can't tolerate whiteouts here - with whiteouts there can be
1570          * duplicate keys, and it would be rather bad if we picked a duplicate
1571          * for the pivot:
1572          */
1573         i = btree_bset_first(b);
1574         p = i->start;
1575         while (p != vstruct_last(i))
1576                 if (bkey_deleted(p)) {
1577                         le16_add_cpu(&i->u64s, -p->u64s);
1578                         set_btree_bset_end(b, b->set);
1579                         memmove_u64s_down(p, bkey_next(p),
1580                                           (u64 *) vstruct_last(i) -
1581                                           (u64 *) p);
1582                 } else
1583                         p = bkey_next(p);
1584
1585         BUG_ON(b->nsets != 1 ||
1586                b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
1587
1588         btree_node_interior_verify(b);
1589 }
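
/*
 * Illustrative sketch (not bcachefs code): the loop above strips whiteouts in
 * place by shifting the rest of the bset down over each deleted key, so no
 * duplicate can be chosen as a split pivot. The same in-place compaction over
 * a hypothetical flat buffer of u64s, where each record starts with a header
 * u64 giving its total size in u64s (assumed >= 1), with the high bit marking
 * it deleted:
 */
#include <string.h>

#define FAKE_REC_DELETED        (1ULL << 63)
#define FAKE_REC_U64S(hdr)      ((unsigned long) ((hdr) & ~FAKE_REC_DELETED))

/* Returns the new size of the buffer, in u64s */
static unsigned long drop_deleted_recs(unsigned long long *buf,
                                       unsigned long u64s)
{
        unsigned long long *p = buf, *end = buf + u64s;

        while (p != end) {
                unsigned long rec_u64s = FAKE_REC_U64S(*p);

                if (*p & FAKE_REC_DELETED) {
                        /* shift the tail down over the deleted record */
                        memmove(p, p + rec_u64s,
                                (end - (p + rec_u64s)) * sizeof(*p));
                        end -= rec_u64s;
                } else {
                        p += rec_u64s;
                }
        }

        return end - buf;
}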
1590
1591 static void btree_split(struct btree *b, struct btree_iter *iter,
1592                         struct keylist *insert_keys,
1593                         struct btree_reserve *reserve,
1594                         struct btree_interior_update *as)
1595 {
1596         struct bch_fs *c = iter->c;
1597         struct btree *parent = iter->nodes[b->level + 1];
1598         struct btree *n1, *n2 = NULL, *n3 = NULL;
1599         u64 start_time = local_clock();
1600
1601         BUG_ON(!parent && (b != btree_node_root(c, b)));
1602         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1603
1604         bch2_btree_interior_update_will_free_node(c, as, b);
1605
1606         n1 = bch2_btree_node_alloc_replacement(c, b, as, reserve);
1607
1608         if (b->level)
1609                 btree_split_insert_keys(iter, n1, insert_keys, reserve);
1610
1611         if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
1612                 trace_btree_node_split(c, b, b->nr.live_u64s);
1613
1614                 n2 = __btree_split_node(iter, n1, reserve, as);
1615
1616                 bch2_btree_build_aux_trees(n2);
1617                 bch2_btree_build_aux_trees(n1);
1618                 six_unlock_write(&n2->lock);
1619                 six_unlock_write(&n1->lock);
1620
1621                 bch2_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent);
1622
1623                 /*
1624                  * Note that on recursive parent_keys == insert_keys, so we
1625                  * can't start adding new keys to parent_keys before emptying it
1626                  * out (which we did with btree_split_insert_keys() above)
1627                  */
1628                 bch2_keylist_add(&as->parent_keys, &n1->key);
1629                 bch2_keylist_add(&as->parent_keys, &n2->key);
1630
1631                 if (!parent) {
1632                         /* Depth increases, make a new root */
1633                         n3 = __btree_root_alloc(c, b->level + 1,
1634                                                 iter->btree_id,
1635                                                 as, reserve);
1636
1637                         n3->sib_u64s[0] = U16_MAX;
1638                         n3->sib_u64s[1] = U16_MAX;
1639
1640                         btree_split_insert_keys(iter, n3, &as->parent_keys,
1641                                                 reserve);
1642                         bch2_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent);
1643                 }
1644         } else {
1645                 trace_btree_node_compact(c, b, b->nr.live_u64s);
1646
1647                 bch2_btree_build_aux_trees(n1);
1648                 six_unlock_write(&n1->lock);
1649
1650                 bch2_keylist_add(&as->parent_keys, &n1->key);
1651         }
1652
1653         bch2_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent);
1654
1655         /* New nodes all written, now make them visible: */
1656
1657         if (parent) {
1658                 /* Split a non root node */
1659                 bch2_btree_insert_node(parent, iter, &as->parent_keys,
1660                                       reserve, as);
1661         } else if (n3) {
1662                 bch2_btree_set_root(iter, n3, as, reserve);
1663         } else {
1664                 /* Root filled up but didn't need to be split */
1665                 bch2_btree_set_root(iter, n1, as, reserve);
1666         }
1667
1668         bch2_btree_open_bucket_put(c, n1);
1669         if (n2)
1670                 bch2_btree_open_bucket_put(c, n2);
1671         if (n3)
1672                 bch2_btree_open_bucket_put(c, n3);
1673
1674         /*
1675          * Note - at this point other linked iterators could still have @b read
1676          * locked; we're depending on the bch2_btree_iter_node_replace() calls
1677          * below removing all references to @b so we don't return with other
1678          * iterators pointing to a node they have locked that's been freed.
1679          *
1680  * We have to free the node first because the bch2_btree_iter_node_replace()
1681          * calls will drop _our_ iterator's reference - and intent lock - to @b.
1682          */
1683         bch2_btree_node_free_inmem(iter, b);
1684
1685         /* Successful split, update the iterator to point to the new nodes: */
1686
1687         if (n3)
1688                 bch2_btree_iter_node_replace(iter, n3);
1689         if (n2)
1690                 bch2_btree_iter_node_replace(iter, n2);
1691         bch2_btree_iter_node_replace(iter, n1);
1692
1693         bch2_time_stats_update(&c->btree_split_time, start_time);
1694 }
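
/*
 * Illustrative sketch (not bcachefs code): btree_split() above ends in one of
 * three shapes - a plain compaction into a single replacement node, a two-way
 * split, or a split that also grows the tree by a level because the node
 * being split was the root. A hypothetical decision helper:
 */
enum fake_split_outcome {
        FAKE_SPLIT_COMPACT,     /* single replacement node (n1) */
        FAKE_SPLIT_TWO_WAY,     /* n1 + n2, new keys go to the parent */
        FAKE_SPLIT_NEW_ROOT,    /* n1 + n2 + a new root (n3) */
};

static enum fake_split_outcome
fake_classify_split(unsigned replacement_blocks,   /* size of n1 after rewrite */
                    unsigned split_threshold,      /* BTREE_SPLIT_THRESHOLD(c) */
                    int has_parent)
{
        if (replacement_blocks <= split_threshold)
                return FAKE_SPLIT_COMPACT;

        return has_parent ? FAKE_SPLIT_TWO_WAY : FAKE_SPLIT_NEW_ROOT;
}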
1695
1696 /**
1697  * bch2_btree_insert_node - insert bkeys into a given interior btree node
1698  *
1699  * @b:                  interior node to insert into
1700  * @iter:               btree iterator
1701  * @insert_keys:        list of keys to insert
1702  * @reserve:            btree reserve to allocate new nodes from
1703  * @as:                 interior node update in progress
1704  *
1705  * Inserts the keys into @b if they fit; otherwise @b is split and the keys go
1706  * in as part of the split. Inserts into interior nodes have to be atomic.
1707  */
1708 void bch2_btree_insert_node(struct btree *b,
1709                            struct btree_iter *iter,
1710                            struct keylist *insert_keys,
1711                            struct btree_reserve *reserve,
1712                            struct btree_interior_update *as)
1713 {
1714         BUG_ON(!b->level);
1715         BUG_ON(!reserve || !as);
1716
1717         if ((as->flags & BTREE_INTERIOR_UPDATE_MUST_REWRITE) ||
1718             bch2_btree_insert_keys_interior(b, iter, insert_keys,
1719                                             as, reserve))
1720                 btree_split(b, iter, insert_keys, reserve, as);
1721 }
1722
1723 static int bch2_btree_split_leaf(struct btree_iter *iter, unsigned flags)
1724 {
1725         struct bch_fs *c = iter->c;
1726         struct btree *b = iter->nodes[0];
1727         struct btree_reserve *reserve;
1728         struct btree_interior_update *as;
1729         struct closure cl;
1730         int ret = 0;
1731
1732         closure_init_stack(&cl);
1733
1734         /* Hack, because gc and splitting nodes doesn't mix yet: */
1735         if (!down_read_trylock(&c->gc_lock)) {
1736                 bch2_btree_iter_unlock(iter);
1737                 down_read(&c->gc_lock);
1738         }
1739
1740         /*
1741          * XXX: figure out how far we might need to split,
1742          * instead of locking/reserving all the way to the root:
1743          */
1744         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1745                 ret = -EINTR;
1746                 goto out;
1747         }
1748
1749         reserve = bch2_btree_reserve_get(c, b, 0, flags, &cl);
1750         if (IS_ERR(reserve)) {
1751                 ret = PTR_ERR(reserve);
1752                 if (ret == -EAGAIN) {
1753                         bch2_btree_iter_unlock(iter);
1754                         up_read(&c->gc_lock);
1755                         closure_sync(&cl);
1756                         return -EINTR;
1757                 }
1758                 goto out;
1759         }
1760
1761         as = bch2_btree_interior_update_alloc(c);
1762
1763         btree_split(b, iter, NULL, reserve, as);
1764         bch2_btree_reserve_put(c, reserve);
1765
1766         bch2_btree_iter_set_locks_want(iter, 1);
1767 out:
1768         up_read(&c->gc_lock);
1769         return ret;
1770 }
1771
1772 enum btree_node_sibling {
1773         btree_prev_sib,
1774         btree_next_sib,
1775 };
1776
1777 static struct btree *btree_node_get_sibling(struct btree_iter *iter,
1778                                             struct btree *b,
1779                                             enum btree_node_sibling sib)
1780 {
1781         struct btree *parent;
1782         struct btree_node_iter node_iter;
1783         struct bkey_packed *k;
1784         BKEY_PADDED(k) tmp;
1785         struct btree *ret;
1786         unsigned level = b->level;
1787
1788         parent = iter->nodes[level + 1];
1789         if (!parent)
1790                 return NULL;
1791
1792         if (!bch2_btree_node_relock(iter, level + 1)) {
1793                 bch2_btree_iter_set_locks_want(iter, level + 2);
1794                 return ERR_PTR(-EINTR);
1795         }
1796
1797         node_iter = iter->node_iters[parent->level];
1798
1799         k = bch2_btree_node_iter_peek_all(&node_iter, parent);
1800         BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
1801
1802         do {
1803                 k = sib == btree_prev_sib
1804                         ? bch2_btree_node_iter_prev_all(&node_iter, parent)
1805                         : (bch2_btree_node_iter_advance(&node_iter, parent),
1806                            bch2_btree_node_iter_peek_all(&node_iter, parent));
1807                 if (!k)
1808                         return NULL;
1809         } while (bkey_deleted(k));
1810
1811         bch2_bkey_unpack(parent, &tmp.k, k);
1812
1813         ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1814
1815         if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
1816                 btree_node_unlock(iter, level);
1817                 ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1818         }
1819
1820         if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) {
1821                 six_unlock_intent(&ret->lock);
1822                 ret = ERR_PTR(-EINTR);
1823         }
1824
1825         return ret;
1826 }
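
/*
 * Illustrative sketch (not bcachefs code): btree_node_get_sibling() above
 * steps the parent's node iterator one position back or forward from the key
 * for @b, skipping deleted keys, to find the sibling's pointer. The same walk
 * over a hypothetical array of child slots:
 */
struct fake_child_slot {
        int             deleted;
        unsigned        node_id;        /* stand-in for the child pointer */
};

/* Returns the sibling's index, or -1 if there is no sibling on that side */
static long fake_sibling_index(const struct fake_child_slot *slots,
                               unsigned long nr, unsigned long cur,
                               int prev_sib)
{
        long i = cur;

        do {
                i += prev_sib ? -1 : 1;
                if (i < 0 || (unsigned long) i >= nr)
                        return -1;
        } while (slots[i].deleted);

        return i;
}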
1827
1828 static int __foreground_maybe_merge(struct btree_iter *iter,
1829                                     enum btree_node_sibling sib)
1830 {
1831         struct bch_fs *c = iter->c;
1832         struct btree_reserve *reserve;
1833         struct btree_interior_update *as;
1834         struct bkey_format_state new_s;
1835         struct bkey_format new_f;
1836         struct bkey_i delete;
1837         struct btree *b, *m, *n, *prev, *next, *parent;
1838         struct closure cl;
1839         size_t sib_u64s;
1840         int ret = 0;
1841
1842         closure_init_stack(&cl);
1843 retry:
1844         if (!bch2_btree_node_relock(iter, iter->level))
1845                 return 0;
1846
1847         b = iter->nodes[iter->level];
1848
1849         parent = iter->nodes[b->level + 1];
1850         if (!parent)
1851                 return 0;
1852
1853         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1854                 return 0;
1855
1856         /* XXX: can't be holding read locks */
1857         m = btree_node_get_sibling(iter, b, sib);
1858         if (IS_ERR(m)) {
1859                 ret = PTR_ERR(m);
1860                 goto out;
1861         }
1862
1863         /* NULL means no sibling: */
1864         if (!m) {
1865                 b->sib_u64s[sib] = U16_MAX;
1866                 return 0;
1867         }
1868
1869         if (sib == btree_prev_sib) {
1870                 prev = m;
1871                 next = b;
1872         } else {
1873                 prev = b;
1874                 next = m;
1875         }
1876
1877         bch2_bkey_format_init(&new_s);
1878         __bch2_btree_calc_format(&new_s, b);
1879         __bch2_btree_calc_format(&new_s, m);
1880         new_f = bch2_bkey_format_done(&new_s);
1881
1882         sib_u64s = btree_node_u64s_with_format(b, &new_f) +
1883                 btree_node_u64s_with_format(m, &new_f);
1884
1885         if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
1886                 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1887                 sib_u64s /= 2;
1888                 sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1889         }
1890
1891         sib_u64s = min(sib_u64s, btree_max_u64s(c));
1892         b->sib_u64s[sib] = sib_u64s;
1893
1894         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
1895                 six_unlock_intent(&m->lock);
1896                 return 0;
1897         }
1898
1899         /* We're changing btree topology, doesn't mix with gc: */
1900         if (!down_read_trylock(&c->gc_lock)) {
1901                 six_unlock_intent(&m->lock);
1902                 bch2_btree_iter_unlock(iter);
1903
1904                 down_read(&c->gc_lock);
1905                 up_read(&c->gc_lock);
1906                 ret = -EINTR;
1907                 goto out;
1908         }
1909
1910         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1911                 ret = -EINTR;
1912                 goto out_unlock;
1913         }
1914
1915         reserve = bch2_btree_reserve_get(c, b, 0,
1916                                         BTREE_INSERT_NOFAIL|
1917                                         BTREE_INSERT_USE_RESERVE,
1918                                         &cl);
1919         if (IS_ERR(reserve)) {
1920                 ret = PTR_ERR(reserve);
1921                 goto out_unlock;
1922         }
1923
1924         as = bch2_btree_interior_update_alloc(c);
1925
1926         bch2_btree_interior_update_will_free_node(c, as, b);
1927         bch2_btree_interior_update_will_free_node(c, as, m);
1928
1929         n = bch2_btree_node_alloc(c, b->level, b->btree_id, as, reserve);
1930
1931         n->data->min_key        = prev->data->min_key;
1932         n->data->max_key        = next->data->max_key;
1933         n->data->format         = new_f;
1934         n->key.k.p              = next->key.k.p;
1935
1936         btree_node_set_format(n, new_f);
1937
1938         bch2_btree_sort_into(c, n, prev);
1939         bch2_btree_sort_into(c, n, next);
1940
1941         bch2_btree_build_aux_trees(n);
1942         six_unlock_write(&n->lock);
1943
1944         bkey_init(&delete.k);
1945         delete.k.p = prev->key.k.p;
1946         bch2_keylist_add(&as->parent_keys, &delete);
1947         bch2_keylist_add(&as->parent_keys, &n->key);
1948
1949         bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
1950
1951         bch2_btree_insert_node(parent, iter, &as->parent_keys, reserve, as);
1952
1953         bch2_btree_open_bucket_put(c, n);
1954         bch2_btree_node_free_inmem(iter, b);
1955         bch2_btree_node_free_inmem(iter, m);
1956         bch2_btree_iter_node_replace(iter, n);
1957
1958         bch2_btree_iter_verify(iter, n);
1959
1960         bch2_btree_reserve_put(c, reserve);
1961 out_unlock:
1962         if (ret != -EINTR && ret != -EAGAIN)
1963                 bch2_btree_iter_set_locks_want(iter, 1);
1964         six_unlock_intent(&m->lock);
1965         up_read(&c->gc_lock);
1966 out:
1967         if (ret == -EAGAIN || ret == -EINTR) {
1968                 bch2_btree_iter_unlock(iter);
1969                 ret = -EINTR;
1970         }
1971
1972         closure_sync(&cl);
1973
1974         if (ret == -EINTR) {
1975                 ret = bch2_btree_iter_traverse(iter);
1976                 if (!ret)
1977                         goto retry;
1978         }
1979
1980         return ret;
1981 }
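
/*
 * Illustrative sketch (not bcachefs code): the hysteresis above damps the
 * cached sibling size - anything over BTREE_FOREGROUND_MERGE_HYSTERESIS(c)
 * only counts at half weight - so a pair of nodes hovering right at the merge
 * threshold doesn't get merged and re-split over and over. As a standalone
 * helper, with the thresholds passed in:
 */
static unsigned long fake_damped_sib_u64s(unsigned long combined_u64s,
                                          unsigned long hysteresis,
                                          unsigned long max_u64s)
{
        if (combined_u64s > hysteresis)
                combined_u64s = hysteresis + (combined_u64s - hysteresis) / 2;

        /* clamp to the most a single node could hold */
        return combined_u64s < max_u64s ? combined_u64s : max_u64s;
}

/*
 * A merge is only attempted while the damped size stays at or below
 * BTREE_FOREGROUND_MERGE_THRESHOLD(c).
 */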
1982
1983 static inline int foreground_maybe_merge(struct btree_iter *iter,
1984                                          enum btree_node_sibling sib)
1985 {
1986         struct bch_fs *c = iter->c;
1987         struct btree *b;
1988
1989         if (!btree_node_locked(iter, iter->level))
1990                 return 0;
1991
1992         b = iter->nodes[iter->level];
1993         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1994                 return 0;
1995
1996         return __foreground_maybe_merge(iter, sib);
1997 }
1998
1999 /**
2000  * btree_insert_key - insert one key into a leaf node
2001  */
2002 static enum btree_insert_ret
2003 btree_insert_key(struct btree_insert *trans,
2004                  struct btree_insert_entry *insert)
2005 {
2006         struct bch_fs *c = trans->c;
2007         struct btree_iter *iter = insert->iter;
2008         struct btree *b = iter->nodes[0];
2009         enum btree_insert_ret ret;
2010         int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
2011         int old_live_u64s = b->nr.live_u64s;
2012         int live_u64s_added, u64s_added;
2013
2014         iter->flags &= ~BTREE_ITER_UPTODATE;
2015
2016         ret = !btree_node_is_extents(b)
2017                 ? bch2_insert_fixup_key(trans, insert)
2018                 : bch2_insert_fixup_extent(trans, insert);
2019
2020         live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
2021         u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
2022
2023         if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
2024                 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
2025         if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
2026                 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
2027
2028         if (u64s_added > live_u64s_added &&
2029             bch2_maybe_compact_whiteouts(iter->c, b))
2030                 bch2_btree_iter_reinit_node(iter, b);
2031
2032         trace_btree_insert_key(c, b, insert->k);
2033         return ret;
2034 }
2035
2036 static bool same_leaf_as_prev(struct btree_insert *trans,
2037                               struct btree_insert_entry *i)
2038 {
2039         /*
2040          * Because we sorted the transaction entries, if multiple iterators
2041          * point to the same leaf node they'll always be adjacent now:
2042          */
2043         return i != trans->entries &&
2044                 i[0].iter->nodes[0] == i[-1].iter->nodes[0];
2045 }
2046
2047 #define trans_for_each_entry(trans, i)                                  \
2048         for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
2049
2050 static void multi_lock_write(struct btree_insert *trans)
2051 {
2052         struct btree_insert_entry *i;
2053
2054         trans_for_each_entry(trans, i)
2055                 if (!same_leaf_as_prev(trans, i))
2056                         btree_node_lock_for_insert(i->iter->nodes[0], i->iter);
2057 }
2058
2059 static void multi_unlock_write(struct btree_insert *trans)
2060 {
2061         struct btree_insert_entry *i;
2062
2063         trans_for_each_entry(trans, i)
2064                 if (!same_leaf_as_prev(trans, i))
2065                         bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter);
2066 }
2067
2068 static int btree_trans_entry_cmp(const void *_l, const void *_r)
2069 {
2070         const struct btree_insert_entry *l = _l;
2071         const struct btree_insert_entry *r = _r;
2072
2073         return btree_iter_cmp(l->iter, r->iter);
2074 }
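
/*
 * Illustrative sketch (not bcachefs code): sorting the transaction entries by
 * iterator position makes every entry that lands in the same leaf adjacent,
 * so the lock/unlock loops above take each leaf's write lock exactly once.
 * The same pattern over a hypothetical array of entries:
 */
#include <stdlib.h>

struct fake_trans_entry {
        int     leaf_id;        /* stand-in for i->iter->nodes[0] */
        int     key;            /* stand-in for the key's position */
};

static int fake_trans_entry_cmp(const void *_l, const void *_r)
{
        const struct fake_trans_entry *l = _l, *r = _r;

        return (l->key > r->key) - (l->key < r->key);
}

static void fake_lock_each_leaf_once(struct fake_trans_entry *e,
                                     unsigned long nr,
                                     void (*lock_leaf)(int leaf_id))
{
        unsigned long i;

        qsort(e, nr, sizeof(*e), fake_trans_entry_cmp);

        for (i = 0; i < nr; i++)
                if (!i || e[i].leaf_id != e[i - 1].leaf_id)
                        lock_leaf(e[i].leaf_id);
}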
2075
2076 /* Normal update interface: */
2077
2078 /**
2079  * __bch2_btree_insert_at - insert keys at the given iterator positions
2080  *
2081  * This is the main entry point for btree updates.
2082  *
2083  * Return values:
2084  * -EINTR: locking changed, this function should be called again. Only returned
2085  *  if passed BTREE_INSERT_ATOMIC.
2086  * -EROFS: filesystem read only
2087  * -EIO: journal or btree node IO error
2088  */
2089 int __bch2_btree_insert_at(struct btree_insert *trans)
2090 {
2091         struct bch_fs *c = trans->c;
2092         struct btree_insert_entry *i;
2093         struct btree_iter *split = NULL;
2094         bool cycle_gc_lock = false;
2095         unsigned u64s;
2096         int ret;
2097
2098         trans_for_each_entry(trans, i) {
2099                 BUG_ON(i->iter->level);
2100                 BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
2101         }
2102
2103         sort(trans->entries, trans->nr, sizeof(trans->entries[0]),
2104              btree_trans_entry_cmp, NULL);
2105
2106         if (unlikely(!percpu_ref_tryget(&c->writes)))
2107                 return -EROFS;
2108 retry_locks:
2109         ret = -EINTR;
2110         trans_for_each_entry(trans, i)
2111                 if (!bch2_btree_iter_set_locks_want(i->iter, 1))
2112                         goto err;
2113 retry:
2114         trans->did_work = false;
2115         u64s = 0;
2116         trans_for_each_entry(trans, i)
2117                 if (!i->done)
2118                         u64s += jset_u64s(i->k->k.u64s + i->extra_res);
2119
2120         memset(&trans->journal_res, 0, sizeof(trans->journal_res));
2121
2122         ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
2123                 ? bch2_journal_res_get(&c->journal,
2124                                       &trans->journal_res,
2125                                       u64s, u64s)
2126                 : 0;
2127         if (ret)
2128                 goto err;
2129
2130         multi_lock_write(trans);
2131
2132         u64s = 0;
2133         trans_for_each_entry(trans, i) {
2134                 /* Multiple inserts might go to same leaf: */
2135                 if (!same_leaf_as_prev(trans, i))
2136                         u64s = 0;
2137
2138                 /*
2139                  * bch2_btree_node_insert_fits() must be called under write lock:
2140                  * with only an intent lock, another thread can still call
2141                  * bch2_btree_node_write(), converting an unwritten bset to a
2142                  * written one
2143                  */
2144                 if (!i->done) {
2145                         u64s += i->k->k.u64s + i->extra_res;
2146                         if (!bch2_btree_node_insert_fits(c,
2147                                         i->iter->nodes[0], u64s)) {
2148                                 split = i->iter;
2149                                 goto unlock;
2150                         }
2151                 }
2152         }
2153
2154         ret = 0;
2155         split = NULL;
2156         cycle_gc_lock = false;
2157
2158         trans_for_each_entry(trans, i) {
2159                 if (i->done)
2160                         continue;
2161
2162                 switch (btree_insert_key(trans, i)) {
2163                 case BTREE_INSERT_OK:
2164                         i->done = true;
2165                         break;
2166                 case BTREE_INSERT_JOURNAL_RES_FULL:
2167                 case BTREE_INSERT_NEED_TRAVERSE:
2168                         ret = -EINTR;
2169                         break;
2170                 case BTREE_INSERT_NEED_RESCHED:
2171                         ret = -EAGAIN;
2172                         break;
2173                 case BTREE_INSERT_BTREE_NODE_FULL:
2174                         split = i->iter;
2175                         break;
2176                 case BTREE_INSERT_ENOSPC:
2177                         ret = -ENOSPC;
2178                         break;
2179                 case BTREE_INSERT_NEED_GC_LOCK:
2180                         cycle_gc_lock = true;
2181                         ret = -EINTR;
2182                         break;
2183                 default:
2184                         BUG();
2185                 }
2186
2187                 if (!trans->did_work && (ret || split))
2188                         break;
2189         }
2190 unlock:
2191         multi_unlock_write(trans);
2192         bch2_journal_res_put(&c->journal, &trans->journal_res);
2193
2194         if (split)
2195                 goto split;
2196         if (ret)
2197                 goto err;
2198
2199         /*
2200          * hack: iterators are inconsistent when they hit end of leaf, until
2201          * traversed again
2202          */
2203         trans_for_each_entry(trans, i)
2204                 if (i->iter->flags & BTREE_ITER_AT_END_OF_LEAF)
2205                         goto out;
2206
2207         trans_for_each_entry(trans, i)
2208                 if (!same_leaf_as_prev(trans, i)) {
2209                         foreground_maybe_merge(i->iter, btree_prev_sib);
2210                         foreground_maybe_merge(i->iter, btree_next_sib);
2211                 }
2212 out:
2213         /* make sure we didn't lose an error: */
2214         if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
2215                 trans_for_each_entry(trans, i)
2216                         BUG_ON(!i->done);
2217
2218         percpu_ref_put(&c->writes);
2219         return ret;
2220 split:
2221         /*
2222          * have to drop journal res before splitting, because splitting means
2223          * allocating new btree nodes, and holding a journal reservation
2224          * potentially blocks the allocator:
2225          */
2226         ret = bch2_btree_split_leaf(split, trans->flags);
2227         if (ret)
2228                 goto err;
2229         /*
2230          * if the split didn't have to drop locks the insert will still be
2231          * atomic (in the BTREE_INSERT_ATOMIC sense, what the caller peeked()
2232          * and is overwriting won't have changed)
2233          */
2234         goto retry_locks;
2235 err:
2236         if (cycle_gc_lock) {
2237                 down_read(&c->gc_lock);
2238                 up_read(&c->gc_lock);
2239         }
2240
2241         if (ret == -EINTR) {
2242                 trans_for_each_entry(trans, i) {
2243                         int ret2 = bch2_btree_iter_traverse(i->iter);
2244                         if (ret2) {
2245                                 ret = ret2;
2246                                 goto out;
2247                         }
2248                 }
2249
2250                 /*
2251  * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
2252                  * dropped locks:
2253                  */
2254                 if (!(trans->flags & BTREE_INSERT_ATOMIC))
2255                         goto retry;
2256         }
2257
2258         goto out;
2259 }
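
/*
 * Illustrative sketch (not bcachefs code): before taking any write locks, the
 * function above sizes its journal reservation by summing, over every entry
 * not yet done, the key's size plus its extra reservation, with jset_u64s()
 * adding the per-entry journal overhead. The same accounting with a
 * hypothetical fixed header size:
 */
#define FAKE_JSET_ENTRY_HDR_U64S        2       /* hypothetical overhead */

struct fake_insert {
        int             done;
        unsigned        key_u64s;
        unsigned        extra_res;
};

static unsigned fake_journal_u64s_needed(const struct fake_insert *e,
                                         unsigned nr)
{
        unsigned i, u64s = 0;

        for (i = 0; i < nr; i++)
                if (!e[i].done)
                        u64s += FAKE_JSET_ENTRY_HDR_U64S +
                                e[i].key_u64s + e[i].extra_res;

        return u64s;
}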
2260
2261 int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
2262 {
2263         struct bkey_i k;
2264
2265         bkey_init(&k.k);
2266         k.k.p = iter->pos;
2267
2268         return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
2269                                     BTREE_INSERT_NOFAIL|
2270                                     BTREE_INSERT_USE_RESERVE|flags,
2271                                     BTREE_INSERT_ENTRY(iter, &k));
2272 }
2273
2274 int bch2_btree_insert_list_at(struct btree_iter *iter,
2275                              struct keylist *keys,
2276                              struct disk_reservation *disk_res,
2277                              struct extent_insert_hook *hook,
2278                              u64 *journal_seq, unsigned flags)
2279 {
2280         BUG_ON(flags & BTREE_INSERT_ATOMIC);
2281         BUG_ON(bch2_keylist_empty(keys));
2282         verify_keys_sorted(keys);
2283
2284         while (!bch2_keylist_empty(keys)) {
2285                 /* need to traverse between each insert */
2286                 int ret = bch2_btree_iter_traverse(iter);
2287                 if (ret)
2288                         return ret;
2289
2290                 ret = bch2_btree_insert_at(iter->c, disk_res, hook,
2291                                 journal_seq, flags,
2292                                 BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
2293                 if (ret)
2294                         return ret;
2295
2296                 bch2_keylist_pop_front(keys);
2297         }
2298
2299         return 0;
2300 }
2301
2302 /**
2303  * bch2_btree_insert - insert a single key into a given btree
2304  * @c:                  pointer to struct bch_fs
2305  * @id:                 btree to insert into
2306  * @k:                  key to insert
2307  * @hook:               insert callback
2308  */
2309 int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
2310                      struct bkey_i *k,
2311                      struct disk_reservation *disk_res,
2312                      struct extent_insert_hook *hook,
2313                      u64 *journal_seq, int flags)
2314 {
2315         struct btree_iter iter;
2316         int ret, ret2;
2317
2318         bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
2319                              BTREE_ITER_INTENT);
2320
2321         ret = bch2_btree_iter_traverse(&iter);
2322         if (unlikely(ret))
2323                 goto out;
2324
2325         ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
2326                                   BTREE_INSERT_ENTRY(&iter, k));
2327 out:    ret2 = bch2_btree_iter_unlock(&iter);
2328
2329         return ret ?: ret2;
2330 }
2331
2332 /**
2333  * bch2_btree_update - like bch2_btree_insert(), but requires that the key
2334  * being overwritten already exists (returns -ENOENT if it doesn't)
2335  */
2336 int bch2_btree_update(struct bch_fs *c, enum btree_id id,
2337                      struct bkey_i *k, u64 *journal_seq)
2338 {
2339         struct btree_iter iter;
2340         struct bkey_s_c u;
2341         int ret;
2342
2343         EBUG_ON(id == BTREE_ID_EXTENTS);
2344
2345         bch2_btree_iter_init(&iter, c, id, k->k.p,
2346                              BTREE_ITER_INTENT);
2347
2348         u = bch2_btree_iter_peek_with_holes(&iter);
2349         ret = btree_iter_err(u);
2350         if (ret)
2351                 return ret;
2352
2353         if (bkey_deleted(u.k)) {
2354                 bch2_btree_iter_unlock(&iter);
2355                 return -ENOENT;
2356         }
2357
2358         ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0,
2359                                   BTREE_INSERT_ENTRY(&iter, k));
2360         bch2_btree_iter_unlock(&iter);
2361         return ret;
2362 }
2363
2364 /*
2365  * bch2_btree_delete_range - delete everything within a given range
2366  *
2367  * Range is a half open interval - [start, end)
2368  */
2369 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
2370                            struct bpos start,
2371                            struct bpos end,
2372                            struct bversion version,
2373                            struct disk_reservation *disk_res,
2374                            struct extent_insert_hook *hook,
2375                            u64 *journal_seq)
2376 {
2377         struct btree_iter iter;
2378         struct bkey_s_c k;
2379         int ret = 0;
2380
2381         bch2_btree_iter_init(&iter, c, id, start,
2382                              BTREE_ITER_INTENT);
2383
2384         while ((k = bch2_btree_iter_peek(&iter)).k &&
2385                !(ret = btree_iter_err(k))) {
2386                 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
2387                 /* really shouldn't be using a bare, unpadded bkey_i */
2388                 struct bkey_i delete;
2389
2390                 if (bkey_cmp(iter.pos, end) >= 0)
2391                         break;
2392
2393                 bkey_init(&delete.k);
2394
2395                 /*
2396                  * For extents, iter.pos won't necessarily be the same as
2397                  * bkey_start_pos(k.k) (for non extents they always will be the
2398                  * same). It's important that we delete starting from iter.pos
2399                  * because the range we want to delete could start in the middle
2400                  * of k.
2401                  *
2402                  * (bch2_btree_iter_peek() does guarantee that iter.pos >=
2403                  * bkey_start_pos(k.k)).
2404                  */
2405                 delete.k.p = iter.pos;
2406                 delete.k.version = version;
2407
2408                 if (iter.flags & BTREE_ITER_IS_EXTENTS) {
2409                         /*
2410                          * The extents btree is special - KEY_TYPE_DISCARD is
2411                          * used for deletions, not KEY_TYPE_DELETED. This is an
2412                          * internal implementation detail that probably
2413                          * shouldn't be exposed (internally, KEY_TYPE_DELETED is
2414                          * used as a proxy for k->size == 0):
2415                          */
2416                         delete.k.type = KEY_TYPE_DISCARD;
2417
2418                         /* create the biggest key we can */
2419                         bch2_key_resize(&delete.k, max_sectors);
2420                         bch2_cut_back(end, &delete.k);
2421                 }
2422
2423                 ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
2424                                           BTREE_INSERT_NOFAIL,
2425                                           BTREE_INSERT_ENTRY(&iter, &delete));
2426                 if (ret)
2427                         break;
2428
2429                 bch2_btree_iter_cond_resched(&iter);
2430         }
2431
2432         bch2_btree_iter_unlock(&iter);
2433         return ret;
2434 }
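
/*
 * Hedged usage sketch: a hypothetical helper deleting every extent belonging
 * to one inode with the function above, by passing the half-open range
 * [POS(inum, 0), POS(inum + 1, 0)). Assumes the POS() position constructor
 * and a zero-initialized struct bversion are acceptable here, and elides
 * error handling:
 */
static int example_delete_inode_extents(struct bch_fs *c, u64 inum,
                                        u64 *journal_seq)
{
        return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
                                       POS(inum, 0),
                                       POS(inum + 1, 0),
                                       (struct bversion) { 0 },
                                       NULL,    /* disk reservation */
                                       NULL,    /* insert hook */
                                       journal_seq);
}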
2435
2436 static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
2437                                 struct btree *b, unsigned flags,
2438                                 struct closure *cl)
2439 {
2440         struct btree *n, *parent = iter->nodes[b->level + 1];
2441         struct btree_reserve *reserve;
2442         struct btree_interior_update *as;
2443
2444         reserve = bch2_btree_reserve_get(c, b, 0, flags, cl);
2445         if (IS_ERR(reserve)) {
2446                 trace_btree_gc_rewrite_node_fail(c, b);
2447                 return PTR_ERR(reserve);
2448         }
2449
2450         as = bch2_btree_interior_update_alloc(c);
2451
2452         bch2_btree_interior_update_will_free_node(c, as, b);
2453
2454         n = bch2_btree_node_alloc_replacement(c, b, as, reserve);
2455
2456         bch2_btree_build_aux_trees(n);
2457         six_unlock_write(&n->lock);
2458
2459         trace_btree_gc_rewrite_node(c, b);
2460
2461         bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
2462
2463         if (parent) {
2464                 bch2_btree_insert_node(parent, iter,
2465                                       &keylist_single(&n->key),
2466                                       reserve, as);
2467         } else {
2468                 bch2_btree_set_root(iter, n, as, reserve);
2469         }
2470
2471         bch2_btree_open_bucket_put(c, n);
2472
2473         bch2_btree_node_free_inmem(iter, b);
2474
2475         BUG_ON(!bch2_btree_iter_node_replace(iter, n));
2476
2477         bch2_btree_reserve_put(c, reserve);
2478         return 0;
2479 }
2480
2481 /**
2482  * bch2_btree_node_rewrite - Rewrite/move a btree node
2483  *
2484  * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e.
2485  * bch2_btree_reserve_get() has to wait)
2486  */
2487 int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
2488                             __le64 seq, unsigned flags)
2489 {
2490         unsigned locks_want = iter->locks_want;
2491         struct closure cl;
2492         struct btree *b;
2493         int ret;
2494
2495         flags |= BTREE_INSERT_NOFAIL;
2496
2497         closure_init_stack(&cl);
2498
2499         bch2_btree_iter_set_locks_want(iter, U8_MAX);
2500
2501         if (!(flags & BTREE_INSERT_GC_LOCK_HELD)) {
2502                 if (!down_read_trylock(&c->gc_lock)) {
2503                         bch2_btree_iter_unlock(iter);
2504                         down_read(&c->gc_lock);
2505                 }
2506         }
2507
2508         while (1) {
2509                 ret = bch2_btree_iter_traverse(iter);
2510                 if (ret)
2511                         break;
2512
2513                 b = bch2_btree_iter_peek_node(iter);
2514                 if (!b || b->data->keys.seq != seq)
2515                         break;
2516
2517                 ret = __btree_node_rewrite(c, iter, b, flags, &cl);
2518                 if (ret != -EAGAIN &&
2519                     ret != -EINTR)
2520                         break;
2521
2522                 bch2_btree_iter_unlock(iter);
2523                 closure_sync(&cl);
2524         }
2525
2526         bch2_btree_iter_set_locks_want(iter, locks_want);
2527
2528         if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
2529                 up_read(&c->gc_lock);
2530
2531         closure_sync(&cl);
2532         return ret;
2533 }
2534
2535 int bch2_btree_node_update_key(struct bch_fs *c, struct btree *b,
2536                                struct bkey_i_extent *new_key)
2537 {
2538         struct btree_interior_update *as;
2539         struct btree_reserve *reserve = NULL;
2540         struct btree *parent, *new_hash = NULL;
2541         struct btree_iter iter;
2542         struct closure cl;
2543         bool must_rewrite_parent = false;
2544         int ret;
2545
2546         __bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
2547                                BTREE_MAX_DEPTH,
2548                                b->level, 0);
2549         closure_init_stack(&cl);
2550
2551         if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
2552                 /* bch2_btree_reserve_get will unlock */
2553                 do {
2554                         ret = bch2_btree_node_cannibalize_lock(c, &cl);
2555                         closure_sync(&cl);
2556                 } while (ret == -EAGAIN);
2557
2558                 BUG_ON(ret);
2559
2560                 new_hash = bch2_btree_node_mem_alloc(c);
2561         }
2562 retry:
2563         reserve = bch2_btree_reserve_get(c, b, 0,
2564                                 BTREE_INSERT_NOFAIL|
2565                                 BTREE_INSERT_USE_RESERVE|
2566                                 BTREE_INSERT_USE_ALLOC_RESERVE,
2567                                 &cl);
2568         closure_sync(&cl);
2569         if (IS_ERR(reserve)) {
2570                 ret = PTR_ERR(reserve);
2571                 if (ret == -EAGAIN || ret == -EINTR)
2572                         goto retry;
2573                 goto err;
2574         }
2575
2576         down_read(&c->gc_lock);
2577
2578         ret = bch2_btree_iter_traverse(&iter);
2579         if (ret)
2580                 goto err;
2581
2582         mutex_lock(&c->btree_interior_update_lock);
2583
2584         /*
2585          * Two corner cases that need to be thought about here:
2586          *
2587          * @b may not be reachable yet - there might be another interior update
2588          * operation waiting on @b to be written, and we're gonna deliver the
2589          * write completion to that interior update operation _before_
2590          * persisting the new_key update
2591          *
2592          * That ends up working without us having to do anything special here:
2593          * the reason is, we do kick off (and do the in memory updates) for the
2594          * update for @new_key before we return, creating a new interior_update
2595          * operation here.
2596          *
2597          * The new interior update operation here will in effect override the
2598          * previous one. The previous one was going to terminate - make @b
2599          * reachable - in one of two ways:
2600          * - updating the btree root pointer
2601          *   In that case,
2602          *   no, this doesn't work. argh.
2603          */
2604
2605         if (b->will_make_reachable)
2606                 must_rewrite_parent = true;
2607
2608         /* other case: btree node being freed */
2609         if (iter.nodes[b->level] != b) {
2610                 /* node has been freed: */
2611                 BUG_ON(btree_node_hashed(b));
2612                 mutex_unlock(&c->btree_interior_update_lock);
2613                 goto err;
2614         }
2615
2616         mutex_unlock(&c->btree_interior_update_lock);
2617
2618         ret = bch2_check_mark_super(c, extent_i_to_s_c(new_key), BCH_DATA_BTREE);
2619         if (ret)
2620                 goto err;
2621
2622         as = bch2_btree_interior_update_alloc(c);
2623
2624         if (must_rewrite_parent)
2625                 as->flags |= BTREE_INTERIOR_UPDATE_MUST_REWRITE;
2626
2627         bch2_btree_interior_update_add_node_reference(c, as, b);
2628
2629         if (new_hash) {
2630                 bkey_copy(&new_hash->key, &new_key->k_i);
2631                 BUG_ON(bch2_btree_node_hash_insert(c, new_hash,
2632                                         b->level, b->btree_id));
2633         }
2634
2635         parent = iter.nodes[b->level + 1];
2636         if (parent) {
2637                 bch2_btree_insert_node(parent, &iter,
2638                                        &keylist_single(&b->key),
2639                                        reserve, as);
2640         } else {
2641                 bch2_btree_set_root(&iter, b, as, reserve);
2642         }
2643
2644         if (new_hash) {
2645                 mutex_lock(&c->btree_cache_lock);
2646                 bch2_btree_node_hash_remove(c, b);
2647
2648                 bkey_copy(&b->key, &new_key->k_i);
2649                 __bch2_btree_node_hash_insert(c, b);
2650
2651                 bch2_btree_node_hash_remove(c, new_hash);
2652                 mutex_unlock(&c->btree_cache_lock);
2653         } else {
2654                 bkey_copy(&b->key, &new_key->k_i);
2655         }
2656 err:
2657         if (!IS_ERR_OR_NULL(reserve))
2658                 bch2_btree_reserve_put(c, reserve);
2659         if (new_hash) {
2660                 mutex_lock(&c->btree_cache_lock);
2661                 list_move(&b->list, &c->btree_cache_freeable);
2662                 mutex_unlock(&c->btree_cache_lock);
2663
2664                 six_unlock_write(&new_hash->lock);
2665                 six_unlock_intent(&new_hash->lock);
2666         }
2667         bch2_btree_iter_unlock(&iter);
2668         up_read(&c->gc_lock);
2669         return ret;
2670 }
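
/*
 * Illustrative sketch (not bcachefs code): because a btree node is hashed by
 * (part of) its key, bch2_btree_node_update_key() above has to pull the node
 * out of the hash table, copy in the new key, and re-hash it, all under the
 * cache lock, with @new_hash acting as a pre-allocated placeholder for the
 * new hash position. The bare re-key pattern on a hypothetical fixed-size
 * table (locking elided):
 */
#define FAKE_TABLE_SLOTS        16

struct fake_cached_node {
        unsigned long   key;
};

struct fake_node_table {
        struct fake_cached_node *slot[FAKE_TABLE_SLOTS];
};

static void fake_table_remove(struct fake_node_table *t,
                              struct fake_cached_node *n)
{
        unsigned long i = n->key % FAKE_TABLE_SLOTS;

        if (t->slot[i] == n)
                t->slot[i] = NULL;
}

static void fake_table_insert(struct fake_node_table *t,
                              struct fake_cached_node *n)
{
        t->slot[n->key % FAKE_TABLE_SLOTS] = n;
}

static void fake_rekey_cached_node(struct fake_node_table *t,
                                   struct fake_cached_node *n,
                                   unsigned long new_key)
{
        fake_table_remove(t, n);        /* old hash slot goes away */
        n->key = new_key;               /* identity changes while unhashed */
        fake_table_insert(t, n);        /* re-hash under the new key */
}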