libbcachefs/btree_update.c
1
2 #include "bcachefs.h"
3 #include "alloc.h"
4 #include "bkey_methods.h"
5 #include "btree_cache.h"
6 #include "btree_gc.h"
7 #include "btree_update.h"
8 #include "btree_io.h"
9 #include "btree_iter.h"
10 #include "btree_locking.h"
11 #include "buckets.h"
12 #include "extents.h"
13 #include "journal.h"
14 #include "keylist.h"
15 #include "super-io.h"
16
17 #include <linux/random.h>
18 #include <linux/sort.h>
19 #include <trace/events/bcachefs.h>
20
21 static void btree_interior_update_updated_root(struct bch_fs *,
22                                                struct btree_interior_update *,
23                                                enum btree_id);
24
25 /* Calculate ideal packed bkey format for new btree nodes: */
26
27 void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
28 {
29         struct bkey_packed *k;
30         struct bset_tree *t;
31         struct bkey uk;
32
33         bch2_bkey_format_add_pos(s, b->data->min_key);
34
35         for_each_bset(b, t)
36                 for (k = btree_bkey_first(b, t);
37                      k != btree_bkey_last(b, t);
38                      k = bkey_next(k))
39                         if (!bkey_whiteout(k)) {
40                                 uk = bkey_unpack_key(b, k);
41                                 bch2_bkey_format_add_key(s, &uk);
42                         }
43 }
44
45 static struct bkey_format bch2_btree_calc_format(struct btree *b)
46 {
47         struct bkey_format_state s;
48
49         bch2_bkey_format_init(&s);
50         __bch2_btree_calc_format(&s, b);
51
52         return bch2_bkey_format_done(&s);
53 }
54
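/*
 * How many u64s @b's live keys would take up if repacked with @new_f:
 * each packed key changes by the difference in key_u64s between the two
 * formats, and each currently-unpacked key goes from BKEY_U64s to
 * new_f->key_u64s; values are unaffected.
 */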
55 static size_t btree_node_u64s_with_format(struct btree *b,
56                                           struct bkey_format *new_f)
57 {
58         struct bkey_format *old_f = &b->format;
59
60         /* stupid integer promotion rules */
61         ssize_t delta =
62             (((int) new_f->key_u64s - old_f->key_u64s) *
63              (int) b->nr.packed_keys) +
64             (((int) new_f->key_u64s - BKEY_U64s) *
65              (int) b->nr.unpacked_keys);
66
67         BUG_ON(delta + b->nr.live_u64s < 0);
68
69         return b->nr.live_u64s + delta;
70 }
71
72 /**
73  * bch2_btree_node_format_fits - check if we could rewrite node with a new format
74  *
75  * This assumes all keys can pack with the new format -- it just checks if
76  * the re-packed keys would fit inside the node itself.
77  */
78 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
79                                 struct bkey_format *new_f)
80 {
81         size_t u64s = btree_node_u64s_with_format(b, new_f);
82
83         return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
84 }
85
86 /* Btree node freeing/allocation: */
87
88 /*
89  * We're doing the index update that makes @b unreachable; update accounting
90  * and the pending free list to reflect that:
91  *
92  * Must be called _before_ btree_interior_update_updated_root() or
93  * btree_interior_update_updated_btree():
94  */
95 static void bch2_btree_node_free_index(struct bch_fs *c, struct btree *b,
96                                       enum btree_id id, struct bkey_s_c k,
97                                       struct bch_fs_usage *stats)
98 {
99         struct btree_interior_update *as;
100         struct pending_btree_node_free *d;
101
102         mutex_lock(&c->btree_interior_update_lock);
103
104         for_each_pending_btree_node_free(c, as, d)
105                 if (!bkey_cmp(k.k->p, d->key.k.p) &&
106                     bkey_val_bytes(k.k) == bkey_val_bytes(&d->key.k) &&
107                     !memcmp(k.v, &d->key.v, bkey_val_bytes(k.k)))
108                         goto found;
109
110         BUG();
111 found:
112         d->index_update_done = true;
113
114         /*
115          * Btree nodes are accounted as freed in bch_alloc_stats when they're
116          * freed from the index:
117          */
118         stats->s[S_COMPRESSED][S_META]   -= c->sb.btree_node_size;
119         stats->s[S_UNCOMPRESSED][S_META] -= c->sb.btree_node_size;
120
121         /*
122          * We're dropping @k from the btree, but it's still live until the
123          * index update is persistent so we need to keep a reference around for
124          * mark and sweep to find - that's primarily what the
125          * btree_node_pending_free list is for.
126          *
127          * So here (when we set index_update_done = true), we're moving an
128          * existing reference to a different part of the larger "gc keyspace" -
129          * and the new position comes after the old position, since GC marks
130          * the pending free list after it walks the btree.
131          *
132          * If we move the reference while mark and sweep is _between_ the old
133          * and the new position, mark and sweep will see the reference twice
134          * and it'll get double accounted - so check for that here and subtract
135          * to cancel out one of mark and sweep's markings if necessary:
136          */
137
138         /*
139          * bch2_mark_key() compares the current gc pos to the pos we're
140          * moving this reference from, hence one comparison here:
141          */
142         if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
143                 struct bch_fs_usage tmp = { 0 };
144
145                 bch2_mark_key(c, bkey_i_to_s_c(&d->key),
146                              -c->sb.btree_node_size, true, b
147                              ? gc_pos_btree_node(b)
148                              : gc_pos_btree_root(id),
149                              &tmp, 0);
150                 /*
151                  * Don't apply tmp - pending deletes aren't tracked in
152                  * bch_alloc_stats:
153                  */
154         }
155
156         mutex_unlock(&c->btree_interior_update_lock);
157 }
158
159 static void __btree_node_free(struct bch_fs *c, struct btree *b,
160                               struct btree_iter *iter)
161 {
162         trace_btree_node_free(c, b);
163
164         BUG_ON(btree_node_dirty(b));
165         BUG_ON(btree_node_need_write(b));
166         BUG_ON(b == btree_node_root(c, b));
167         BUG_ON(b->ob);
168         BUG_ON(!list_empty(&b->write_blocked));
169         BUG_ON(!list_empty(&b->reachable));
170
171         clear_btree_node_noevict(b);
172
173         six_lock_write(&b->lock);
174
175         bch2_btree_node_hash_remove(c, b);
176
177         mutex_lock(&c->btree_cache_lock);
178         list_move(&b->list, &c->btree_cache_freeable);
179         mutex_unlock(&c->btree_cache_lock);
180
181         /*
182          * By using six_unlock_write() directly instead of
183          * bch2_btree_node_unlock_write(), we don't update the iterator's
184          * sequence numbers and cause future bch2_btree_node_relock() calls to
185          * fail:
186          */
187         six_unlock_write(&b->lock);
188 }
189
190 void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
191 {
192         struct open_bucket *ob = b->ob;
193
194         b->ob = NULL;
195
196         clear_btree_node_dirty(b);
197
198         __btree_node_free(c, b, NULL);
199
200         bch2_open_bucket_put(c, ob);
201 }
202
203 void bch2_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
204 {
205         bch2_btree_iter_node_drop_linked(iter, b);
206
207         __btree_node_free(iter->c, b, iter);
208
209         bch2_btree_iter_node_drop(iter, b);
210 }
211
212 static void bch2_btree_node_free_ondisk(struct bch_fs *c,
213                                        struct pending_btree_node_free *pending)
214 {
215         struct bch_fs_usage stats = { 0 };
216
217         BUG_ON(!pending->index_update_done);
218
219         bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
220                      -c->sb.btree_node_size, true,
221                      gc_phase(GC_PHASE_PENDING_DELETE),
222                      &stats, 0);
223         /*
224          * Don't apply stats - pending deletes aren't tracked in
225          * bch_alloc_stats:
226          */
227 }
228
229 void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b)
230 {
231         bch2_open_bucket_put(c, b->ob);
232         b->ob = NULL;
233 }
234
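/*
 * Allocate a single btree node: reuse an open bucket and key from the
 * btree_reserve_cache if one can be spared, otherwise allocate fresh
 * sectors with bch2_alloc_sectors() (retrying until we get a full btree
 * node's worth), then back it with an in-memory struct btree.
 */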
235 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
236                                             bool use_reserve,
237                                             struct disk_reservation *res,
238                                             struct closure *cl)
239 {
240         BKEY_PADDED(k) tmp;
241         struct open_bucket *ob;
242         struct btree *b;
243         unsigned reserve = use_reserve ? 0 : BTREE_NODE_RESERVE;
244
245         mutex_lock(&c->btree_reserve_cache_lock);
246         if (c->btree_reserve_cache_nr > reserve) {
247                 struct btree_alloc *a =
248                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
249
250                 ob = a->ob;
251                 bkey_copy(&tmp.k, &a->k);
252                 mutex_unlock(&c->btree_reserve_cache_lock);
253                 goto mem_alloc;
254         }
255         mutex_unlock(&c->btree_reserve_cache_lock);
256
257 retry:
258         /* alloc_sectors is weird, I suppose */
259         bkey_extent_init(&tmp.k);
260         tmp.k.k.size = c->sb.btree_node_size;
261
262         ob = bch2_alloc_sectors(c, &c->btree_write_point,
263                                bkey_i_to_extent(&tmp.k),
264                                res->nr_replicas,
265                                c->opts.metadata_replicas_required,
266                                use_reserve ? RESERVE_BTREE : RESERVE_NONE,
267                                cl);
268         if (IS_ERR(ob))
269                 return ERR_CAST(ob);
270
271         if (tmp.k.k.size < c->sb.btree_node_size) {
272                 bch2_open_bucket_put(c, ob);
273                 goto retry;
274         }
275 mem_alloc:
276         b = bch2_btree_node_mem_alloc(c);
277
278         /* we hold cannibalize_lock: */
279         BUG_ON(IS_ERR(b));
280         BUG_ON(b->ob);
281
282         bkey_copy(&b->key, &tmp.k);
283         b->key.k.size = 0;
284         b->ob = ob;
285
286         return b;
287 }
288
289 static struct btree *bch2_btree_node_alloc(struct bch_fs *c,
290                                           unsigned level, enum btree_id id,
291                                           struct btree_reserve *reserve)
292 {
293         struct btree *b;
294
295         BUG_ON(!reserve->nr);
296
297         b = reserve->b[--reserve->nr];
298
299         BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
300
301         set_btree_node_accessed(b);
302         set_btree_node_dirty(b);
303
304         bch2_bset_init_first(b, &b->data->keys);
305         memset(&b->nr, 0, sizeof(b->nr));
306         b->data->magic = cpu_to_le64(bset_magic(c));
307         b->data->flags = 0;
308         SET_BTREE_NODE_ID(b->data, id);
309         SET_BTREE_NODE_LEVEL(b->data, level);
310         b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
311
312         bch2_btree_build_aux_trees(b);
313
314         bch2_check_mark_super(c, &b->key, true);
315
316         trace_btree_node_alloc(c, b);
317         return b;
318 }
319
320 struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *c,
321                                                   struct btree *b,
322                                                   struct bkey_format format,
323                                                   struct btree_reserve *reserve)
324 {
325         struct btree *n;
326
327         n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve);
328
329         n->data->min_key        = b->data->min_key;
330         n->data->max_key        = b->data->max_key;
331         n->data->format         = format;
332
333         btree_node_set_format(n, format);
334
335         bch2_btree_sort_into(c, n, b);
336
337         btree_node_reset_sib_u64s(n);
338
339         n->key.k.p = b->key.k.p;
340         return n;
341 }
342
343 static struct btree *bch2_btree_node_alloc_replacement(struct bch_fs *c,
344                                                 struct btree *b,
345                                                 struct btree_reserve *reserve)
346 {
347         struct bkey_format new_f = bch2_btree_calc_format(b);
348
349         /*
350          * The keys might expand with the new format - if they wouldn't fit in
351          * the btree node anymore, use the old format for now:
352          */
353         if (!bch2_btree_node_format_fits(c, b, &new_f))
354                 new_f = b->format;
355
356         return __bch2_btree_node_alloc_replacement(c, b, new_f, reserve);
357 }
358
359 static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
360                                      struct btree_reserve *btree_reserve)
361 {
362         struct btree *old = btree_node_root(c, b);
363
364         /* Root nodes cannot be reaped */
365         mutex_lock(&c->btree_cache_lock);
366         list_del_init(&b->list);
367         mutex_unlock(&c->btree_cache_lock);
368
369         mutex_lock(&c->btree_root_lock);
370         btree_node_root(c, b) = b;
371         mutex_unlock(&c->btree_root_lock);
372
373         if (btree_reserve) {
374                 /*
375                  * New allocation (we're not being called because we're in
376                  * bch2_btree_root_read()) - do marking while holding
377                  * btree_root_lock:
378                  */
379                 struct bch_fs_usage stats = { 0 };
380
381                 bch2_mark_key(c, bkey_i_to_s_c(&b->key),
382                              c->sb.btree_node_size, true,
383                              gc_pos_btree_root(b->btree_id),
384                              &stats, 0);
385
386                 if (old)
387                         bch2_btree_node_free_index(c, NULL, old->btree_id,
388                                                   bkey_i_to_s_c(&old->key),
389                                                   &stats);
390                 bch2_fs_usage_apply(c, &stats, &btree_reserve->disk_res,
391                                    gc_pos_btree_root(b->btree_id));
392         }
393
394         bch2_recalc_btree_reserve(c);
395 }
396
397 static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
398 {
399         struct btree_root *r = &c->btree_roots[b->btree_id];
400
401         mutex_lock(&c->btree_root_lock);
402
403         BUG_ON(b != r->b);
404         bkey_copy(&r->key, &b->key);
405         r->level = b->level;
406         r->alive = true;
407
408         mutex_unlock(&c->btree_root_lock);
409 }
410
411 /*
412  * Only for filesystem bringup, when first reading the btree roots or allocating
413  * btree roots when initializing a new filesystem:
414  */
415 void bch2_btree_set_root_initial(struct bch_fs *c, struct btree *b,
416                                 struct btree_reserve *btree_reserve)
417 {
418         BUG_ON(btree_node_root(c, b));
419
420         bch2_btree_set_root_inmem(c, b, btree_reserve);
421         bch2_btree_set_root_ondisk(c, b);
422 }
423
424 /**
425  * bch2_btree_set_root - update the root in memory and on disk
426  *
427  * To ensure forward progress, the current task must not be holding any
428  * btree node write locks. However, you must hold an intent lock on the
429  * old root.
430  *
431  * Note: This allocates a journal entry but doesn't add any keys to
432  * it.  All the btree roots are part of every journal write, so there
433  * is nothing new to be done.  This just guarantees that there is a
434  * journal write.
435  */
436 static void bch2_btree_set_root(struct btree_iter *iter, struct btree *b,
437                                struct btree_interior_update *as,
438                                struct btree_reserve *btree_reserve)
439 {
440         struct bch_fs *c = iter->c;
441         struct btree *old;
442
443         trace_btree_set_root(c, b);
444         BUG_ON(!b->written);
445
446         old = btree_node_root(c, b);
447
448         /*
449          * Ensure no one is using the old root while we switch to the
450          * new root:
451          */
452         bch2_btree_node_lock_write(old, iter);
453
454         bch2_btree_set_root_inmem(c, b, btree_reserve);
455
456         btree_interior_update_updated_root(c, as, iter->btree_id);
457
458         /*
459          * Unlock old root after new root is visible:
460          *
461          * The new root isn't persistent, but that's ok: we still have
462          * an intent lock on the new root, and any updates that would
463          * depend on the new root would have to update the new root.
464          */
465         bch2_btree_node_unlock_write(old, iter);
466 }
467
468 static struct btree *__btree_root_alloc(struct bch_fs *c, unsigned level,
469                                         enum btree_id id,
470                                         struct btree_reserve *reserve)
471 {
472         struct btree *b = bch2_btree_node_alloc(c, level, id, reserve);
473
474         b->data->min_key = POS_MIN;
475         b->data->max_key = POS_MAX;
476         b->data->format = bch2_btree_calc_format(b);
477         b->key.k.p = POS_MAX;
478
479         btree_node_set_format(b, b->data->format);
480         bch2_btree_build_aux_trees(b);
481
482         six_unlock_write(&b->lock);
483
484         return b;
485 }
486
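/*
 * Release an unused (or partially used) reserve: stash each remaining
 * node's open bucket and key in the btree_reserve_cache when there's
 * room, free the in-memory nodes, and drop the disk reservation.
 */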
487 void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
488 {
489         bch2_disk_reservation_put(c, &reserve->disk_res);
490
491         mutex_lock(&c->btree_reserve_cache_lock);
492
493         while (reserve->nr) {
494                 struct btree *b = reserve->b[--reserve->nr];
495
496                 six_unlock_write(&b->lock);
497
498                 if (c->btree_reserve_cache_nr <
499                     ARRAY_SIZE(c->btree_reserve_cache)) {
500                         struct btree_alloc *a =
501                                 &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
502
503                         a->ob = b->ob;
504                         b->ob = NULL;
505                         bkey_copy(&a->k, &b->key);
506                 } else {
507                         bch2_open_bucket_put(c, b->ob);
508                         b->ob = NULL;
509                 }
510
511                 __btree_node_free(c, b, NULL);
512
513                 six_unlock_intent(&b->lock);
514         }
515
516         mutex_unlock(&c->btree_reserve_cache_lock);
517
518         mempool_free(reserve, &c->btree_reserve_pool);
519 }
520
521 static struct btree_reserve *__bch2_btree_reserve_get(struct bch_fs *c,
522                                                      unsigned nr_nodes,
523                                                      unsigned flags,
524                                                      struct closure *cl)
525 {
526         struct btree_reserve *reserve;
527         struct btree *b;
528         struct disk_reservation disk_res = { 0, 0 };
529         unsigned sectors = nr_nodes * c->sb.btree_node_size;
530         int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD|
531                 BCH_DISK_RESERVATION_METADATA;
532
533         if (flags & BTREE_INSERT_NOFAIL)
534                 disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
535
536         /*
537          * This check isn't necessary for correctness - it's just to potentially
538          * prevent us from doing a lot of work that'll end up being wasted:
539          */
540         ret = bch2_journal_error(&c->journal);
541         if (ret)
542                 return ERR_PTR(ret);
543
544         if (bch2_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
545                 return ERR_PTR(-ENOSPC);
546
547         BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
548
549         /*
550          * Protects reaping from the btree node cache and using the btree node
551          * open bucket reserve:
552          */
553         ret = bch2_btree_node_cannibalize_lock(c, cl);
554         if (ret) {
555                 bch2_disk_reservation_put(c, &disk_res);
556                 return ERR_PTR(ret);
557         }
558
559         reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
560
561         reserve->disk_res = disk_res;
562         reserve->nr = 0;
563
564         while (reserve->nr < nr_nodes) {
565                 b = __bch2_btree_node_alloc(c, flags & BTREE_INSERT_USE_RESERVE,
566                                            &disk_res, cl);
567                 if (IS_ERR(b)) {
568                         ret = PTR_ERR(b);
569                         goto err_free;
570                 }
571
572                 reserve->b[reserve->nr++] = b;
573         }
574
575         bch2_btree_node_cannibalize_unlock(c);
576         return reserve;
577 err_free:
578         bch2_btree_reserve_put(c, reserve);
579         bch2_btree_node_cannibalize_unlock(c);
580         trace_btree_reserve_get_fail(c, nr_nodes, cl);
581         return ERR_PTR(ret);
582 }
583
584 struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
585                                             struct btree *b,
586                                             unsigned extra_nodes,
587                                             unsigned flags,
588                                             struct closure *cl)
589 {
590         unsigned depth = btree_node_root(c, b)->level - b->level;
591         unsigned nr_nodes = btree_reserve_required_nodes(depth) + extra_nodes;
592
593         return __bch2_btree_reserve_get(c, nr_nodes, flags, cl);
594 }
595
596 int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id,
597                          struct closure *writes)
598 {
599         struct closure cl;
600         struct btree_reserve *reserve;
601         struct btree *b;
602         LIST_HEAD(reachable_list);
603
604         closure_init_stack(&cl);
605
606         while (1) {
607                 /* XXX haven't calculated capacity yet :/ */
608                 reserve = __bch2_btree_reserve_get(c, 1, 0, &cl);
609                 if (!IS_ERR(reserve))
610                         break;
611
612                 if (PTR_ERR(reserve) == -ENOSPC)
613                         return PTR_ERR(reserve);
614
615                 closure_sync(&cl);
616         }
617
618         b = __btree_root_alloc(c, 0, id, reserve);
619         list_add(&b->reachable, &reachable_list);
620
621         bch2_btree_node_write(c, b, writes, SIX_LOCK_intent);
622
623         bch2_btree_set_root_initial(c, b, reserve);
624         bch2_btree_open_bucket_put(c, b);
625
626         list_del_init(&b->reachable);
627         six_unlock_intent(&b->lock);
628
629         bch2_btree_reserve_put(c, reserve);
630
631         return 0;
632 }
633
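/*
 * Insert a btree node pointer into interior node @b: account the new
 * node's sectors, and if we're overwriting an existing pointer, mark its
 * pending free entry before doing the actual bset insert.
 */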
634 static void bch2_insert_fixup_btree_ptr(struct btree_iter *iter,
635                                        struct btree *b,
636                                        struct bkey_i *insert,
637                                        struct btree_node_iter *node_iter,
638                                        struct disk_reservation *disk_res)
639 {
640         struct bch_fs *c = iter->c;
641         struct bch_fs_usage stats = { 0 };
642         struct bkey_packed *k;
643         struct bkey tmp;
644
645         if (bkey_extent_is_data(&insert->k))
646                 bch2_mark_key(c, bkey_i_to_s_c(insert),
647                              c->sb.btree_node_size, true,
648                              gc_pos_btree_node(b), &stats, 0);
649
650         while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
651                !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
652                 bch2_btree_node_iter_advance(node_iter, b);
653
654         /*
655          * If we're overwriting, look up the old key's pending delete entry and
656          * mark it, so that gc marks it on the pending delete list:
657          */
658         if (k && !bkey_cmp_packed(b, k, &insert->k))
659                 bch2_btree_node_free_index(c, b, iter->btree_id,
660                                           bkey_disassemble(b, k, &tmp),
661                                           &stats);
662
663         bch2_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b));
664
665         bch2_btree_bset_insert_key(iter, b, node_iter, insert);
666         set_btree_node_dirty(b);
667         set_btree_node_need_write(b);
668 }
669
670 /* Inserting into a given leaf node (last stage of insert): */
671
672 /* Handle overwrites and do insert, for non extents: */
673 bool bch2_btree_bset_insert_key(struct btree_iter *iter,
674                                struct btree *b,
675                                struct btree_node_iter *node_iter,
676                                struct bkey_i *insert)
677 {
678         const struct bkey_format *f = &b->format;
679         struct bkey_packed *k;
680         struct bset_tree *t;
681         unsigned clobber_u64s;
682
683         EBUG_ON(btree_node_just_written(b));
684         EBUG_ON(bset_written(b, btree_bset_last(b)));
685         EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
686         EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
687                 bkey_cmp(insert->k.p, b->data->max_key) > 0);
688         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b));
689
690         k = bch2_btree_node_iter_peek_all(node_iter, b);
691         if (k && !bkey_cmp_packed(b, k, &insert->k)) {
692                 BUG_ON(bkey_whiteout(k));
693
694                 t = bch2_bkey_to_bset(b, k);
695
696                 if (bset_unwritten(b, bset(b, t)) &&
697                     bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
698                         BUG_ON(bkey_whiteout(k) != bkey_whiteout(&insert->k));
699
700                         k->type = insert->k.type;
701                         memcpy_u64s(bkeyp_val(f, k), &insert->v,
702                                     bkey_val_u64s(&insert->k));
703                         return true;
704                 }
705
706                 insert->k.needs_whiteout = k->needs_whiteout;
707
708                 btree_keys_account_key_drop(&b->nr, t - b->set, k);
709
710                 if (t == bset_tree_last(b)) {
711                         clobber_u64s = k->u64s;
712
713                         /*
714                          * If we're deleting, and the key we're deleting doesn't
715                          * need a whiteout (it wasn't overwriting a key that had
716                          * been written to disk) - just delete it:
717                          */
718                         if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
719                                 bch2_bset_delete(b, k, clobber_u64s);
720                                 bch2_btree_node_iter_fix(iter, b, node_iter, t,
721                                                         k, clobber_u64s, 0);
722                                 return true;
723                         }
724
725                         goto overwrite;
726                 }
727
728                 k->type = KEY_TYPE_DELETED;
729                 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
730                                         k->u64s, k->u64s);
731
732                 if (bkey_whiteout(&insert->k)) {
733                         reserve_whiteout(b, t, k);
734                         return true;
735                 } else {
736                         k->needs_whiteout = false;
737                 }
738         } else {
739                 /*
740                  * Deleting, but the key to delete wasn't found - nothing to do:
741                  */
742                 if (bkey_whiteout(&insert->k))
743                         return false;
744
745                 insert->k.needs_whiteout = false;
746         }
747
748         t = bset_tree_last(b);
749         k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
750         clobber_u64s = 0;
751 overwrite:
752         bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
753         if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
754                 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
755                                         clobber_u64s, k->u64s);
756         return true;
757 }
758
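/*
 * Journal pin flush callback: write out the btree node (if this write is
 * still the one holding the pin on @seq) so the pin can be released.
 */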
759 static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
760                                unsigned i, u64 seq)
761 {
762         struct bch_fs *c = container_of(j, struct bch_fs, journal);
763         struct btree_write *w = container_of(pin, struct btree_write, journal);
764         struct btree *b = container_of(w, struct btree, writes[i]);
765
766         six_lock_read(&b->lock);
767         bch2_btree_node_write_dirty(c, b, NULL,
768                         (btree_current_write(b) == w &&
769                          w->journal.pin_list == journal_seq_pin(j, seq)));
770         six_unlock_read(&b->lock);
771 }
772
773 static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
774 {
775         return __btree_node_flush(j, pin, 0, seq);
776 }
777
778 static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
779 {
780         return __btree_node_flush(j, pin, 1, seq);
781 }
782
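/*
 * Add @insert to the journal for this transaction; the node's current
 * write is pinned on the journal entry so that journal reclaim can force
 * a btree node write (via btree_node_flush0/1) to release the pin.
 */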
783 void bch2_btree_journal_key(struct btree_insert *trans,
784                            struct btree_iter *iter,
785                            struct bkey_i *insert)
786 {
787         struct bch_fs *c = trans->c;
788         struct journal *j = &c->journal;
789         struct btree *b = iter->nodes[0];
790         struct btree_write *w = btree_current_write(b);
791
792         EBUG_ON(iter->level || b->level);
793         EBUG_ON(!trans->journal_res.ref &&
794                 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
795
796         if (!journal_pin_active(&w->journal))
797                 bch2_journal_pin_add(j, &trans->journal_res,
798                                      &w->journal,
799                                      btree_node_write_idx(b) == 0
800                                      ? btree_node_flush0
801                                      : btree_node_flush1);
802
803         if (trans->journal_res.ref) {
804                 u64 seq = trans->journal_res.seq;
805                 bool needs_whiteout = insert->k.needs_whiteout;
806
807                 /* ick */
808                 insert->k.needs_whiteout = false;
809                 bch2_journal_add_keys(j, &trans->journal_res,
810                                      b->btree_id, insert);
811                 insert->k.needs_whiteout = needs_whiteout;
812
813                 if (trans->journal_seq)
814                         *trans->journal_seq = seq;
815                 btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
816         }
817
818         if (!btree_node_dirty(b))
819                 set_btree_node_dirty(b);
820 }
821
822 static enum btree_insert_ret
823 bch2_insert_fixup_key(struct btree_insert *trans,
824                      struct btree_insert_entry *insert)
825 {
826         struct btree_iter *iter = insert->iter;
827
828         BUG_ON(iter->level);
829
830         if (bch2_btree_bset_insert_key(iter,
831                                       iter->nodes[0],
832                                       &iter->node_iters[0],
833                                       insert->k))
834                 bch2_btree_journal_key(trans, iter, insert->k);
835
836         trans->did_work = true;
837         return BTREE_INSERT_OK;
838 }
839
840 static void verify_keys_sorted(struct keylist *l)
841 {
842 #ifdef CONFIG_BCACHEFS_DEBUG
843         struct bkey_i *k;
844
845         for_each_keylist_key(l, k)
846                 BUG_ON(bkey_next(k) != l->top &&
847                        bkey_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
848 #endif
849 }
850
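/*
 * Take the write lock on @b and prepare it for an insert, finishing up a
 * just-completed write if there was one:
 */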
851 static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter)
852 {
853         struct bch_fs *c = iter->c;
854
855         bch2_btree_node_lock_write(b, iter);
856
857         if (btree_node_just_written(b) &&
858             bch2_btree_post_write_cleanup(c, b))
859                 bch2_btree_iter_reinit_node(iter, b);
860
861         /*
862          * If the last bset has been written, or if it's gotten too big - start
863          * a new bset to insert into:
864          */
865         if (want_new_bset(c, b))
866                 bch2_btree_init_next(c, b, iter);
867 }
868
869 /* Asynchronous interior node update machinery */
870
871 struct btree_interior_update *
872 bch2_btree_interior_update_alloc(struct bch_fs *c)
873 {
874         struct btree_interior_update *as;
875
876         as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
877         memset(as, 0, sizeof(*as));
878         closure_init(&as->cl, &c->cl);
879         as->c           = c;
880         as->mode        = BTREE_INTERIOR_NO_UPDATE;
881         INIT_LIST_HEAD(&as->write_blocked_list);
882         INIT_LIST_HEAD(&as->reachable_list);
883
884         bch2_keylist_init(&as->parent_keys, as->inline_keys,
885                          ARRAY_SIZE(as->inline_keys));
886
887         mutex_lock(&c->btree_interior_update_lock);
888         list_add(&as->list, &c->btree_interior_update_list);
889         mutex_unlock(&c->btree_interior_update_lock);
890
891         return as;
892 }
893
894 static void btree_interior_update_free(struct closure *cl)
895 {
896         struct btree_interior_update *as =
897                 container_of(cl, struct btree_interior_update, cl);
898
899         mempool_free(as, &as->c->btree_interior_update_pool);
900 }
901
902 static void btree_interior_update_nodes_reachable(struct closure *cl)
903 {
904         struct btree_interior_update *as =
905                 container_of(cl, struct btree_interior_update, cl);
906         struct bch_fs *c = as->c;
907         unsigned i;
908
909         bch2_journal_pin_drop(&c->journal, &as->journal);
910
911         mutex_lock(&c->btree_interior_update_lock);
912
913         while (!list_empty(&as->reachable_list)) {
914                 struct btree *b = list_first_entry(&as->reachable_list,
915                                                    struct btree, reachable);
916                 list_del_init(&b->reachable);
917                 mutex_unlock(&c->btree_interior_update_lock);
918
919                 six_lock_read(&b->lock);
920                 bch2_btree_node_write_dirty(c, b, NULL, btree_node_need_write(b));
921                 six_unlock_read(&b->lock);
922                 mutex_lock(&c->btree_interior_update_lock);
923         }
924
925         for (i = 0; i < as->nr_pending; i++)
926                 bch2_btree_node_free_ondisk(c, &as->pending[i]);
927         as->nr_pending = 0;
928
929         list_del(&as->list);
930         mutex_unlock(&c->btree_interior_update_lock);
931
932         closure_wake_up(&as->wait);
933
934         closure_return_with_destructor(cl, btree_interior_update_free);
935 }
936
937 static void btree_interior_update_nodes_written(struct closure *cl)
938 {
939         struct btree_interior_update *as =
940                 container_of(cl, struct btree_interior_update, cl);
941         struct bch_fs *c = as->c;
942         struct btree *b;
943
944         if (bch2_journal_error(&c->journal)) {
945                 /* XXX what? */
946                 /* we don't want to free the nodes on disk, that's what */
947         }
948
949         /* XXX: missing error handling, damnit */
950
951         /* check for journal error, bail out if we flushed */
952
953         /*
954          * We did an update to a parent node where the pointers we added pointed
955          * to child nodes that weren't written yet: now, the child nodes have
956          * been written so we can write out the update to the interior node.
957          */
958 retry:
959         mutex_lock(&c->btree_interior_update_lock);
960         switch (as->mode) {
961         case BTREE_INTERIOR_NO_UPDATE:
962                 BUG();
963         case BTREE_INTERIOR_UPDATING_NODE:
964                 /* The usual case: */
965                 b = READ_ONCE(as->b);
966
967                 if (!six_trylock_read(&b->lock)) {
968                         mutex_unlock(&c->btree_interior_update_lock);
969                         six_lock_read(&b->lock);
970                         six_unlock_read(&b->lock);
971                         goto retry;
972                 }
973
974                 BUG_ON(!btree_node_dirty(b));
975                 closure_wait(&btree_current_write(b)->wait, cl);
976
977                 list_del(&as->write_blocked_list);
978                 mutex_unlock(&c->btree_interior_update_lock);
979
980                 bch2_btree_node_write_dirty(c, b, NULL,
981                                             btree_node_need_write(b));
982                 six_unlock_read(&b->lock);
983                 break;
984
985         case BTREE_INTERIOR_UPDATING_AS:
986                 /*
987                  * The btree node we originally updated has been freed and is
988                  * being rewritten - so we don't need to write anything here, we
989                  * just need to signal to that btree_interior_update that it's ok
990                  * to make the new replacement node visible:
991                  */
992                 closure_put(&as->parent_as->cl);
993
994                 /*
995                  * and then we have to wait on that btree_interior_update to finish:
996                  */
997                 closure_wait(&as->parent_as->wait, cl);
998                 mutex_unlock(&c->btree_interior_update_lock);
999                 break;
1000
1001         case BTREE_INTERIOR_UPDATING_ROOT:
1002                 /* b is the new btree root: */
1003                 b = READ_ONCE(as->b);
1004
1005                 if (!six_trylock_read(&b->lock)) {
1006                         mutex_unlock(&c->btree_interior_update_lock);
1007                         six_lock_read(&b->lock);
1008                         six_unlock_read(&b->lock);
1009                         goto retry;
1010                 }
1011
1012                 BUG_ON(c->btree_roots[b->btree_id].as != as);
1013                 c->btree_roots[b->btree_id].as = NULL;
1014
1015                 bch2_btree_set_root_ondisk(c, b);
1016
1017                 /*
1018                  * We don't have to wait on anything here (before
1019                  * btree_interior_update_nodes_reachable frees the old nodes
1020                  * ondisk) - we've ensured that the very next journal write will
1021                  * have the pointer to the new root, and before the allocator
1022                  * can reuse the old nodes it'll have to do a journal commit:
1023                  */
1024                 six_unlock_read(&b->lock);
1025                 mutex_unlock(&c->btree_interior_update_lock);
1026                 break;
1027         }
1028
1029         continue_at(cl, btree_interior_update_nodes_reachable, system_wq);
1030 }
1031
1032 /*
1033  * We're updating @b with pointers to nodes that haven't finished writing yet:
1034  * block @b from being written until @as completes
1035  */
1036 static void btree_interior_update_updated_btree(struct bch_fs *c,
1037                                                 struct btree_interior_update *as,
1038                                                 struct btree *b)
1039 {
1040         mutex_lock(&c->btree_interior_update_lock);
1041
1042         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1043         BUG_ON(!btree_node_dirty(b));
1044
1045         as->mode = BTREE_INTERIOR_UPDATING_NODE;
1046         as->b = b;
1047         list_add(&as->write_blocked_list, &b->write_blocked);
1048
1049         mutex_unlock(&c->btree_interior_update_lock);
1050
1051         bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
1052
1053         continue_at(&as->cl, btree_interior_update_nodes_written,
1054                     system_freezable_wq);
1055 }
1056
1057 static void btree_interior_update_reparent(struct btree_interior_update *as,
1058                                            struct btree_interior_update *child)
1059 {
1060         child->b = NULL;
1061         child->mode = BTREE_INTERIOR_UPDATING_AS;
1062         child->parent_as = as;
1063         closure_get(&as->cl);
1064 }
1065
1066 static void btree_interior_update_updated_root(struct bch_fs *c,
1067                                                struct btree_interior_update *as,
1068                                                enum btree_id btree_id)
1069 {
1070         struct btree_root *r = &c->btree_roots[btree_id];
1071
1072         mutex_lock(&c->btree_interior_update_lock);
1073
1074         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1075
1076         /*
1077          * Old root might not be persistent yet - if so, redirect its
1078          * btree_interior_update operation to point to us:
1079          */
1080         if (r->as)
1081                 btree_interior_update_reparent(as, r->as);
1082
1083         as->mode = BTREE_INTERIOR_UPDATING_ROOT;
1084         as->b = r->b;
1085         r->as = as;
1086
1087         mutex_unlock(&c->btree_interior_update_lock);
1088
1089         continue_at(&as->cl, btree_interior_update_nodes_written,
1090                     system_freezable_wq);
1091 }
1092
1093 static void interior_update_flush(struct journal *j,
1094                         struct journal_entry_pin *pin, u64 seq)
1095 {
1096         struct btree_interior_update *as =
1097                 container_of(pin, struct btree_interior_update, journal);
1098
1099         bch2_journal_flush_seq_async(j, as->journal_seq, NULL);
1100 }
1101
1102 /*
1103  * @b is being split/rewritten: it may have pointers to not-yet-written btree
1104  * nodes and thus outstanding btree_interior_updates - redirect @b's
1105  * btree_interior_updates to point to this btree_interior_update:
1106  */
1107 void bch2_btree_interior_update_will_free_node(struct bch_fs *c,
1108                                               struct btree_interior_update *as,
1109                                               struct btree *b)
1110 {
1111         struct closure *cl, *cl_n;
1112         struct btree_interior_update *p, *n;
1113         struct pending_btree_node_free *d;
1114         struct btree_write *w;
1115         struct bset_tree *t;
1116
1117         /*
1118          * Does this node have data that hasn't been written in the journal?
1119          *
1120          * If so, we have to wait for the corresponding journal entry to be
1121          * written before making the new nodes reachable - we can't just carry
1122          * over the bset->journal_seq tracking, since we'll be mixing those keys
1123          * in with keys that aren't in the journal anymore:
1124          */
1125         for_each_bset(b, t)
1126                 as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
1127
1128         mutex_lock(&c->btree_interior_update_lock);
1129
1130         /* Add this node to the list of nodes being freed: */
1131         BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
1132
1133         d = &as->pending[as->nr_pending++];
1134         d->index_update_done    = false;
1135         d->seq                  = b->data->keys.seq;
1136         d->btree_id             = b->btree_id;
1137         d->level                = b->level;
1138         bkey_copy(&d->key, &b->key);
1139
1140         /*
1141          * Does this node have any btree_interior_update operations preventing
1142          * it from being written?
1143          *
1144          * If so, redirect them to point to this btree_interior_update: we can
1145          * write out our new nodes, but we won't make them visible until those
1146          * operations complete
1147          */
1148         list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
1149                 list_del(&p->write_blocked_list);
1150                 btree_interior_update_reparent(as, p);
1151         }
1152
1153         clear_btree_node_dirty(b);
1154         clear_btree_node_need_write(b);
1155         w = btree_current_write(b);
1156
1157         llist_for_each_entry_safe(cl, cl_n, llist_del_all(&w->wait.list), list)
1158                 llist_add(&cl->list, &as->wait.list);
1159
1160         /*
1161          * Does this node have unwritten data that has a pin on the journal?
1162          *
1163          * If so, transfer that pin to the btree_interior_update operation -
1164          * note that if we're freeing multiple nodes, we only need to keep the
1165          * oldest pin of any of the nodes we're freeing. We'll release the pin
1166          * when the new nodes are persistent and reachable on disk:
1167          */
1168         bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1169                                       &as->journal, interior_update_flush);
1170         bch2_journal_pin_drop(&c->journal, &w->journal);
1171
1172         if (!list_empty(&b->reachable))
1173                 list_del_init(&b->reachable);
1174
1175         mutex_unlock(&c->btree_interior_update_lock);
1176 }
1177
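/*
 * Debug check: an interior node's last key must have the same position as
 * the node's own key (its max key), with nothing after it.
 */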
1178 static void btree_node_interior_verify(struct btree *b)
1179 {
1180         struct btree_node_iter iter;
1181         struct bkey_packed *k;
1182
1183         BUG_ON(!b->level);
1184
1185         bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
1186 #if 1
1187         BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
1188                bkey_cmp_left_packed(b, k, &b->key.k.p));
1189
1190         BUG_ON((bch2_btree_node_iter_advance(&iter, b),
1191                 !bch2_btree_node_iter_end(&iter)));
1192 #else
1193         const char *msg;
1194
1195         msg = "not found";
1196         k = bch2_btree_node_iter_peek(&iter, b);
1197         if (!k)
1198                 goto err;
1199
1200         msg = "isn't what it should be";
1201         if (bkey_cmp_left_packed(b, k, &b->key.k.p))
1202                 goto err;
1203
1204         bch2_btree_node_iter_advance(&iter, b);
1205
1206         msg = "isn't last key";
1207         if (!bch2_btree_node_iter_end(&iter))
1208                 goto err;
1209         return;
1210 err:
1211         bch2_dump_btree_node(b);
1212         printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
1213                b->key.k.p.offset, msg);
1214         BUG();
1215 #endif
1216 }
1217
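/*
 * Insert keys into interior node @b, under the node write lock; returns
 * BTREE_INSERT_BTREE_NODE_FULL without inserting anything if the keys
 * don't all fit in the node.
 */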
1218 static enum btree_insert_ret
1219 bch2_btree_insert_keys_interior(struct btree *b,
1220                                struct btree_iter *iter,
1221                                struct keylist *insert_keys,
1222                                struct btree_interior_update *as,
1223                                struct btree_reserve *res)
1224 {
1225         struct bch_fs *c = iter->c;
1226         struct btree_iter *linked;
1227         struct btree_node_iter node_iter;
1228         struct bkey_i *insert = bch2_keylist_front(insert_keys);
1229         struct bkey_packed *k;
1230
1231         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1232         BUG_ON(!b->level);
1233         BUG_ON(!as || as->b);
1234         verify_keys_sorted(insert_keys);
1235
1236         btree_node_lock_for_insert(b, iter);
1237
1238         if (bch_keylist_u64s(insert_keys) >
1239             bch_btree_keys_u64s_remaining(c, b)) {
1240                 bch2_btree_node_unlock_write(b, iter);
1241                 return BTREE_INSERT_BTREE_NODE_FULL;
1242         }
1243
1244         /* Don't screw up @iter's position: */
1245         node_iter = iter->node_iters[b->level];
1246
1247         /*
1248          * btree_split(), btree_gc_coalesce() will insert keys before
1249          * the iterator's current position - they know the keys go in
1250          * the node the iterator points to:
1251          */
1252         while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
1253                (bkey_cmp_packed(b, k, &insert->k) >= 0))
1254                 ;
1255
1256         while (!bch2_keylist_empty(insert_keys)) {
1257                 insert = bch2_keylist_front(insert_keys);
1258
1259                 bch2_insert_fixup_btree_ptr(iter, b, insert,
1260                                            &node_iter, &res->disk_res);
1261                 bch2_keylist_pop_front(insert_keys);
1262         }
1263
1264         btree_interior_update_updated_btree(c, as, b);
1265
1266         for_each_linked_btree_node(iter, b, linked)
1267                 bch2_btree_node_iter_peek(&linked->node_iters[b->level],
1268                                          b);
1269         bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
1270
1271         bch2_btree_iter_verify(iter, b);
1272
1273         if (bch2_maybe_compact_whiteouts(c, b))
1274                 bch2_btree_iter_reinit_node(iter, b);
1275
1276         bch2_btree_node_unlock_write(b, iter);
1277
1278         btree_node_interior_verify(b);
1279         return BTREE_INSERT_OK;
1280 }
1281
1282 /*
1283  * Move keys from n1 (original replacement node, now lower node) to n2 (higher
1284  * node)
1285  */
1286 static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n1,
1287                                         struct btree_reserve *reserve,
1288                                         struct btree_interior_update *as)
1289 {
1290         size_t nr_packed = 0, nr_unpacked = 0;
1291         struct btree *n2;
1292         struct bset *set1, *set2;
1293         struct bkey_packed *k, *prev = NULL;
1294
1295         n2 = bch2_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve);
1296         list_add(&n2->reachable, &as->reachable_list);
1297
1298         n2->data->max_key       = n1->data->max_key;
1299         n2->data->format        = n1->format;
1300         n2->key.k.p = n1->key.k.p;
1301
1302         btree_node_set_format(n2, n2->data->format);
1303
1304         set1 = btree_bset_first(n1);
1305         set2 = btree_bset_first(n2);
1306
1307         /*
1308          * Has to be a linear search because we don't have an auxiliary
1309          * search tree yet
1310          */
1311         k = set1->start;
1312         while (1) {
1313                 if (bkey_next(k) == vstruct_last(set1))
1314                         break;
1315                 if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
1316                         break;
1317
1318                 if (bkey_packed(k))
1319                         nr_packed++;
1320                 else
1321                         nr_unpacked++;
1322
1323                 prev = k;
1324                 k = bkey_next(k);
1325         }
1326
1327         BUG_ON(!prev);
1328
1329         n1->key.k.p = bkey_unpack_pos(n1, prev);
1330         n1->data->max_key = n1->key.k.p;
1331         n2->data->min_key =
1332                 btree_type_successor(n1->btree_id, n1->key.k.p);
1333
1334         set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
1335         set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
1336
1337         set_btree_bset_end(n1, n1->set);
1338         set_btree_bset_end(n2, n2->set);
1339
1340         n2->nr.live_u64s        = le16_to_cpu(set2->u64s);
1341         n2->nr.bset_u64s[0]     = le16_to_cpu(set2->u64s);
1342         n2->nr.packed_keys      = n1->nr.packed_keys - nr_packed;
1343         n2->nr.unpacked_keys    = n1->nr.unpacked_keys - nr_unpacked;
1344
1345         n1->nr.live_u64s        = le16_to_cpu(set1->u64s);
1346         n1->nr.bset_u64s[0]     = le16_to_cpu(set1->u64s);
1347         n1->nr.packed_keys      = nr_packed;
1348         n1->nr.unpacked_keys    = nr_unpacked;
1349
1350         BUG_ON(!set1->u64s);
1351         BUG_ON(!set2->u64s);
1352
1353         memcpy_u64s(set2->start,
1354                     vstruct_end(set1),
1355                     le16_to_cpu(set2->u64s));
1356
1357         btree_node_reset_sib_u64s(n1);
1358         btree_node_reset_sib_u64s(n2);
1359
1360         bch2_verify_btree_nr_keys(n1);
1361         bch2_verify_btree_nr_keys(n2);
1362
1363         if (n1->level) {
1364                 btree_node_interior_verify(n1);
1365                 btree_node_interior_verify(n2);
1366         }
1367
1368         return n2;
1369 }
1370
1371 /*
1372  * For updates to interior nodes, we've got to do the insert before we split
1373  * because the stuff we're inserting has to be inserted atomically. Post split,
1374  * the keys might have to go in different nodes and the split would no longer be
1375  * atomic.
1376  *
1377  * Worse, if the insert is from btree node coalescing and we do the insert after
1378  * we do the split (and pick the pivot), the pivot we pick might be between
1379  * nodes that were coalesced, and thus in the middle of a child node post
1380  * coalescing:
1381  */
1382 static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
1383                                     struct keylist *keys,
1384                                     struct btree_reserve *res)
1385 {
1386         struct btree_node_iter node_iter;
1387         struct bkey_i *k = bch2_keylist_front(keys);
1388         struct bkey_packed *p;
1389         struct bset *i;
1390
1391         BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
1392
1393         bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
1394
1395         while (!bch2_keylist_empty(keys)) {
1396                 k = bch2_keylist_front(keys);
1397
1398                 BUG_ON(bch_keylist_u64s(keys) >
1399                        bch_btree_keys_u64s_remaining(iter->c, b));
1400                 BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
1401                 BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
1402
1403                 bch2_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res);
1404                 bch2_keylist_pop_front(keys);
1405         }
1406
1407         /*
1408          * We can't tolerate whiteouts here - with whiteouts there can be
1409          * duplicate keys, and it would be rather bad if we picked a duplicate
1410          * for the pivot:
1411          */
1412         i = btree_bset_first(b);
1413         p = i->start;
1414         while (p != vstruct_last(i))
1415                 if (bkey_deleted(p)) {
1416                         le16_add_cpu(&i->u64s, -p->u64s);
1417                         set_btree_bset_end(b, b->set);
1418                         memmove_u64s_down(p, bkey_next(p),
1419                                           (u64 *) vstruct_last(i) -
1420                                           (u64 *) p);
1421                 } else
1422                         p = bkey_next(p);
1423
1424         BUG_ON(b->nsets != 1 ||
1425                b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
1426
1427         btree_node_interior_verify(b);
1428 }
1429
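/*
 * Split (or, if it fits in a single node, just compact) @b: the
 * replacement node(s) are written first, made visible by updating the
 * parent (or setting a new root), and only then is @b itself freed.
 */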
1430 static void btree_split(struct btree *b, struct btree_iter *iter,
1431                         struct keylist *insert_keys,
1432                         struct btree_reserve *reserve,
1433                         struct btree_interior_update *as)
1434 {
1435         struct bch_fs *c = iter->c;
1436         struct btree *parent = iter->nodes[b->level + 1];
1437         struct btree *n1, *n2 = NULL, *n3 = NULL;
1438         u64 start_time = local_clock();
1439
1440         BUG_ON(!parent && (b != btree_node_root(c, b)));
1441         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1442
1443         bch2_btree_interior_update_will_free_node(c, as, b);
1444
1445         n1 = bch2_btree_node_alloc_replacement(c, b, reserve);
1446         list_add(&n1->reachable, &as->reachable_list);
1447
1448         if (b->level)
1449                 btree_split_insert_keys(iter, n1, insert_keys, reserve);
1450
1451         if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
1452                 trace_btree_node_split(c, b, b->nr.live_u64s);
1453
1454                 n2 = __btree_split_node(iter, n1, reserve, as);
1455
1456                 bch2_btree_build_aux_trees(n2);
1457                 bch2_btree_build_aux_trees(n1);
1458                 six_unlock_write(&n2->lock);
1459                 six_unlock_write(&n1->lock);
1460
1461                 bch2_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent);
1462
1463                 /*
1464                  * Note that on recursive calls, parent_keys == insert_keys, so we
1465                  * can't start adding new keys to parent_keys before emptying it
1466                  * out (which we did with btree_split_insert_keys() above)
1467                  */
1468                 bch2_keylist_add(&as->parent_keys, &n1->key);
1469                 bch2_keylist_add(&as->parent_keys, &n2->key);
1470
1471                 if (!parent) {
1472                         /* Depth increases, make a new root */
1473                         n3 = __btree_root_alloc(c, b->level + 1,
1474                                                 iter->btree_id,
1475                                                 reserve);
1476                         list_add(&n3->reachable, &as->reachable_list);
1477
1478                         n3->sib_u64s[0] = U16_MAX;
1479                         n3->sib_u64s[1] = U16_MAX;
1480
1481                         btree_split_insert_keys(iter, n3, &as->parent_keys,
1482                                                 reserve);
1483                         bch2_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent);
1484                 }
1485         } else {
1486                 trace_btree_node_compact(c, b, b->nr.live_u64s);
1487
1488                 bch2_btree_build_aux_trees(n1);
1489                 six_unlock_write(&n1->lock);
1490
1491                 bch2_keylist_add(&as->parent_keys, &n1->key);
1492         }
1493
1494         bch2_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent);
1495
1496         /* New nodes all written, now make them visible: */
1497
1498         if (parent) {
1499                 /* Split a non-root node */
1500                 bch2_btree_insert_node(parent, iter, &as->parent_keys,
1501                                       reserve, as);
1502         } else if (n3) {
1503                 bch2_btree_set_root(iter, n3, as, reserve);
1504         } else {
1505                 /* Root filled up but didn't need to be split */
1506                 bch2_btree_set_root(iter, n1, as, reserve);
1507         }
1508
1509         bch2_btree_open_bucket_put(c, n1);
1510         if (n2)
1511                 bch2_btree_open_bucket_put(c, n2);
1512         if (n3)
1513                 bch2_btree_open_bucket_put(c, n3);
1514
1515         /*
1516          * Note - at this point other linked iterators could still have @b read
1517          * locked; we're depending on the bch2_btree_iter_node_replace() calls
1518          * below removing all references to @b so we don't return with other
1519          * iterators pointing to a node they have locked that's been freed.
1520          *
1521          * We have to free the node first because the bch2_btree_iter_node_replace()
1522          * calls will drop _our_ iterator's reference - and intent lock - to @b.
1523          */
1524         bch2_btree_node_free_inmem(iter, b);
1525
1526         /* Successful split, update the iterator to point to the new nodes: */
1527
1528         if (n3)
1529                 bch2_btree_iter_node_replace(iter, n3);
1530         if (n2)
1531                 bch2_btree_iter_node_replace(iter, n2);
1532         bch2_btree_iter_node_replace(iter, n1);
1533
1534         bch2_time_stats_update(&c->btree_split_time, start_time);
1535 }
1536
1537 /**
1538  * bch2_btree_insert_node - insert bkeys into a given interior btree node
1539  *
1540  * @b:                  interior node to insert into
1541  * @iter:               btree iterator
1542  * @insert_keys:        list of keys to insert
1543  * @reserve:            btree reserve new nodes are allocated from
1544  * @as:                 interior update in progress
1545  *
1546  * Inserts the keys into @b, splitting @b if it's full: interior node updates
1547  * must be atomic, so if @b is split the keys end up in the new node(s) instead.
1548  */
1549 void bch2_btree_insert_node(struct btree *b,
1550                            struct btree_iter *iter,
1551                            struct keylist *insert_keys,
1552                            struct btree_reserve *reserve,
1553                            struct btree_interior_update *as)
1554 {
1555         BUG_ON(!b->level);
1556         BUG_ON(!reserve || !as);
1557
1558         switch (bch2_btree_insert_keys_interior(b, iter, insert_keys,
1559                                                as, reserve)) {
1560         case BTREE_INSERT_OK:
1561                 break;
1562         case BTREE_INSERT_BTREE_NODE_FULL:
1563                 btree_split(b, iter, insert_keys, reserve, as);
1564                 break;
1565         default:
1566                 BUG();
1567         }
1568 }
1569
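/*
 * bch2_btree_split_leaf - split the leaf node @iter points to
 *
 * Called when an insert didn't fit: takes intent locks all the way up to the
 * root, gets a btree reserve and splits via btree_split().
 */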
1570 static int bch2_btree_split_leaf(struct btree_iter *iter, unsigned flags)
1571 {
1572         struct bch_fs *c = iter->c;
1573         struct btree *b = iter->nodes[0];
1574         struct btree_reserve *reserve;
1575         struct btree_interior_update *as;
1576         struct closure cl;
1577         int ret = 0;
1578
1579         closure_init_stack(&cl);
1580
1581         /* Hack, because gc and splitting nodes don't mix yet: */
1582         if (!down_read_trylock(&c->gc_lock)) {
1583                 bch2_btree_iter_unlock(iter);
1584                 down_read(&c->gc_lock);
1585         }
1586
1587         /*
1588          * XXX: figure out how far we might need to split,
1589          * instead of locking/reserving all the way to the root:
1590          */
1591         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1592                 ret = -EINTR;
1593                 goto out;
1594         }
1595
1596         reserve = bch2_btree_reserve_get(c, b, 0, flags, &cl);
1597         if (IS_ERR(reserve)) {
1598                 ret = PTR_ERR(reserve);
1599                 if (ret == -EAGAIN) {
1600                         bch2_btree_iter_unlock(iter);
1601                         up_read(&c->gc_lock);
1602                         closure_sync(&cl);
1603                         return -EINTR;
1604                 }
1605                 goto out;
1606         }
1607
1608         as = bch2_btree_interior_update_alloc(c);
1609
1610         btree_split(b, iter, NULL, reserve, as);
1611         bch2_btree_reserve_put(c, reserve);
1612
1613         bch2_btree_iter_set_locks_want(iter, 1);
1614 out:
1615         up_read(&c->gc_lock);
1616         return ret;
1617 }
1618
1619 enum btree_node_sibling {
1620         btree_prev_sib,
1621         btree_next_sib,
1622 };
1623
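/*
 * Look up and intent lock @b's previous or next sibling, via the parent node's
 * iterator. Returns NULL if there is no parent or no sibling in that
 * direction, or ERR_PTR(-EINTR) if the necessary locks couldn't be taken.
 */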
1624 static struct btree *btree_node_get_sibling(struct btree_iter *iter,
1625                                             struct btree *b,
1626                                             enum btree_node_sibling sib)
1627 {
1628         struct btree *parent;
1629         struct btree_node_iter node_iter;
1630         struct bkey_packed *k;
1631         BKEY_PADDED(k) tmp;
1632         struct btree *ret;
1633         unsigned level = b->level;
1634
1635         parent = iter->nodes[level + 1];
1636         if (!parent)
1637                 return NULL;
1638
1639         if (!bch2_btree_node_relock(iter, level + 1)) {
1640                 bch2_btree_iter_set_locks_want(iter, level + 2);
1641                 return ERR_PTR(-EINTR);
1642         }
1643
1644         node_iter = iter->node_iters[parent->level];
1645
1646         k = bch2_btree_node_iter_peek_all(&node_iter, parent);
1647         BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
1648
1649         do {
1650                 k = sib == btree_prev_sib
1651                         ? bch2_btree_node_iter_prev_all(&node_iter, parent)
1652                         : (bch2_btree_node_iter_advance(&node_iter, parent),
1653                            bch2_btree_node_iter_peek_all(&node_iter, parent));
1654                 if (!k)
1655                         return NULL;
1656         } while (bkey_deleted(k));
1657
1658         bch2_bkey_unpack(parent, &tmp.k, k);
1659
1660         ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1661
1662         if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
1663                 btree_node_unlock(iter, level);
1664                 ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1665         }
1666
1667         if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) {
1668                 six_unlock_intent(&ret->lock);
1669                 ret = ERR_PTR(-EINTR);
1670         }
1671
1672         return ret;
1673 }
1674
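/*
 * __foreground_maybe_merge - try to merge the iterator's node with a sibling
 *
 * Recomputes the size the node and its prev/next sibling would have when
 * repacked with a common key format; if they would fit in a single node, both
 * are sorted into a new node and the parent is updated (a whiteout for the
 * first of the two keys, the new node's key in place of the second).
 */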
1675 static int __foreground_maybe_merge(struct btree_iter *iter,
1676                                     enum btree_node_sibling sib)
1677 {
1678         struct bch_fs *c = iter->c;
1679         struct btree_reserve *reserve;
1680         struct btree_interior_update *as;
1681         struct bkey_format_state new_s;
1682         struct bkey_format new_f;
1683         struct bkey_i delete;
1684         struct btree *b, *m, *n, *prev, *next, *parent;
1685         struct closure cl;
1686         size_t sib_u64s;
1687         int ret = 0;
1688
1689         closure_init_stack(&cl);
1690 retry:
1691         if (!bch2_btree_node_relock(iter, iter->level))
1692                 return 0;
1693
1694         b = iter->nodes[iter->level];
1695
1696         parent = iter->nodes[b->level + 1];
1697         if (!parent)
1698                 return 0;
1699
1700         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1701                 return 0;
1702
1703         /* XXX: can't be holding read locks */
1704         m = btree_node_get_sibling(iter, b, sib);
1705         if (IS_ERR(m)) {
1706                 ret = PTR_ERR(m);
1707                 goto out;
1708         }
1709
1710         /* NULL means no sibling: */
1711         if (!m) {
1712                 b->sib_u64s[sib] = U16_MAX;
1713                 return 0;
1714         }
1715
1716         if (sib == btree_prev_sib) {
1717                 prev = m;
1718                 next = b;
1719         } else {
1720                 prev = b;
1721                 next = m;
1722         }
1723
1724         bch2_bkey_format_init(&new_s);
1725         __bch2_btree_calc_format(&new_s, b);
1726         __bch2_btree_calc_format(&new_s, m);
1727         new_f = bch2_bkey_format_done(&new_s);
1728
1729         sib_u64s = btree_node_u64s_with_format(b, &new_f) +
1730                 btree_node_u64s_with_format(m, &new_f);
1731
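        /*
         * When the combined size is over the hysteresis threshold, cache an
         * estimate pulled back towards the threshold (only half the excess is
         * kept) rather than the exact combined size - e.g. threshold + 100
         * u64s is cached as threshold + 50:
         */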
1732         if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
1733                 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1734                 sib_u64s /= 2;
1735                 sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1736         }
1737
1738         sib_u64s = min(sib_u64s, btree_max_u64s(c));
1739         b->sib_u64s[sib] = sib_u64s;
1740
1741         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
1742                 six_unlock_intent(&m->lock);
1743                 return 0;
1744         }
1745
1746         /* We're changing btree topology, which doesn't mix with gc: */
1747         if (!down_read_trylock(&c->gc_lock)) {
1748                 six_unlock_intent(&m->lock);
1749                 bch2_btree_iter_unlock(iter);
1750
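                /*
                 * We dropped our locks above; taking and releasing gc_lock
                 * waits for any gc currently in progress to finish before we
                 * retry:
                 */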
1751                 down_read(&c->gc_lock);
1752                 up_read(&c->gc_lock);
1753                 ret = -EINTR;
1754                 goto out;
1755         }
1756
1757         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1758                 ret = -EINTR;
1759                 goto out_unlock;
1760         }
1761
1762         reserve = bch2_btree_reserve_get(c, b, 0,
1763                                         BTREE_INSERT_NOFAIL|
1764                                         BTREE_INSERT_USE_RESERVE,
1765                                         &cl);
1766         if (IS_ERR(reserve)) {
1767                 ret = PTR_ERR(reserve);
1768                 goto out_unlock;
1769         }
1770
1771         as = bch2_btree_interior_update_alloc(c);
1772
1773         bch2_btree_interior_update_will_free_node(c, as, b);
1774         bch2_btree_interior_update_will_free_node(c, as, m);
1775
1776         n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve);
1777         list_add(&n->reachable, &as->reachable_list);
1778
1779         n->data->min_key        = prev->data->min_key;
1780         n->data->max_key        = next->data->max_key;
1781         n->data->format         = new_f;
1782         n->key.k.p              = next->key.k.p;
1783
1784         btree_node_set_format(n, new_f);
1785
1786         bch2_btree_sort_into(c, n, prev);
1787         bch2_btree_sort_into(c, n, next);
1788
1789         bch2_btree_build_aux_trees(n);
1790         six_unlock_write(&n->lock);
1791
1792         bkey_init(&delete.k);
1793         delete.k.p = prev->key.k.p;
1794         bch2_keylist_add(&as->parent_keys, &delete);
1795         bch2_keylist_add(&as->parent_keys, &n->key);
1796
1797         bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
1798
1799         bch2_btree_insert_node(parent, iter, &as->parent_keys, reserve, as);
1800
1801         bch2_btree_open_bucket_put(c, n);
1802         bch2_btree_node_free_inmem(iter, b);
1803         bch2_btree_node_free_inmem(iter, m);
1804         bch2_btree_iter_node_replace(iter, n);
1805
1806         bch2_btree_iter_verify(iter, n);
1807
1808         bch2_btree_reserve_put(c, reserve);
1809 out_unlock:
1810         if (ret != -EINTR && ret != -EAGAIN)
1811                 bch2_btree_iter_set_locks_want(iter, 1);
1812         six_unlock_intent(&m->lock);
1813         up_read(&c->gc_lock);
1814 out:
1815         if (ret == -EAGAIN || ret == -EINTR) {
1816                 bch2_btree_iter_unlock(iter);
1817                 ret = -EINTR;
1818         }
1819
1820         closure_sync(&cl);
1821
1822         if (ret == -EINTR) {
1823                 ret = bch2_btree_iter_traverse(iter);
1824                 if (!ret)
1825                         goto retry;
1826         }
1827
1828         return ret;
1829 }
1830
1831 static inline int foreground_maybe_merge(struct btree_iter *iter,
1832                                          enum btree_node_sibling sib)
1833 {
1834         struct bch_fs *c = iter->c;
1835         struct btree *b;
1836
1837         if (!btree_node_locked(iter, iter->level))
1838                 return 0;
1839
1840         b = iter->nodes[iter->level];
1841         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1842                 return 0;
1843
1844         return __foreground_maybe_merge(iter, sib);
1845 }
1846
1847 /**
1848  * btree_insert_key - insert a single key into a leaf node
1849  */
1850 static enum btree_insert_ret
1851 btree_insert_key(struct btree_insert *trans,
1852                  struct btree_insert_entry *insert)
1853 {
1854         struct bch_fs *c = trans->c;
1855         struct btree_iter *iter = insert->iter;
1856         struct btree *b = iter->nodes[0];
1857         enum btree_insert_ret ret;
1858         int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
1859         int old_live_u64s = b->nr.live_u64s;
1860         int live_u64s_added, u64s_added;
1861
1862         ret = !btree_node_is_extents(b)
1863                 ? bch2_insert_fixup_key(trans, insert)
1864                 : bch2_insert_fixup_extent(trans, insert);
1865
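        /*
         * Keep the cached sibling merge estimates in sync with the live u64s
         * we just removed, and compact whiteouts if the bset grew by more than
         * the live keys did:
         */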
1866         live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
1867         u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
1868
1869         if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
1870                 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
1871         if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
1872                 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
1873
1874         if (u64s_added > live_u64s_added &&
1875             bch2_maybe_compact_whiteouts(iter->c, b))
1876                 bch2_btree_iter_reinit_node(iter, b);
1877
1878         trace_btree_insert_key(c, b, insert->k);
1879         return ret;
1880 }
1881
1882 static bool same_leaf_as_prev(struct btree_insert *trans,
1883                               struct btree_insert_entry *i)
1884 {
1885         /*
1886          * Because we sorted the transaction entries, if multiple iterators
1887          * point to the same leaf node they'll always be adjacent now:
1888          */
1889         return i != trans->entries &&
1890                 i[0].iter->nodes[0] == i[-1].iter->nodes[0];
1891 }
1892
1893 #define trans_for_each_entry(trans, i)                                  \
1894         for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
1895
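/*
 * Take (and later release) the write lock on each leaf involved in the
 * transaction exactly once - entries pointing at the same leaf are adjacent
 * because the transaction was sorted:
 */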
1896 static void multi_lock_write(struct btree_insert *trans)
1897 {
1898         struct btree_insert_entry *i;
1899
1900         trans_for_each_entry(trans, i)
1901                 if (!same_leaf_as_prev(trans, i))
1902                         btree_node_lock_for_insert(i->iter->nodes[0], i->iter);
1903 }
1904
1905 static void multi_unlock_write(struct btree_insert *trans)
1906 {
1907         struct btree_insert_entry *i;
1908
1909         trans_for_each_entry(trans, i)
1910                 if (!same_leaf_as_prev(trans, i))
1911                         bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter);
1912 }
1913
1914 static int btree_trans_entry_cmp(const void *_l, const void *_r)
1915 {
1916         const struct btree_insert_entry *l = _l;
1917         const struct btree_insert_entry *r = _r;
1918
1919         return btree_iter_cmp(l->iter, r->iter);
1920 }
1921
1922 /* Normal update interface: */
1923
1924 /**
1925  * __bch2_btree_insert_at - insert keys at given iterator positions
1926  *
1927  * This is the main entry point for btree updates.
1928  *
1929  * Return values:
1930  * -EINTR: locking changed; this function should be called again. Only returned
1931  *  if passed BTREE_INSERT_ATOMIC.
1932  * -EROFS: filesystem read only
1933  * -EIO: journal or btree node IO error
1934  */
1935 int __bch2_btree_insert_at(struct btree_insert *trans)
1936 {
1937         struct bch_fs *c = trans->c;
1938         struct btree_insert_entry *i;
1939         struct btree_iter *split = NULL;
1940         bool cycle_gc_lock = false;
1941         unsigned u64s;
1942         int ret;
1943
1944         trans_for_each_entry(trans, i) {
1945                 BUG_ON(i->iter->level);
1946                 BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
1947         }
1948
1949         sort(trans->entries, trans->nr, sizeof(trans->entries[0]),
1950              btree_trans_entry_cmp, NULL);
1951
1952         if (unlikely(!percpu_ref_tryget(&c->writes)))
1953                 return -EROFS;
1954 retry_locks:
1955         ret = -EINTR;
1956         trans_for_each_entry(trans, i)
1957                 if (!bch2_btree_iter_set_locks_want(i->iter, 1))
1958                         goto err;
1959 retry:
1960         trans->did_work = false;
1961         u64s = 0;
1962         trans_for_each_entry(trans, i)
1963                 if (!i->done)
1964                         u64s += jset_u64s(i->k->k.u64s + i->extra_res);
1965
1966         memset(&trans->journal_res, 0, sizeof(trans->journal_res));
1967
1968         ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
1969                 ? bch2_journal_res_get(&c->journal,
1970                                       &trans->journal_res,
1971                                       u64s, u64s)
1972                 : 0;
1973         if (ret)
1974                 goto err;
1975
1976         multi_lock_write(trans);
1977
1978         u64s = 0;
1979         trans_for_each_entry(trans, i) {
1980                 /* Multiple inserts might go to same leaf: */
1981                 if (!same_leaf_as_prev(trans, i))
1982                         u64s = 0;
1983
1984                 /*
1985                  * bch2_btree_node_insert_fits() must be called under write lock:
1986                  * with only an intent lock, another thread can still call
1987                  * bch2_btree_node_write(), converting an unwritten bset to a
1988                  * written one
1989                  */
1990                 if (!i->done) {
1991                         u64s += i->k->k.u64s + i->extra_res;
1992                         if (!bch2_btree_node_insert_fits(c,
1993                                         i->iter->nodes[0], u64s)) {
1994                                 split = i->iter;
1995                                 goto unlock;
1996                         }
1997                 }
1998         }
1999
2000         ret = 0;
2001         split = NULL;
2002         cycle_gc_lock = false;
2003
2004         trans_for_each_entry(trans, i) {
2005                 if (i->done)
2006                         continue;
2007
2008                 switch (btree_insert_key(trans, i)) {
2009                 case BTREE_INSERT_OK:
2010                         i->done = true;
2011                         break;
2012                 case BTREE_INSERT_JOURNAL_RES_FULL:
2013                 case BTREE_INSERT_NEED_TRAVERSE:
2014                         ret = -EINTR;
2015                         break;
2016                 case BTREE_INSERT_NEED_RESCHED:
2017                         ret = -EAGAIN;
2018                         break;
2019                 case BTREE_INSERT_BTREE_NODE_FULL:
2020                         split = i->iter;
2021                         break;
2022                 case BTREE_INSERT_ENOSPC:
2023                         ret = -ENOSPC;
2024                         break;
2025                 case BTREE_INSERT_NEED_GC_LOCK:
2026                         cycle_gc_lock = true;
2027                         ret = -EINTR;
2028                         break;
2029                 default:
2030                         BUG();
2031                 }
2032
2033                 if (!trans->did_work && (ret || split))
2034                         break;
2035         }
2036 unlock:
2037         multi_unlock_write(trans);
2038         bch2_journal_res_put(&c->journal, &trans->journal_res);
2039
2040         if (split)
2041                 goto split;
2042         if (ret)
2043                 goto err;
2044
2045         /*
2046          * hack: iterators are inconsistent when they hit the end of a leaf,
2047          * until traversed again
2048          */
2049         trans_for_each_entry(trans, i)
2050                 if (i->iter->flags & BTREE_ITER_AT_END_OF_LEAF)
2051                         goto out;
2052
2053         trans_for_each_entry(trans, i)
2054                 if (!same_leaf_as_prev(trans, i)) {
2055                         foreground_maybe_merge(i->iter, btree_prev_sib);
2056                         foreground_maybe_merge(i->iter, btree_next_sib);
2057                 }
2058 out:
2059         /* make sure we didn't lose an error: */
2060         if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
2061                 trans_for_each_entry(trans, i)
2062                         BUG_ON(!i->done);
2063
2064         percpu_ref_put(&c->writes);
2065         return ret;
2066 split:
2067         /*
2068          * have to drop journal res before splitting, because splitting means
2069          * allocating new btree nodes, and holding a journal reservation
2070          * potentially blocks the allocator:
2071          */
2072         ret = bch2_btree_split_leaf(split, trans->flags);
2073         if (ret)
2074                 goto err;
2075         /*
2076          * If the split didn't have to drop locks, the insert is still atomic
2077          * (in the BTREE_INSERT_ATOMIC sense: what the caller peeked() and is
2078          * overwriting won't have changed)
2079          */
2080         goto retry_locks;
2081 err:
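        /*
         * If an insert returned BTREE_INSERT_NEED_GC_LOCK, taking and
         * releasing gc_lock waits for the gc currently in progress to finish
         * before we retry:
         */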
2082         if (cycle_gc_lock) {
2083                 down_read(&c->gc_lock);
2084                 up_read(&c->gc_lock);
2085         }
2086
2087         if (ret == -EINTR) {
2088                 trans_for_each_entry(trans, i) {
2089                         int ret2 = bch2_btree_iter_traverse(i->iter);
2090                         if (ret2) {
2091                                 ret = ret2;
2092                                 goto out;
2093                         }
2094                 }
2095
2096                 /*
2097                  * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
2098                  * dropped locks:
2099                  */
2100                 if (!(trans->flags & BTREE_INSERT_ATOMIC))
2101                         goto retry;
2102         }
2103
2104         goto out;
2105 }
2106
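/*
 * Example usage (illustrative sketch only - @iter, @new_key and @ret are the
 * caller's own variables): with BTREE_INSERT_ATOMIC, callers handle -EINTR by
 * re-traversing and re-deriving what they're inserting, roughly:
 *
 *      do {
 *              ret = bch2_btree_iter_traverse(&iter);
 *              if (ret)
 *                      break;
 *
 *              // recompute @new_key from the current btree contents here
 *
 *              ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
 *                                         BTREE_INSERT_ATOMIC,
 *                                         BTREE_INSERT_ENTRY(&iter, &new_key));
 *      } while (ret == -EINTR);
 */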
2107 int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
2108 {
2109         struct bkey_i k;
2110
2111         bkey_init(&k.k);
2112         k.k.p = iter->pos;
2113
2114         return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
2115                                     BTREE_INSERT_NOFAIL|
2116                                     BTREE_INSERT_USE_RESERVE|flags,
2117                                     BTREE_INSERT_ENTRY(iter, &k));
2118 }
2119
2120 int bch2_btree_insert_list_at(struct btree_iter *iter,
2121                              struct keylist *keys,
2122                              struct disk_reservation *disk_res,
2123                              struct extent_insert_hook *hook,
2124                              u64 *journal_seq, unsigned flags)
2125 {
2126         BUG_ON(flags & BTREE_INSERT_ATOMIC);
2127         BUG_ON(bch2_keylist_empty(keys));
2128         verify_keys_sorted(keys);
2129
2130         while (!bch2_keylist_empty(keys)) {
2131                 /* need to traverse between each insert */
2132                 int ret = bch2_btree_iter_traverse(iter);
2133                 if (ret)
2134                         return ret;
2135
2136                 ret = bch2_btree_insert_at(iter->c, disk_res, hook,
2137                                 journal_seq, flags,
2138                                 BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
2139                 if (ret)
2140                         return ret;
2141
2142                 bch2_keylist_pop_front(keys);
2143         }
2144
2145         return 0;
2146 }
2147
2148 /**
2149  * bch2_btree_insert - insert a single key into the btree @id
2150  * @c:                  pointer to struct bch_fs
2151  * @id:                 btree to insert into
2152  * @k:                  key to insert
2153  * @disk_res:           disk space reservation, may be NULL
2154  */
2155 int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
2156                      struct bkey_i *k,
2157                      struct disk_reservation *disk_res,
2158                      struct extent_insert_hook *hook,
2159                      u64 *journal_seq, int flags)
2160 {
2161         struct btree_iter iter;
2162         int ret, ret2;
2163
2164         bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
2165                              BTREE_ITER_INTENT);
2166
2167         ret = bch2_btree_iter_traverse(&iter);
2168         if (unlikely(ret))
2169                 goto out;
2170
2171         ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
2172                                   BTREE_INSERT_ENTRY(&iter, k));
2173 out:    ret2 = bch2_btree_iter_unlock(&iter);
2174
2175         return ret ?: ret2;
2176 }
2177
2178 /**
2179  * bch2_btree_update - like bch2_btree_insert(), but returns -ENOENT if the
2180  * key being overwritten doesn't already exist
2181  */
2182 int bch2_btree_update(struct bch_fs *c, enum btree_id id,
2183                      struct bkey_i *k, u64 *journal_seq)
2184 {
2185         struct btree_iter iter;
2186         struct bkey_s_c u;
2187         int ret;
2188
2189         EBUG_ON(id == BTREE_ID_EXTENTS);
2190
2191         bch2_btree_iter_init(&iter, c, id, k->k.p,
2192                              BTREE_ITER_INTENT);
2193
2194         u = bch2_btree_iter_peek_with_holes(&iter);
2195         ret = btree_iter_err(u);
2196         if (ret)
2197                 return ret;
2198
2199         if (bkey_deleted(u.k)) {
2200                 bch2_btree_iter_unlock(&iter);
2201                 return -ENOENT;
2202         }
2203
2204         ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0,
2205                                   BTREE_INSERT_ENTRY(&iter, k));
2206         bch2_btree_iter_unlock(&iter);
2207         return ret;
2208 }
2209
2210 /*
2211  * bch2_btree_delete_range - delete everything within a given range
2212  *
2213  * Range is a half-open interval - [start, end)
2214  */
2215 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
2216                            struct bpos start,
2217                            struct bpos end,
2218                            struct bversion version,
2219                            struct disk_reservation *disk_res,
2220                            struct extent_insert_hook *hook,
2221                            u64 *journal_seq)
2222 {
2223         struct btree_iter iter;
2224         struct bkey_s_c k;
2225         int ret = 0;
2226
2227         bch2_btree_iter_init(&iter, c, id, start,
2228                              BTREE_ITER_INTENT);
2229
2230         while ((k = bch2_btree_iter_peek(&iter)).k &&
2231                !(ret = btree_iter_err(k))) {
2232                 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
2233                 /* really shouldn't be using a bare, unpadded bkey_i */
2234                 struct bkey_i delete;
2235
2236                 if (bkey_cmp(iter.pos, end) >= 0)
2237                         break;
2238
2239                 bkey_init(&delete.k);
2240
2241                 /*
2242                  * For extents, iter.pos won't necessarily be the same as
2243                  * bkey_start_pos(k.k) (for non extents they always will be the
2244                  * same). It's important that we delete starting from iter.pos
2245                  * because the range we want to delete could start in the middle
2246                  * of k.
2247                  *
2248                  * (bch2_btree_iter_peek() does guarantee that iter.pos >=
2249                  * bkey_start_pos(k.k)).
2250                  */
2251                 delete.k.p = iter.pos;
2252                 delete.k.version = version;
2253
2254                 if (iter.flags & BTREE_ITER_IS_EXTENTS) {
2255                         /*
2256                          * The extents btree is special - KEY_TYPE_DISCARD is
2257                          * used for deletions, not KEY_TYPE_DELETED. This is an
2258                          * internal implementation detail that probably
2259                          * shouldn't be exposed (internally, KEY_TYPE_DELETED is
2260                          * used as a proxy for k->size == 0):
2261                          */
2262                         delete.k.type = KEY_TYPE_DISCARD;
2263
2264                         /* create the biggest key we can */
2265                         bch2_key_resize(&delete.k, max_sectors);
2266                         bch2_cut_back(end, &delete.k);
2267                 }
2268
2269                 ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
2270                                           BTREE_INSERT_NOFAIL,
2271                                           BTREE_INSERT_ENTRY(&iter, &delete));
2272                 if (ret)
2273                         break;
2274
2275                 bch2_btree_iter_cond_resched(&iter);
2276         }
2277
2278         bch2_btree_iter_unlock(&iter);
2279         return ret;
2280 }
2281
2282 /**
2283  * bch2_btree_node_rewrite - Rewrite/move a btree node
2284  *
2285  * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e. if getting the
2286  * btree reserve would have had to block)
2287  */
2288 int bch2_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
2289                            struct closure *cl)
2290 {
2291         struct bch_fs *c = iter->c;
2292         struct btree *n, *parent = iter->nodes[b->level + 1];
2293         struct btree_reserve *reserve;
2294         struct btree_interior_update *as;
2295         unsigned flags = BTREE_INSERT_NOFAIL;
2296
2297         /*
2298          * If the caller is going to wait when allocating the reserve fails,
2299          * then this is a rewrite that must succeed:
2300          */
2301         if (cl)
2302                 flags |= BTREE_INSERT_USE_RESERVE;
2303
2304         if (!bch2_btree_iter_set_locks_want(iter, U8_MAX))
2305                 return -EINTR;
2306
2307         reserve = bch2_btree_reserve_get(c, b, 0, flags, cl);
2308         if (IS_ERR(reserve)) {
2309                 trace_btree_gc_rewrite_node_fail(c, b);
2310                 return PTR_ERR(reserve);
2311         }
2312
2313         as = bch2_btree_interior_update_alloc(c);
2314
2315         bch2_btree_interior_update_will_free_node(c, as, b);
2316
2317         n = bch2_btree_node_alloc_replacement(c, b, reserve);
2318         list_add(&n->reachable, &as->reachable_list);
2319
2320         bch2_btree_build_aux_trees(n);
2321         six_unlock_write(&n->lock);
2322
2323         trace_btree_gc_rewrite_node(c, b);
2324
2325         bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
2326
2327         if (parent) {
2328                 bch2_btree_insert_node(parent, iter,
2329                                       &keylist_single(&n->key),
2330                                       reserve, as);
2331         } else {
2332                 bch2_btree_set_root(iter, n, as, reserve);
2333         }
2334
2335         bch2_btree_open_bucket_put(c, n);
2336
2337         bch2_btree_node_free_inmem(iter, b);
2338
2339         BUG_ON(!bch2_btree_iter_node_replace(iter, n));
2340
2341         bch2_btree_reserve_put(c, reserve);
2342         return 0;
2343 }