]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcache/btree_update.c
43207071404540cbfa8fcf2fffdfacbdd9069c73
[bcachefs-tools-debian] / libbcache / btree_update.c
1
2 #include "bcache.h"
3 #include "alloc.h"
4 #include "bkey_methods.h"
5 #include "btree_cache.h"
6 #include "btree_gc.h"
7 #include "btree_update.h"
8 #include "btree_io.h"
9 #include "btree_iter.h"
10 #include "btree_locking.h"
11 #include "buckets.h"
12 #include "extents.h"
13 #include "journal.h"
14 #include "keylist.h"
15 #include "super-io.h"
16
17 #include <linux/random.h>
18 #include <linux/sort.h>
19 #include <trace/events/bcache.h>
20
21 static void btree_interior_update_updated_root(struct cache_set *,
22                                                struct btree_interior_update *,
23                                                enum btree_id);
24
25 /* Calculate ideal packed bkey format for new btree nodes: */
26
27 void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b)
28 {
29         struct bkey_packed *k;
30         struct bset_tree *t;
31         struct bkey uk;
32
33         bch_bkey_format_add_pos(s, b->data->min_key);
34
35         for_each_bset(b, t)
36                 for (k = btree_bkey_first(b, t);
37                      k != btree_bkey_last(b, t);
38                      k = bkey_next(k))
39                         if (!bkey_whiteout(k)) {
40                                 uk = bkey_unpack_key(b, k);
41                                 bch_bkey_format_add_key(s, &uk);
42                         }
43 }
44
45 static struct bkey_format bch_btree_calc_format(struct btree *b)
46 {
47         struct bkey_format_state s;
48
49         bch_bkey_format_init(&s);
50         __bch_btree_calc_format(&s, b);
51
52         return bch_bkey_format_done(&s);
53 }
54
55 static size_t btree_node_u64s_with_format(struct btree *b,
56                                           struct bkey_format *new_f)
57 {
58         struct bkey_format *old_f = &b->format;
59
60         /* stupid integer promotion rules */
61         ssize_t delta =
62             (((int) new_f->key_u64s - old_f->key_u64s) *
63              (int) b->nr.packed_keys) +
64             (((int) new_f->key_u64s - BKEY_U64s) *
65              (int) b->nr.unpacked_keys);
66
67         BUG_ON(delta + b->nr.live_u64s < 0);
68
69         return b->nr.live_u64s + delta;
70 }
71
72 /**
73  * btree_node_format_fits - check if we could rewrite node with a new format
74  *
75  * This assumes all keys can pack with the new format -- it just checks if
76  * the re-packed keys would fit inside the node itself.
77  */
78 bool bch_btree_node_format_fits(struct cache_set *c, struct btree *b,
79                                 struct bkey_format *new_f)
80 {
81         size_t u64s = btree_node_u64s_with_format(b, new_f);
82
83         return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
84 }
85
86 /* Btree node freeing/allocation: */
87
88 /*
89  * We're doing the index update that makes @b unreachable, update stuff to
90  * reflect that:
91  *
92  * Must be called _before_ btree_interior_update_updated_root() or
93  * btree_interior_update_updated_btree:
94  */
95 static void bch_btree_node_free_index(struct cache_set *c, struct btree *b,
96                                       enum btree_id id, struct bkey_s_c k,
97                                       struct bch_fs_usage *stats)
98 {
99         struct btree_interior_update *as;
100         struct pending_btree_node_free *d;
101
102         mutex_lock(&c->btree_interior_update_lock);
103
104         for_each_pending_btree_node_free(c, as, d)
105                 if (!bkey_cmp(k.k->p, d->key.k.p) &&
106                     bkey_val_bytes(k.k) == bkey_val_bytes(&d->key.k) &&
107                     !memcmp(k.v, &d->key.v, bkey_val_bytes(k.k)))
108                         goto found;
109
110         BUG();
111 found:
112         d->index_update_done = true;
113
114         /*
115          * Btree nodes are accounted as freed in bch_alloc_stats when they're
116          * freed from the index:
117          */
118         stats->s[S_COMPRESSED][S_META]   -= c->sb.btree_node_size;
119         stats->s[S_UNCOMPRESSED][S_META] -= c->sb.btree_node_size;
120
121         /*
122          * We're dropping @k from the btree, but it's still live until the
123          * index update is persistent so we need to keep a reference around for
124          * mark and sweep to find - that's primarily what the
125          * btree_node_pending_free list is for.
126          *
127          * So here (when we set index_update_done = true), we're moving an
128          * existing reference to a different part of the larger "gc keyspace" -
129          * and the new position comes after the old position, since GC marks
130          * the pending free list after it walks the btree.
131          *
132          * If we move the reference while mark and sweep is _between_ the old
133          * and the new position, mark and sweep will see the reference twice
134          * and it'll get double accounted - so check for that here and subtract
135          * to cancel out one of mark and sweep's markings if necessary:
136          */
137
138         /*
139          * bch_mark_key() compares the current gc pos to the pos we're
140          * moving this reference from, hence one comparison here:
141          */
142         if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
143                 struct bch_fs_usage tmp = { 0 };
144
145                 bch_mark_key(c, bkey_i_to_s_c(&d->key),
146                              -c->sb.btree_node_size, true, b
147                              ? gc_pos_btree_node(b)
148                              : gc_pos_btree_root(id),
149                              &tmp, 0);
150                 /*
151                  * Don't apply tmp - pending deletes aren't tracked in
152                  * bch_alloc_stats:
153                  */
154         }
155
156         mutex_unlock(&c->btree_interior_update_lock);
157 }
158
159 static void __btree_node_free(struct cache_set *c, struct btree *b,
160                               struct btree_iter *iter)
161 {
162         trace_bcache_btree_node_free(c, b);
163
164         BUG_ON(b == btree_node_root(c, b));
165         BUG_ON(b->ob);
166         BUG_ON(!list_empty(&b->write_blocked));
167
168         six_lock_write(&b->lock);
169
170         if (btree_node_dirty(b))
171                 bch_btree_complete_write(c, b, btree_current_write(b));
172         clear_btree_node_dirty(b);
173
174         mca_hash_remove(c, b);
175
176         mutex_lock(&c->btree_cache_lock);
177         list_move(&b->list, &c->btree_cache_freeable);
178         mutex_unlock(&c->btree_cache_lock);
179
180         /*
181          * By using six_unlock_write() directly instead of
182          * btree_node_unlock_write(), we don't update the iterator's sequence
183          * numbers and cause future btree_node_relock() calls to fail:
184          */
185         six_unlock_write(&b->lock);
186 }
187
188 void bch_btree_node_free_never_inserted(struct cache_set *c, struct btree *b)
189 {
190         struct open_bucket *ob = b->ob;
191
192         b->ob = NULL;
193
194         __btree_node_free(c, b, NULL);
195
196         bch_open_bucket_put(c, ob);
197 }
198
199 void bch_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
200 {
201         bch_btree_iter_node_drop_linked(iter, b);
202
203         __btree_node_free(iter->c, b, iter);
204
205         bch_btree_iter_node_drop(iter, b);
206 }
207
208 static void bch_btree_node_free_ondisk(struct cache_set *c,
209                                        struct pending_btree_node_free *pending)
210 {
211         struct bch_fs_usage stats = { 0 };
212
213         BUG_ON(!pending->index_update_done);
214
215         bch_mark_key(c, bkey_i_to_s_c(&pending->key),
216                      -c->sb.btree_node_size, true,
217                      gc_phase(GC_PHASE_PENDING_DELETE),
218                      &stats, 0);
219         /*
220          * Don't apply stats - pending deletes aren't tracked in
221          * bch_alloc_stats:
222          */
223 }
224
225 void btree_open_bucket_put(struct cache_set *c, struct btree *b)
226 {
227         bch_open_bucket_put(c, b->ob);
228         b->ob = NULL;
229 }
230
231 static struct btree *__bch_btree_node_alloc(struct cache_set *c,
232                                             bool use_reserve,
233                                             struct disk_reservation *res,
234                                             struct closure *cl)
235 {
236         BKEY_PADDED(k) tmp;
237         struct open_bucket *ob;
238         struct btree *b;
239         unsigned reserve = use_reserve ? 0 : BTREE_NODE_RESERVE;
240
241         mutex_lock(&c->btree_reserve_cache_lock);
242         if (c->btree_reserve_cache_nr > reserve) {
243                 struct btree_alloc *a =
244                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
245
246                 ob = a->ob;
247                 bkey_copy(&tmp.k, &a->k);
248                 mutex_unlock(&c->btree_reserve_cache_lock);
249                 goto mem_alloc;
250         }
251         mutex_unlock(&c->btree_reserve_cache_lock);
252
253 retry:
254         /* alloc_sectors is weird, I suppose */
255         bkey_extent_init(&tmp.k);
256         tmp.k.k.size = c->sb.btree_node_size,
257
258         ob = bch_alloc_sectors(c, &c->btree_write_point,
259                                bkey_i_to_extent(&tmp.k),
260                                res->nr_replicas,
261                                c->opts.metadata_replicas_required,
262                                use_reserve ? RESERVE_BTREE : RESERVE_NONE,
263                                cl);
264         if (IS_ERR(ob))
265                 return ERR_CAST(ob);
266
267         if (tmp.k.k.size < c->sb.btree_node_size) {
268                 bch_open_bucket_put(c, ob);
269                 goto retry;
270         }
271 mem_alloc:
272         b = mca_alloc(c);
273
274         /* we hold cannibalize_lock: */
275         BUG_ON(IS_ERR(b));
276         BUG_ON(b->ob);
277
278         bkey_copy(&b->key, &tmp.k);
279         b->key.k.size = 0;
280         b->ob = ob;
281
282         return b;
283 }
284
285 static struct btree *bch_btree_node_alloc(struct cache_set *c,
286                                           unsigned level, enum btree_id id,
287                                           struct btree_reserve *reserve)
288 {
289         struct btree *b;
290
291         BUG_ON(!reserve->nr);
292
293         b = reserve->b[--reserve->nr];
294
295         BUG_ON(mca_hash_insert(c, b, level, id));
296
297         set_btree_node_accessed(b);
298         set_btree_node_dirty(b);
299
300         bch_bset_init_first(b, &b->data->keys);
301         memset(&b->nr, 0, sizeof(b->nr));
302         b->data->magic = cpu_to_le64(bset_magic(c));
303         b->data->flags = 0;
304         SET_BTREE_NODE_ID(b->data, id);
305         SET_BTREE_NODE_LEVEL(b->data, level);
306         b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
307
308         bch_btree_build_aux_trees(b);
309
310         bch_check_mark_super(c, &b->key, true);
311
312         trace_bcache_btree_node_alloc(c, b);
313         return b;
314 }
315
316 struct btree *__btree_node_alloc_replacement(struct cache_set *c,
317                                              struct btree *b,
318                                              struct bkey_format format,
319                                              struct btree_reserve *reserve)
320 {
321         struct btree *n;
322
323         n = bch_btree_node_alloc(c, b->level, b->btree_id, reserve);
324
325         n->data->min_key        = b->data->min_key;
326         n->data->max_key        = b->data->max_key;
327         n->data->format         = format;
328
329         btree_node_set_format(n, format);
330
331         bch_btree_sort_into(c, n, b);
332
333         btree_node_reset_sib_u64s(n);
334
335         n->key.k.p = b->key.k.p;
336         trace_bcache_btree_node_alloc_replacement(c, b, n);
337
338         return n;
339 }
340
341 struct btree *btree_node_alloc_replacement(struct cache_set *c,
342                                            struct btree *b,
343                                            struct btree_reserve *reserve)
344 {
345         struct bkey_format new_f = bch_btree_calc_format(b);
346
347         /*
348          * The keys might expand with the new format - if they wouldn't fit in
349          * the btree node anymore, use the old format for now:
350          */
351         if (!bch_btree_node_format_fits(c, b, &new_f))
352                 new_f = b->format;
353
354         return __btree_node_alloc_replacement(c, b, new_f, reserve);
355 }
356
357 static void bch_btree_set_root_inmem(struct cache_set *c, struct btree *b,
358                                      struct btree_reserve *btree_reserve)
359 {
360         struct btree *old = btree_node_root(c, b);
361
362         /* Root nodes cannot be reaped */
363         mutex_lock(&c->btree_cache_lock);
364         list_del_init(&b->list);
365         mutex_unlock(&c->btree_cache_lock);
366
367         mutex_lock(&c->btree_root_lock);
368         btree_node_root(c, b) = b;
369         mutex_unlock(&c->btree_root_lock);
370
371         if (btree_reserve) {
372                 /*
373                  * New allocation (we're not being called because we're in
374                  * bch_btree_root_read()) - do marking while holding
375                  * btree_root_lock:
376                  */
377                 struct bch_fs_usage stats = { 0 };
378
379                 bch_mark_key(c, bkey_i_to_s_c(&b->key),
380                              c->sb.btree_node_size, true,
381                              gc_pos_btree_root(b->btree_id),
382                              &stats, 0);
383
384                 if (old)
385                         bch_btree_node_free_index(c, NULL, old->btree_id,
386                                                   bkey_i_to_s_c(&old->key),
387                                                   &stats);
388                 bch_fs_stats_apply(c, &stats, &btree_reserve->disk_res,
389                                    gc_pos_btree_root(b->btree_id));
390         }
391
392         bch_recalc_btree_reserve(c);
393 }
394
395 static void bch_btree_set_root_ondisk(struct cache_set *c, struct btree *b)
396 {
397         struct btree_root *r = &c->btree_roots[b->btree_id];
398
399         mutex_lock(&c->btree_root_lock);
400
401         BUG_ON(b != r->b);
402         bkey_copy(&r->key, &b->key);
403         r->level = b->level;
404         r->alive = true;
405
406         mutex_unlock(&c->btree_root_lock);
407 }
408
409 /*
410  * Only for cache set bringup, when first reading the btree roots or allocating
411  * btree roots when initializing a new cache set:
412  */
413 void bch_btree_set_root_initial(struct cache_set *c, struct btree *b,
414                                 struct btree_reserve *btree_reserve)
415 {
416         BUG_ON(btree_node_root(c, b));
417
418         bch_btree_set_root_inmem(c, b, btree_reserve);
419         bch_btree_set_root_ondisk(c, b);
420 }
421
422 /**
423  * bch_btree_set_root - update the root in memory and on disk
424  *
425  * To ensure forward progress, the current task must not be holding any
426  * btree node write locks. However, you must hold an intent lock on the
427  * old root.
428  *
429  * Note: This allocates a journal entry but doesn't add any keys to
430  * it.  All the btree roots are part of every journal write, so there
431  * is nothing new to be done.  This just guarantees that there is a
432  * journal write.
433  */
434 static void bch_btree_set_root(struct btree_iter *iter, struct btree *b,
435                                struct btree_interior_update *as,
436                                struct btree_reserve *btree_reserve)
437 {
438         struct cache_set *c = iter->c;
439         struct btree *old;
440
441         trace_bcache_btree_set_root(c, b);
442         BUG_ON(!b->written);
443
444         old = btree_node_root(c, b);
445
446         /*
447          * Ensure no one is using the old root while we switch to the
448          * new root:
449          */
450         btree_node_lock_write(old, iter);
451
452         bch_btree_set_root_inmem(c, b, btree_reserve);
453
454         btree_interior_update_updated_root(c, as, iter->btree_id);
455
456         /*
457          * Unlock old root after new root is visible:
458          *
459          * The new root isn't persistent, but that's ok: we still have
460          * an intent lock on the new root, and any updates that would
461          * depend on the new root would have to update the new root.
462          */
463         btree_node_unlock_write(old, iter);
464 }
465
466 static struct btree *__btree_root_alloc(struct cache_set *c, unsigned level,
467                                         enum btree_id id,
468                                         struct btree_reserve *reserve)
469 {
470         struct btree *b = bch_btree_node_alloc(c, level, id, reserve);
471
472         b->data->min_key = POS_MIN;
473         b->data->max_key = POS_MAX;
474         b->data->format = bch_btree_calc_format(b);
475         b->key.k.p = POS_MAX;
476
477         btree_node_set_format(b, b->data->format);
478         bch_btree_build_aux_trees(b);
479
480         six_unlock_write(&b->lock);
481
482         return b;
483 }
484
485 void bch_btree_reserve_put(struct cache_set *c, struct btree_reserve *reserve)
486 {
487         bch_disk_reservation_put(c, &reserve->disk_res);
488
489         mutex_lock(&c->btree_reserve_cache_lock);
490
491         while (reserve->nr) {
492                 struct btree *b = reserve->b[--reserve->nr];
493
494                 six_unlock_write(&b->lock);
495
496                 if (c->btree_reserve_cache_nr <
497                     ARRAY_SIZE(c->btree_reserve_cache)) {
498                         struct btree_alloc *a =
499                                 &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
500
501                         a->ob = b->ob;
502                         b->ob = NULL;
503                         bkey_copy(&a->k, &b->key);
504                 } else {
505                         bch_open_bucket_put(c, b->ob);
506                         b->ob = NULL;
507                 }
508
509                 __btree_node_free(c, b, NULL);
510
511                 six_unlock_intent(&b->lock);
512         }
513
514         mutex_unlock(&c->btree_reserve_cache_lock);
515
516         mempool_free(reserve, &c->btree_reserve_pool);
517 }
518
519 static struct btree_reserve *__bch_btree_reserve_get(struct cache_set *c,
520                                                      unsigned nr_nodes,
521                                                      unsigned flags,
522                                                      struct closure *cl)
523 {
524         struct btree_reserve *reserve;
525         struct btree *b;
526         struct disk_reservation disk_res = { 0, 0 };
527         unsigned sectors = nr_nodes * c->sb.btree_node_size;
528         int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD|
529                 BCH_DISK_RESERVATION_METADATA;
530
531         if (flags & BTREE_INSERT_NOFAIL)
532                 disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
533
534         /*
535          * This check isn't necessary for correctness - it's just to potentially
536          * prevent us from doing a lot of work that'll end up being wasted:
537          */
538         ret = bch_journal_error(&c->journal);
539         if (ret)
540                 return ERR_PTR(ret);
541
542         if (bch_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
543                 return ERR_PTR(-ENOSPC);
544
545         BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
546
547         /*
548          * Protects reaping from the btree node cache and using the btree node
549          * open bucket reserve:
550          */
551         ret = mca_cannibalize_lock(c, cl);
552         if (ret) {
553                 bch_disk_reservation_put(c, &disk_res);
554                 return ERR_PTR(ret);
555         }
556
557         reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
558
559         reserve->disk_res = disk_res;
560         reserve->nr = 0;
561
562         while (reserve->nr < nr_nodes) {
563                 b = __bch_btree_node_alloc(c, flags & BTREE_INSERT_USE_RESERVE,
564                                            &disk_res, cl);
565                 if (IS_ERR(b)) {
566                         ret = PTR_ERR(b);
567                         goto err_free;
568                 }
569
570                 reserve->b[reserve->nr++] = b;
571         }
572
573         mca_cannibalize_unlock(c);
574         return reserve;
575 err_free:
576         bch_btree_reserve_put(c, reserve);
577         mca_cannibalize_unlock(c);
578         trace_bcache_btree_reserve_get_fail(c, nr_nodes, cl);
579         return ERR_PTR(ret);
580 }
581
582 struct btree_reserve *bch_btree_reserve_get(struct cache_set *c,
583                                             struct btree *b,
584                                             unsigned extra_nodes,
585                                             unsigned flags,
586                                             struct closure *cl)
587 {
588         unsigned depth = btree_node_root(c, b)->level - b->level;
589         unsigned nr_nodes = btree_reserve_required_nodes(depth) + extra_nodes;
590
591         return __bch_btree_reserve_get(c, nr_nodes, flags, cl);
592
593 }
594
595 int bch_btree_root_alloc(struct cache_set *c, enum btree_id id,
596                          struct closure *writes)
597 {
598         struct closure cl;
599         struct btree_reserve *reserve;
600         struct btree *b;
601
602         closure_init_stack(&cl);
603
604         while (1) {
605                 /* XXX haven't calculated capacity yet :/ */
606                 reserve = __bch_btree_reserve_get(c, 1, 0, &cl);
607                 if (!IS_ERR(reserve))
608                         break;
609
610                 if (PTR_ERR(reserve) == -ENOSPC)
611                         return PTR_ERR(reserve);
612
613                 closure_sync(&cl);
614         }
615
616         b = __btree_root_alloc(c, 0, id, reserve);
617
618         bch_btree_node_write(c, b, writes, SIX_LOCK_intent, -1);
619
620         bch_btree_set_root_initial(c, b, reserve);
621         btree_open_bucket_put(c, b);
622         six_unlock_intent(&b->lock);
623
624         bch_btree_reserve_put(c, reserve);
625
626         return 0;
627 }
628
629 static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
630                                        struct btree *b,
631                                        struct bkey_i *insert,
632                                        struct btree_node_iter *node_iter,
633                                        struct disk_reservation *disk_res)
634 {
635         struct cache_set *c = iter->c;
636         struct bch_fs_usage stats = { 0 };
637         struct bkey_packed *k;
638         struct bkey tmp;
639
640         if (bkey_extent_is_data(&insert->k))
641                 bch_mark_key(c, bkey_i_to_s_c(insert),
642                              c->sb.btree_node_size, true,
643                              gc_pos_btree_node(b), &stats, 0);
644
645         while ((k = bch_btree_node_iter_peek_all(node_iter, b)) &&
646                !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
647                 bch_btree_node_iter_advance(node_iter, b);
648
649         /*
650          * If we're overwriting, look up pending delete and mark so that gc
651          * marks it on the pending delete list:
652          */
653         if (k && !bkey_cmp_packed(b, k, &insert->k))
654                 bch_btree_node_free_index(c, b, iter->btree_id,
655                                           bkey_disassemble(b, k, &tmp),
656                                           &stats);
657
658         bch_fs_stats_apply(c, &stats, disk_res, gc_pos_btree_node(b));
659
660         bch_btree_bset_insert_key(iter, b, node_iter, insert);
661         set_btree_node_dirty(b);
662 }
663
664 /* Inserting into a given leaf node (last stage of insert): */
665
666 /* Handle overwrites and do insert, for non extents: */
667 bool bch_btree_bset_insert_key(struct btree_iter *iter,
668                                struct btree *b,
669                                struct btree_node_iter *node_iter,
670                                struct bkey_i *insert)
671 {
672         const struct bkey_format *f = &b->format;
673         struct bkey_packed *k;
674         struct bset_tree *t;
675         unsigned clobber_u64s;
676
677         EBUG_ON(btree_node_just_written(b));
678         EBUG_ON(bset_written(b, btree_bset_last(b)));
679         EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
680         EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
681                 bkey_cmp(insert->k.p, b->data->max_key) > 0);
682         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b));
683
684         k = bch_btree_node_iter_peek_all(node_iter, b);
685         if (k && !bkey_cmp_packed(b, k, &insert->k)) {
686                 BUG_ON(bkey_whiteout(k));
687
688                 t = bch_bkey_to_bset(b, k);
689
690                 if (bset_unwritten(b, bset(b, t)) &&
691                     bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
692                         BUG_ON(bkey_whiteout(k) != bkey_whiteout(&insert->k));
693
694                         k->type = insert->k.type;
695                         memcpy_u64s(bkeyp_val(f, k), &insert->v,
696                                     bkey_val_u64s(&insert->k));
697                         return true;
698                 }
699
700                 insert->k.needs_whiteout = k->needs_whiteout;
701
702                 btree_keys_account_key_drop(&b->nr, t - b->set, k);
703
704                 if (t == bset_tree_last(b)) {
705                         clobber_u64s = k->u64s;
706
707                         /*
708                          * If we're deleting, and the key we're deleting doesn't
709                          * need a whiteout (it wasn't overwriting a key that had
710                          * been written to disk) - just delete it:
711                          */
712                         if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
713                                 bch_bset_delete(b, k, clobber_u64s);
714                                 bch_btree_node_iter_fix(iter, b, node_iter, t,
715                                                         k, clobber_u64s, 0);
716                                 return true;
717                         }
718
719                         goto overwrite;
720                 }
721
722                 k->type = KEY_TYPE_DELETED;
723                 bch_btree_node_iter_fix(iter, b, node_iter, t, k,
724                                         k->u64s, k->u64s);
725
726                 if (bkey_whiteout(&insert->k)) {
727                         reserve_whiteout(b, t, k);
728                         return true;
729                 } else {
730                         k->needs_whiteout = false;
731                 }
732         } else {
733                 /*
734                  * Deleting, but the key to delete wasn't found - nothing to do:
735                  */
736                 if (bkey_whiteout(&insert->k))
737                         return false;
738
739                 insert->k.needs_whiteout = false;
740         }
741
742         t = bset_tree_last(b);
743         k = bch_btree_node_iter_bset_pos(node_iter, b, t);
744         clobber_u64s = 0;
745 overwrite:
746         bch_bset_insert(b, node_iter, k, insert, clobber_u64s);
747         if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
748                 bch_btree_node_iter_fix(iter, b, node_iter, t, k,
749                                         clobber_u64s, k->u64s);
750         return true;
751 }
752
753 static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
754                                unsigned i)
755 {
756         struct cache_set *c = container_of(j, struct cache_set, journal);
757         struct btree_write *w = container_of(pin, struct btree_write, journal);
758         struct btree *b = container_of(w, struct btree, writes[i]);
759
760         six_lock_read(&b->lock);
761         /*
762          * Reusing a btree node can race with the journal reclaim code calling
763          * the journal pin flush fn, and there's no good fix for this: we don't
764          * really want journal_pin_drop() to block until the flush fn is no
765          * longer running, because journal_pin_drop() is called from the btree
766          * node write endio function, and we can't wait on the flush fn to
767          * finish running in mca_reap() - where we make reused btree nodes ready
768          * to use again - because there, we're holding the lock this function
769          * needs - deadlock.
770          *
771          * So, the b->level check is a hack so we don't try to write nodes we
772          * shouldn't:
773          */
774         if (!b->level)
775                 bch_btree_node_write(c, b, NULL, SIX_LOCK_read, i);
776         six_unlock_read(&b->lock);
777 }
778
779 static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin)
780 {
781         return __btree_node_flush(j, pin, 0);
782 }
783
784 static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin)
785 {
786         return __btree_node_flush(j, pin, 1);
787 }
788
789 void bch_btree_journal_key(struct btree_insert *trans,
790                            struct btree_iter *iter,
791                            struct bkey_i *insert)
792 {
793         struct cache_set *c = trans->c;
794         struct journal *j = &c->journal;
795         struct btree *b = iter->nodes[0];
796         struct btree_write *w = btree_current_write(b);
797
798         EBUG_ON(iter->level || b->level);
799         EBUG_ON(!trans->journal_res.ref &&
800                 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
801
802         if (!journal_pin_active(&w->journal))
803                 bch_journal_pin_add(j, &w->journal,
804                                     btree_node_write_idx(b) == 0
805                                     ? btree_node_flush0
806                                     : btree_node_flush1);
807
808         if (trans->journal_res.ref) {
809                 u64 seq = trans->journal_res.seq;
810                 bool needs_whiteout = insert->k.needs_whiteout;
811
812                 /*
813                  * have a bug where we're seeing an extent with an invalid crc
814                  * entry in the journal, trying to track it down:
815                  */
816                 BUG_ON(bkey_invalid(c, b->btree_id, bkey_i_to_s_c(insert)));
817
818                 /* ick */
819                 insert->k.needs_whiteout = false;
820                 bch_journal_add_keys(j, &trans->journal_res,
821                                      b->btree_id, insert);
822                 insert->k.needs_whiteout = needs_whiteout;
823
824                 if (trans->journal_seq)
825                         *trans->journal_seq = seq;
826                 btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
827         }
828
829         if (!btree_node_dirty(b))
830                 set_btree_node_dirty(b);
831 }
832
833 static enum btree_insert_ret
834 bch_insert_fixup_key(struct btree_insert *trans,
835                      struct btree_insert_entry *insert)
836 {
837         struct btree_iter *iter = insert->iter;
838
839         BUG_ON(iter->level);
840
841         if (bch_btree_bset_insert_key(iter,
842                                       iter->nodes[0],
843                                       &iter->node_iters[0],
844                                       insert->k))
845                 bch_btree_journal_key(trans, iter, insert->k);
846
847         trans->did_work = true;
848         return BTREE_INSERT_OK;
849 }
850
851 static void verify_keys_sorted(struct keylist *l)
852 {
853 #ifdef CONFIG_BCACHE_DEBUG
854         struct bkey_i *k;
855
856         for_each_keylist_key(l, k)
857                 BUG_ON(bkey_next(k) != l->top &&
858                        bkey_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
859 #endif
860 }
861
862 static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter)
863 {
864         struct cache_set *c = iter->c;
865
866         btree_node_lock_write(b, iter);
867
868         if (btree_node_just_written(b) &&
869             bch_btree_post_write_cleanup(c, b))
870                 bch_btree_iter_reinit_node(iter, b);
871
872         /*
873          * If the last bset has been written, or if it's gotten too big - start
874          * a new bset to insert into:
875          */
876         if (want_new_bset(c, b))
877                 bch_btree_init_next(c, b, iter);
878 }
879
880 /* Asynchronous interior node update machinery */
881
882 struct btree_interior_update *
883 bch_btree_interior_update_alloc(struct cache_set *c)
884 {
885         struct btree_interior_update *as;
886
887         as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
888         memset(as, 0, sizeof(*as));
889         closure_init(&as->cl, &c->cl);
890         as->c           = c;
891         as->mode        = BTREE_INTERIOR_NO_UPDATE;
892
893         bch_keylist_init(&as->parent_keys, as->inline_keys,
894                          ARRAY_SIZE(as->inline_keys));
895
896         mutex_lock(&c->btree_interior_update_lock);
897         list_add(&as->list, &c->btree_interior_update_list);
898         mutex_unlock(&c->btree_interior_update_lock);
899
900         return as;
901 }
902
903 static void btree_interior_update_free(struct closure *cl)
904 {
905         struct btree_interior_update *as = container_of(cl, struct btree_interior_update, cl);
906
907         mempool_free(as, &as->c->btree_interior_update_pool);
908 }
909
910 static void btree_interior_update_nodes_reachable(struct closure *cl)
911 {
912         struct btree_interior_update *as =
913                 container_of(cl, struct btree_interior_update, cl);
914         struct cache_set *c = as->c;
915         unsigned i;
916
917         bch_journal_pin_drop(&c->journal, &as->journal);
918
919         mutex_lock(&c->btree_interior_update_lock);
920
921         for (i = 0; i < as->nr_pending; i++)
922                 bch_btree_node_free_ondisk(c, &as->pending[i]);
923         as->nr_pending = 0;
924
925         mutex_unlock(&c->btree_interior_update_lock);
926
927         mutex_lock(&c->btree_interior_update_lock);
928         list_del(&as->list);
929         mutex_unlock(&c->btree_interior_update_lock);
930
931         closure_wake_up(&as->wait);
932
933         closure_return_with_destructor(cl, btree_interior_update_free);
934 }
935
936 static void btree_interior_update_nodes_written(struct closure *cl)
937 {
938         struct btree_interior_update *as =
939                 container_of(cl, struct btree_interior_update, cl);
940         struct cache_set *c = as->c;
941         struct btree *b;
942
943         if (bch_journal_error(&c->journal)) {
944                 /* XXX what? */
945         }
946
947         /* XXX: missing error handling, damnit */
948
949         /* check for journal error, bail out if we flushed */
950
951         /*
952          * We did an update to a parent node where the pointers we added pointed
953          * to child nodes that weren't written yet: now, the child nodes have
954          * been written so we can write out the update to the interior node.
955          */
956 retry:
957         mutex_lock(&c->btree_interior_update_lock);
958         switch (as->mode) {
959         case BTREE_INTERIOR_NO_UPDATE:
960                 BUG();
961         case BTREE_INTERIOR_UPDATING_NODE:
962                 /* The usual case: */
963                 b = READ_ONCE(as->b);
964
965                 if (!six_trylock_read(&b->lock)) {
966                         mutex_unlock(&c->btree_interior_update_lock);
967                         six_lock_read(&b->lock);
968                         six_unlock_read(&b->lock);
969                         goto retry;
970                 }
971
972                 BUG_ON(!btree_node_dirty(b));
973                 closure_wait(&btree_current_write(b)->wait, cl);
974
975                 list_del(&as->write_blocked_list);
976
977                 if (list_empty(&b->write_blocked))
978                         bch_btree_node_write(c, b, NULL, SIX_LOCK_read, -1);
979                 six_unlock_read(&b->lock);
980                 break;
981
982         case BTREE_INTERIOR_UPDATING_AS:
983                 /*
984                  * The btree node we originally updated has been freed and is
985                  * being rewritten - so we need to write anything here, we just
986                  * need to signal to that btree_interior_update that it's ok to make the
987                  * new replacement node visible:
988                  */
989                 closure_put(&as->parent_as->cl);
990
991                 /*
992                  * and then we have to wait on that btree_interior_update to finish:
993                  */
994                 closure_wait(&as->parent_as->wait, cl);
995                 break;
996
997         case BTREE_INTERIOR_UPDATING_ROOT:
998                 /* b is the new btree root: */
999                 b = READ_ONCE(as->b);
1000
1001                 if (!six_trylock_read(&b->lock)) {
1002                         mutex_unlock(&c->btree_interior_update_lock);
1003                         six_lock_read(&b->lock);
1004                         six_unlock_read(&b->lock);
1005                         goto retry;
1006                 }
1007
1008                 BUG_ON(c->btree_roots[b->btree_id].as != as);
1009                 c->btree_roots[b->btree_id].as = NULL;
1010
1011                 bch_btree_set_root_ondisk(c, b);
1012
1013                 /*
1014                  * We don't have to wait anything anything here (before
1015                  * btree_interior_update_nodes_reachable frees the old nodes
1016                  * ondisk) - we've ensured that the very next journal write will
1017                  * have the pointer to the new root, and before the allocator
1018                  * can reuse the old nodes it'll have to do a journal commit:
1019                  */
1020                 six_unlock_read(&b->lock);
1021         }
1022         mutex_unlock(&c->btree_interior_update_lock);
1023
1024         continue_at(cl, btree_interior_update_nodes_reachable, system_wq);
1025 }
1026
1027 /*
1028  * We're updating @b with pointers to nodes that haven't finished writing yet:
1029  * block @b from being written until @as completes
1030  */
1031 static void btree_interior_update_updated_btree(struct cache_set *c,
1032                                                 struct btree_interior_update *as,
1033                                                 struct btree *b)
1034 {
1035         mutex_lock(&c->btree_interior_update_lock);
1036
1037         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1038         BUG_ON(!btree_node_dirty(b));
1039
1040         as->mode = BTREE_INTERIOR_UPDATING_NODE;
1041         as->b = b;
1042         list_add(&as->write_blocked_list, &b->write_blocked);
1043
1044         mutex_unlock(&c->btree_interior_update_lock);
1045
1046         bch_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
1047
1048         continue_at(&as->cl, btree_interior_update_nodes_written,
1049                     system_freezable_wq);
1050 }
1051
1052 static void btree_interior_update_updated_root(struct cache_set *c,
1053                                                struct btree_interior_update *as,
1054                                                enum btree_id btree_id)
1055 {
1056         struct btree_root *r = &c->btree_roots[btree_id];
1057
1058         mutex_lock(&c->btree_interior_update_lock);
1059
1060         BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1061
1062         /*
1063          * Old root might not be persistent yet - if so, redirect its
1064          * btree_interior_update operation to point to us:
1065          */
1066         if (r->as) {
1067                 BUG_ON(r->as->mode != BTREE_INTERIOR_UPDATING_ROOT);
1068
1069                 r->as->b = NULL;
1070                 r->as->mode = BTREE_INTERIOR_UPDATING_AS;
1071                 r->as->parent_as = as;
1072                 closure_get(&as->cl);
1073         }
1074
1075         as->mode = BTREE_INTERIOR_UPDATING_ROOT;
1076         as->b = r->b;
1077         r->as = as;
1078
1079         mutex_unlock(&c->btree_interior_update_lock);
1080
1081         bch_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
1082
1083         continue_at(&as->cl, btree_interior_update_nodes_written,
1084                     system_freezable_wq);
1085 }
1086
1087 static void interior_update_flush(struct journal *j, struct journal_entry_pin *pin)
1088 {
1089         struct btree_interior_update *as =
1090                 container_of(pin, struct btree_interior_update, journal);
1091
1092         bch_journal_flush_seq_async(j, as->journal_seq, NULL);
1093 }
1094
1095 /*
1096  * @b is being split/rewritten: it may have pointers to not-yet-written btree
1097  * nodes and thus outstanding btree_interior_updates - redirect @b's
1098  * btree_interior_updates to point to this btree_interior_update:
1099  */
1100 void bch_btree_interior_update_will_free_node(struct cache_set *c,
1101                                               struct btree_interior_update *as,
1102                                               struct btree *b)
1103 {
1104         struct btree_interior_update *p, *n;
1105         struct pending_btree_node_free *d;
1106         struct bset_tree *t;
1107
1108         /*
1109          * Does this node have data that hasn't been written in the journal?
1110          *
1111          * If so, we have to wait for the corresponding journal entry to be
1112          * written before making the new nodes reachable - we can't just carry
1113          * over the bset->journal_seq tracking, since we'll be mixing those keys
1114          * in with keys that aren't in the journal anymore:
1115          */
1116         for_each_bset(b, t)
1117                 as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
1118
1119         /*
1120          * Does this node have unwritten data that has a pin on the journal?
1121          *
1122          * If so, transfer that pin to the btree_interior_update operation -
1123          * note that if we're freeing multiple nodes, we only need to keep the
1124          * oldest pin of any of the nodes we're freeing. We'll release the pin
1125          * when the new nodes are persistent and reachable on disk:
1126          */
1127         bch_journal_pin_add_if_older(&c->journal,
1128                                      &b->writes[0].journal,
1129                                      &as->journal, interior_update_flush);
1130         bch_journal_pin_add_if_older(&c->journal,
1131                                      &b->writes[1].journal,
1132                                      &as->journal, interior_update_flush);
1133
1134         mutex_lock(&c->btree_interior_update_lock);
1135
1136         /*
1137          * Does this node have any btree_interior_update operations preventing
1138          * it from being written?
1139          *
1140          * If so, redirect them to point to this btree_interior_update: we can
1141          * write out our new nodes, but we won't make them visible until those
1142          * operations complete
1143          */
1144         list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
1145                 BUG_ON(p->mode != BTREE_INTERIOR_UPDATING_NODE);
1146
1147                 p->mode = BTREE_INTERIOR_UPDATING_AS;
1148                 list_del(&p->write_blocked_list);
1149                 p->b = NULL;
1150                 p->parent_as = as;
1151                 closure_get(&as->cl);
1152         }
1153
1154         /* Add this node to the list of nodes being freed: */
1155         BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
1156
1157         d = &as->pending[as->nr_pending++];
1158         d->index_update_done    = false;
1159         d->seq                  = b->data->keys.seq;
1160         d->btree_id             = b->btree_id;
1161         d->level                = b->level;
1162         bkey_copy(&d->key, &b->key);
1163
1164         mutex_unlock(&c->btree_interior_update_lock);
1165 }
1166
1167 static void btree_node_interior_verify(struct btree *b)
1168 {
1169         struct btree_node_iter iter;
1170         struct bkey_packed *k;
1171
1172         BUG_ON(!b->level);
1173
1174         bch_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
1175 #if 1
1176         BUG_ON(!(k = bch_btree_node_iter_peek(&iter, b)) ||
1177                bkey_cmp_left_packed(b, k, &b->key.k.p));
1178
1179         BUG_ON((bch_btree_node_iter_advance(&iter, b),
1180                 !bch_btree_node_iter_end(&iter)));
1181 #else
1182         const char *msg;
1183
1184         msg = "not found";
1185         k = bch_btree_node_iter_peek(&iter, b);
1186         if (!k)
1187                 goto err;
1188
1189         msg = "isn't what it should be";
1190         if (bkey_cmp_left_packed(b, k, &b->key.k.p))
1191                 goto err;
1192
1193         bch_btree_node_iter_advance(&iter, b);
1194
1195         msg = "isn't last key";
1196         if (!bch_btree_node_iter_end(&iter))
1197                 goto err;
1198         return;
1199 err:
1200         bch_dump_btree_node(b);
1201         printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
1202                b->key.k.p.offset, msg);
1203         BUG();
1204 #endif
1205 }
1206
1207 static enum btree_insert_ret
1208 bch_btree_insert_keys_interior(struct btree *b,
1209                                struct btree_iter *iter,
1210                                struct keylist *insert_keys,
1211                                struct btree_interior_update *as,
1212                                struct btree_reserve *res)
1213 {
1214         struct cache_set *c = iter->c;
1215         struct btree_iter *linked;
1216         struct btree_node_iter node_iter;
1217         struct bkey_i *insert = bch_keylist_front(insert_keys);
1218         struct bkey_packed *k;
1219
1220         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1221         BUG_ON(!b->level);
1222         BUG_ON(!as || as->b);
1223         verify_keys_sorted(insert_keys);
1224
1225         btree_node_lock_for_insert(b, iter);
1226
1227         if (bch_keylist_u64s(insert_keys) >
1228             bch_btree_keys_u64s_remaining(c, b)) {
1229                 btree_node_unlock_write(b, iter);
1230                 return BTREE_INSERT_BTREE_NODE_FULL;
1231         }
1232
1233         /* Don't screw up @iter's position: */
1234         node_iter = iter->node_iters[b->level];
1235
1236         /*
1237          * btree_split(), btree_gc_coalesce() will insert keys before
1238          * the iterator's current position - they know the keys go in
1239          * the node the iterator points to:
1240          */
1241         while ((k = bch_btree_node_iter_prev_all(&node_iter, b)) &&
1242                (bkey_cmp_packed(b, k, &insert->k) >= 0))
1243                 ;
1244
1245         while (!bch_keylist_empty(insert_keys)) {
1246                 insert = bch_keylist_front(insert_keys);
1247
1248                 bch_insert_fixup_btree_ptr(iter, b, insert,
1249                                            &node_iter, &res->disk_res);
1250                 bch_keylist_pop_front(insert_keys);
1251         }
1252
1253         btree_interior_update_updated_btree(c, as, b);
1254
1255         for_each_linked_btree_node(iter, b, linked)
1256                 bch_btree_node_iter_peek(&linked->node_iters[b->level],
1257                                          b);
1258         bch_btree_node_iter_peek(&iter->node_iters[b->level], b);
1259
1260         bch_btree_iter_verify(iter, b);
1261
1262         if (bch_maybe_compact_whiteouts(c, b))
1263                 bch_btree_iter_reinit_node(iter, b);
1264
1265         btree_node_unlock_write(b, iter);
1266
1267         btree_node_interior_verify(b);
1268         return BTREE_INSERT_OK;
1269 }
1270
1271 /*
1272  * Move keys from n1 (original replacement node, now lower node) to n2 (higher
1273  * node)
1274  */
1275 static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n1,
1276                                         struct btree_reserve *reserve)
1277 {
1278         size_t nr_packed = 0, nr_unpacked = 0;
1279         struct btree *n2;
1280         struct bset *set1, *set2;
1281         struct bkey_packed *k, *prev = NULL;
1282
1283         n2 = bch_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve);
1284         n2->data->max_key       = n1->data->max_key;
1285         n2->data->format        = n1->format;
1286         n2->key.k.p = n1->key.k.p;
1287
1288         btree_node_set_format(n2, n2->data->format);
1289
1290         set1 = btree_bset_first(n1);
1291         set2 = btree_bset_first(n2);
1292
1293         /*
1294          * Has to be a linear search because we don't have an auxiliary
1295          * search tree yet
1296          */
1297         k = set1->start;
1298         while (1) {
1299                 if (bkey_next(k) == vstruct_last(set1))
1300                         break;
1301                 if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
1302                         break;
1303
1304                 if (bkey_packed(k))
1305                         nr_packed++;
1306                 else
1307                         nr_unpacked++;
1308
1309                 prev = k;
1310                 k = bkey_next(k);
1311         }
1312
1313         BUG_ON(!prev);
1314
1315         n1->key.k.p = bkey_unpack_pos(n1, prev);
1316         n1->data->max_key = n1->key.k.p;
1317         n2->data->min_key =
1318                 btree_type_successor(n1->btree_id, n1->key.k.p);
1319
1320         set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
1321         set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
1322
1323         set_btree_bset_end(n1, n1->set);
1324         set_btree_bset_end(n2, n2->set);
1325
1326         n2->nr.live_u64s        = le16_to_cpu(set2->u64s);
1327         n2->nr.bset_u64s[0]     = le16_to_cpu(set2->u64s);
1328         n2->nr.packed_keys      = n1->nr.packed_keys - nr_packed;
1329         n2->nr.unpacked_keys    = n1->nr.unpacked_keys - nr_unpacked;
1330
1331         n1->nr.live_u64s        = le16_to_cpu(set1->u64s);
1332         n1->nr.bset_u64s[0]     = le16_to_cpu(set1->u64s);
1333         n1->nr.packed_keys      = nr_packed;
1334         n1->nr.unpacked_keys    = nr_unpacked;
1335
1336         BUG_ON(!set1->u64s);
1337         BUG_ON(!set2->u64s);
1338
1339         memcpy_u64s(set2->start,
1340                     vstruct_end(set1),
1341                     le16_to_cpu(set2->u64s));
1342
1343         btree_node_reset_sib_u64s(n1);
1344         btree_node_reset_sib_u64s(n2);
1345
1346         bch_verify_btree_nr_keys(n1);
1347         bch_verify_btree_nr_keys(n2);
1348
1349         if (n1->level) {
1350                 btree_node_interior_verify(n1);
1351                 btree_node_interior_verify(n2);
1352         }
1353
1354         return n2;
1355 }
1356
1357 /*
1358  * For updates to interior nodes, we've got to do the insert before we split
1359  * because the stuff we're inserting has to be inserted atomically. Post split,
1360  * the keys might have to go in different nodes and the split would no longer be
1361  * atomic.
1362  *
1363  * Worse, if the insert is from btree node coalescing, if we do the insert after
1364  * we do the split (and pick the pivot) - the pivot we pick might be between
1365  * nodes that were coalesced, and thus in the middle of a child node post
1366  * coalescing:
1367  */
1368 static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
1369                                     struct keylist *keys,
1370                                     struct btree_reserve *res)
1371 {
1372         struct btree_node_iter node_iter;
1373         struct bkey_i *k = bch_keylist_front(keys);
1374         struct bkey_packed *p;
1375         struct bset *i;
1376
1377         BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
1378
1379         bch_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
1380
1381         while (!bch_keylist_empty(keys)) {
1382                 k = bch_keylist_front(keys);
1383
1384                 BUG_ON(bch_keylist_u64s(keys) >
1385                        bch_btree_keys_u64s_remaining(iter->c, b));
1386                 BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
1387                 BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
1388
1389                 bch_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res);
1390                 bch_keylist_pop_front(keys);
1391         }
1392
1393         /*
1394          * We can't tolerate whiteouts here - with whiteouts there can be
1395          * duplicate keys, and it would be rather bad if we picked a duplicate
1396          * for the pivot:
1397          */
1398         i = btree_bset_first(b);
1399         p = i->start;
1400         while (p != vstruct_last(i))
1401                 if (bkey_deleted(p)) {
1402                         le16_add_cpu(&i->u64s, -p->u64s);
1403                         set_btree_bset_end(b, b->set);
1404                         memmove_u64s_down(p, bkey_next(p),
1405                                           (u64 *) vstruct_last(i) -
1406                                           (u64 *) p);
1407                 } else
1408                         p = bkey_next(p);
1409
1410         BUG_ON(b->nsets != 1 ||
1411                b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
1412
1413         btree_node_interior_verify(b);
1414 }
1415
1416 static void btree_split(struct btree *b, struct btree_iter *iter,
1417                         struct keylist *insert_keys,
1418                         struct btree_reserve *reserve,
1419                         struct btree_interior_update *as)
1420 {
1421         struct cache_set *c = iter->c;
1422         struct btree *parent = iter->nodes[b->level + 1];
1423         struct btree *n1, *n2 = NULL, *n3 = NULL;
1424         u64 start_time = local_clock();
1425
1426         BUG_ON(!parent && (b != btree_node_root(c, b)));
1427         BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1428
1429         bch_btree_interior_update_will_free_node(c, as, b);
1430
1431         n1 = btree_node_alloc_replacement(c, b, reserve);
1432         if (b->level)
1433                 btree_split_insert_keys(iter, n1, insert_keys, reserve);
1434
1435         if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
1436                 trace_bcache_btree_node_split(c, b, b->nr.live_u64s);
1437
1438                 n2 = __btree_split_node(iter, n1, reserve);
1439
1440                 bch_btree_build_aux_trees(n2);
1441                 bch_btree_build_aux_trees(n1);
1442                 six_unlock_write(&n2->lock);
1443                 six_unlock_write(&n1->lock);
1444
1445                 bch_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent, -1);
1446
1447                 /*
1448                  * Note that on recursive parent_keys == insert_keys, so we
1449                  * can't start adding new keys to parent_keys before emptying it
1450                  * out (which we did with btree_split_insert_keys() above)
1451                  */
1452                 bch_keylist_add(&as->parent_keys, &n1->key);
1453                 bch_keylist_add(&as->parent_keys, &n2->key);
1454
1455                 if (!parent) {
1456                         /* Depth increases, make a new root */
1457                         n3 = __btree_root_alloc(c, b->level + 1,
1458                                                 iter->btree_id,
1459                                                 reserve);
1460                         n3->sib_u64s[0] = U16_MAX;
1461                         n3->sib_u64s[1] = U16_MAX;
1462
1463                         btree_split_insert_keys(iter, n3, &as->parent_keys,
1464                                                 reserve);
1465                         bch_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent, -1);
1466                 }
1467         } else {
1468                 trace_bcache_btree_node_compact(c, b, b->nr.live_u64s);
1469
1470                 bch_btree_build_aux_trees(n1);
1471                 six_unlock_write(&n1->lock);
1472
1473                 bch_keylist_add(&as->parent_keys, &n1->key);
1474         }
1475
1476         bch_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent, -1);
1477
1478         /* New nodes all written, now make them visible: */
1479
1480         if (parent) {
1481                 /* Split a non root node */
1482                 bch_btree_insert_node(parent, iter, &as->parent_keys,
1483                                       reserve, as);
1484         } else if (n3) {
1485                 bch_btree_set_root(iter, n3, as, reserve);
1486         } else {
1487                 /* Root filled up but didn't need to be split */
1488                 bch_btree_set_root(iter, n1, as, reserve);
1489         }
1490
1491         btree_open_bucket_put(c, n1);
1492         if (n2)
1493                 btree_open_bucket_put(c, n2);
1494         if (n3)
1495                 btree_open_bucket_put(c, n3);
1496
1497         /*
1498          * Note - at this point other linked iterators could still have @b read
1499          * locked; we're depending on the bch_btree_iter_node_replace() calls
1500          * below removing all references to @b so we don't return with other
1501          * iterators pointing to a node they have locked that's been freed.
1502          *
1503          * We have to free the node first because the bch_iter_node_replace()
1504          * calls will drop _our_ iterator's reference - and intent lock - to @b.
1505          */
1506         bch_btree_node_free_inmem(iter, b);
1507
1508         /* Successful split, update the iterator to point to the new nodes: */
1509
1510         if (n3)
1511                 bch_btree_iter_node_replace(iter, n3);
1512         if (n2)
1513                 bch_btree_iter_node_replace(iter, n2);
1514         bch_btree_iter_node_replace(iter, n1);
1515
1516         bch_time_stats_update(&c->btree_split_time, start_time);
1517 }
1518
1519 /**
1520  * bch_btree_insert_node - insert bkeys into a given btree node
1521  *
1522  * @iter:               btree iterator
1523  * @insert_keys:        list of keys to insert
1524  * @hook:               insert callback
1525  * @persistent:         if not null, @persistent will wait on journal write
1526  *
1527  * Inserts as many keys as it can into a given btree node, splitting it if full.
1528  * If a split occurred, this function will return early. This can only happen
1529  * for leaf nodes -- inserts into interior nodes have to be atomic.
1530  */
1531 void bch_btree_insert_node(struct btree *b,
1532                            struct btree_iter *iter,
1533                            struct keylist *insert_keys,
1534                            struct btree_reserve *reserve,
1535                            struct btree_interior_update *as)
1536 {
1537         BUG_ON(!b->level);
1538         BUG_ON(!reserve || !as);
1539
1540         switch (bch_btree_insert_keys_interior(b, iter, insert_keys,
1541                                                as, reserve)) {
1542         case BTREE_INSERT_OK:
1543                 break;
1544         case BTREE_INSERT_BTREE_NODE_FULL:
1545                 btree_split(b, iter, insert_keys, reserve, as);
1546                 break;
1547         default:
1548                 BUG();
1549         }
1550 }
1551
1552 static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags)
1553 {
1554         struct cache_set *c = iter->c;
1555         struct btree *b = iter->nodes[0];
1556         struct btree_reserve *reserve;
1557         struct btree_interior_update *as;
1558         struct closure cl;
1559         int ret = 0;
1560
1561         closure_init_stack(&cl);
1562
1563         /* Hack, because gc and splitting nodes doesn't mix yet: */
1564         if (!down_read_trylock(&c->gc_lock)) {
1565                 bch_btree_iter_unlock(iter);
1566                 down_read(&c->gc_lock);
1567         }
1568
1569         /*
1570          * XXX: figure out how far we might need to split,
1571          * instead of locking/reserving all the way to the root:
1572          */
1573         if (!bch_btree_iter_set_locks_want(iter, U8_MAX)) {
1574                 ret = -EINTR;
1575                 goto out;
1576         }
1577
1578         reserve = bch_btree_reserve_get(c, b, 0, flags, &cl);
1579         if (IS_ERR(reserve)) {
1580                 ret = PTR_ERR(reserve);
1581                 if (ret == -EAGAIN) {
1582                         bch_btree_iter_unlock(iter);
1583                         up_read(&c->gc_lock);
1584                         closure_sync(&cl);
1585                         return -EINTR;
1586                 }
1587                 goto out;
1588         }
1589
1590         as = bch_btree_interior_update_alloc(c);
1591
1592         btree_split(b, iter, NULL, reserve, as);
1593         bch_btree_reserve_put(c, reserve);
1594
1595         bch_btree_iter_set_locks_want(iter, 1);
1596 out:
1597         up_read(&c->gc_lock);
1598         return ret;
1599 }
1600
1601 enum btree_node_sibling {
1602         btree_prev_sib,
1603         btree_next_sib,
1604 };
1605
1606 static struct btree *btree_node_get_sibling(struct btree_iter *iter,
1607                                             struct btree *b,
1608                                             enum btree_node_sibling sib)
1609 {
1610         struct btree *parent;
1611         struct btree_node_iter node_iter;
1612         struct bkey_packed *k;
1613         BKEY_PADDED(k) tmp;
1614         struct btree *ret;
1615         unsigned level = b->level;
1616
1617         parent = iter->nodes[level + 1];
1618         if (!parent)
1619                 return NULL;
1620
1621         if (!btree_node_relock(iter, level + 1)) {
1622                 bch_btree_iter_set_locks_want(iter, level + 2);
1623                 return ERR_PTR(-EINTR);
1624         }
1625
1626         node_iter = iter->node_iters[parent->level];
1627
1628         k = bch_btree_node_iter_peek_all(&node_iter, parent);
1629         BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
1630
1631         do {
1632                 k = sib == btree_prev_sib
1633                         ? bch_btree_node_iter_prev_all(&node_iter, parent)
1634                         : (bch_btree_node_iter_advance(&node_iter, parent),
1635                            bch_btree_node_iter_peek_all(&node_iter, parent));
1636                 if (!k)
1637                         return NULL;
1638         } while (bkey_deleted(k));
1639
1640         bkey_unpack(parent, &tmp.k, k);
1641
1642         ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1643
1644         if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
1645                 btree_node_unlock(iter, level);
1646                 ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1647         }
1648
1649         if (!IS_ERR(ret) && !btree_node_relock(iter, level)) {
1650                 six_unlock_intent(&ret->lock);
1651                 ret = ERR_PTR(-EINTR);
1652         }
1653
1654         return ret;
1655 }
1656
1657 static int __foreground_maybe_merge(struct btree_iter *iter,
1658                                     enum btree_node_sibling sib)
1659 {
1660         struct cache_set *c = iter->c;
1661         struct btree_reserve *reserve;
1662         struct btree_interior_update *as;
1663         struct bkey_format_state new_s;
1664         struct bkey_format new_f;
1665         struct bkey_i delete;
1666         struct btree *b, *m, *n, *prev, *next, *parent;
1667         struct closure cl;
1668         size_t sib_u64s;
1669         int ret = 0;
1670
1671         closure_init_stack(&cl);
1672 retry:
1673         if (!btree_node_relock(iter, iter->level))
1674                 return 0;
1675
1676         b = iter->nodes[iter->level];
1677
1678         parent = iter->nodes[b->level + 1];
1679         if (!parent)
1680                 return 0;
1681
1682         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1683                 return 0;
1684
1685         /* XXX: can't be holding read locks */
1686         m = btree_node_get_sibling(iter, b, sib);
1687         if (IS_ERR(m)) {
1688                 ret = PTR_ERR(m);
1689                 goto out;
1690         }
1691
1692         /* NULL means no sibling: */
1693         if (!m) {
1694                 b->sib_u64s[sib] = U16_MAX;
1695                 return 0;
1696         }
1697
1698         if (sib == btree_prev_sib) {
1699                 prev = m;
1700                 next = b;
1701         } else {
1702                 prev = b;
1703                 next = m;
1704         }
1705
1706         bch_bkey_format_init(&new_s);
1707         __bch_btree_calc_format(&new_s, b);
1708         __bch_btree_calc_format(&new_s, m);
1709         new_f = bch_bkey_format_done(&new_s);
1710
1711         sib_u64s = btree_node_u64s_with_format(b, &new_f) +
1712                 btree_node_u64s_with_format(m, &new_f);
1713
1714         if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
1715                 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1716                 sib_u64s /= 2;
1717                 sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1718         }
1719
1720         sib_u64s = min(sib_u64s, btree_max_u64s(c));
1721         b->sib_u64s[sib] = sib_u64s;
1722
1723         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
1724                 six_unlock_intent(&m->lock);
1725                 return 0;
1726         }
1727
1728         /* We're changing btree topology, doesn't mix with gc: */
1729         if (!down_read_trylock(&c->gc_lock)) {
1730                 six_unlock_intent(&m->lock);
1731                 bch_btree_iter_unlock(iter);
1732
1733                 down_read(&c->gc_lock);
1734                 up_read(&c->gc_lock);
1735                 ret = -EINTR;
1736                 goto out;
1737         }
1738
1739         if (!bch_btree_iter_set_locks_want(iter, U8_MAX)) {
1740                 ret = -EINTR;
1741                 goto out_unlock;
1742         }
1743
1744         reserve = bch_btree_reserve_get(c, b, 0,
1745                                         BTREE_INSERT_NOFAIL|
1746                                         BTREE_INSERT_USE_RESERVE,
1747                                         &cl);
1748         if (IS_ERR(reserve)) {
1749                 ret = PTR_ERR(reserve);
1750                 goto out_unlock;
1751         }
1752
1753         as = bch_btree_interior_update_alloc(c);
1754
1755         bch_btree_interior_update_will_free_node(c, as, b);
1756         bch_btree_interior_update_will_free_node(c, as, m);
1757
1758         n = bch_btree_node_alloc(c, b->level, b->btree_id, reserve);
1759         n->data->min_key        = prev->data->min_key;
1760         n->data->max_key        = next->data->max_key;
1761         n->data->format         = new_f;
1762         n->key.k.p              = next->key.k.p;
1763
1764         btree_node_set_format(n, new_f);
1765
1766         bch_btree_sort_into(c, n, prev);
1767         bch_btree_sort_into(c, n, next);
1768
1769         bch_btree_build_aux_trees(n);
1770         six_unlock_write(&n->lock);
1771
1772         bkey_init(&delete.k);
1773         delete.k.p = prev->key.k.p;
1774         bch_keylist_add(&as->parent_keys, &delete);
1775         bch_keylist_add(&as->parent_keys, &n->key);
1776
1777         bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
1778
1779         bch_btree_insert_node(parent, iter, &as->parent_keys, reserve, as);
1780
1781         btree_open_bucket_put(c, n);
1782         bch_btree_node_free_inmem(iter, b);
1783         bch_btree_node_free_inmem(iter, m);
1784         bch_btree_iter_node_replace(iter, n);
1785
1786         bch_btree_iter_verify(iter, n);
1787
1788         bch_btree_reserve_put(c, reserve);
1789 out_unlock:
1790         if (ret != -EINTR && ret != -EAGAIN)
1791                 bch_btree_iter_set_locks_want(iter, 1);
1792         six_unlock_intent(&m->lock);
1793         up_read(&c->gc_lock);
1794 out:
1795         if (ret == -EAGAIN || ret == -EINTR) {
1796                 bch_btree_iter_unlock(iter);
1797                 ret = -EINTR;
1798         }
1799
1800         closure_sync(&cl);
1801
1802         if (ret == -EINTR) {
1803                 ret = bch_btree_iter_traverse(iter);
1804                 if (!ret)
1805                         goto retry;
1806         }
1807
1808         return ret;
1809 }
1810
1811 static int inline foreground_maybe_merge(struct btree_iter *iter,
1812                                          enum btree_node_sibling sib)
1813 {
1814         struct cache_set *c = iter->c;
1815         struct btree *b;
1816
1817         if (!btree_node_locked(iter, iter->level))
1818                 return 0;
1819
1820         b = iter->nodes[iter->level];
1821         if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1822                 return 0;
1823
1824         return __foreground_maybe_merge(iter, sib);
1825 }
1826
1827 /**
1828  * btree_insert_key - insert a key one key into a leaf node
1829  */
1830 static enum btree_insert_ret
1831 btree_insert_key(struct btree_insert *trans,
1832                  struct btree_insert_entry *insert)
1833 {
1834         struct cache_set *c = trans->c;
1835         struct btree_iter *iter = insert->iter;
1836         struct btree *b = iter->nodes[0];
1837         enum btree_insert_ret ret;
1838         int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
1839         int old_live_u64s = b->nr.live_u64s;
1840         int live_u64s_added, u64s_added;
1841
1842         ret = !btree_node_is_extents(b)
1843                 ? bch_insert_fixup_key(trans, insert)
1844                 : bch_insert_fixup_extent(trans, insert);
1845
1846         live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
1847         u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
1848
1849         if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
1850                 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
1851         if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
1852                 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
1853
1854         if (u64s_added > live_u64s_added &&
1855             bch_maybe_compact_whiteouts(iter->c, b))
1856                 bch_btree_iter_reinit_node(iter, b);
1857
1858         trace_bcache_btree_insert_key(c, b, insert->k);
1859         return ret;
1860 }
1861
1862 static bool same_leaf_as_prev(struct btree_insert *trans,
1863                               struct btree_insert_entry *i)
1864 {
1865         /*
1866          * Because we sorted the transaction entries, if multiple iterators
1867          * point to the same leaf node they'll always be adjacent now:
1868          */
1869         return i != trans->entries &&
1870                 i[0].iter->nodes[0] == i[-1].iter->nodes[0];
1871 }
1872
1873 #define trans_for_each_entry(trans, i)                                  \
1874         for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
1875
1876 static void multi_lock_write(struct btree_insert *trans)
1877 {
1878         struct btree_insert_entry *i;
1879
1880         trans_for_each_entry(trans, i)
1881                 if (!same_leaf_as_prev(trans, i))
1882                         btree_node_lock_for_insert(i->iter->nodes[0], i->iter);
1883 }
1884
1885 static void multi_unlock_write(struct btree_insert *trans)
1886 {
1887         struct btree_insert_entry *i;
1888
1889         trans_for_each_entry(trans, i)
1890                 if (!same_leaf_as_prev(trans, i))
1891                         btree_node_unlock_write(i->iter->nodes[0], i->iter);
1892 }
1893
1894 static int btree_trans_entry_cmp(const void *_l, const void *_r)
1895 {
1896         const struct btree_insert_entry *l = _l;
1897         const struct btree_insert_entry *r = _r;
1898
1899         return btree_iter_cmp(l->iter, r->iter);
1900 }
1901
1902 /* Normal update interface: */
1903
1904 /**
1905  * __bch_btree_insert_at - insert keys at given iterator positions
1906  *
1907  * This is main entry point for btree updates.
1908  *
1909  * Return values:
1910  * -EINTR: locking changed, this function should be called again. Only returned
1911  *  if passed BTREE_INSERT_ATOMIC.
1912  * -EROFS: cache set read only
1913  * -EIO: journal or btree node IO error
1914  */
1915 int __bch_btree_insert_at(struct btree_insert *trans)
1916 {
1917         struct cache_set *c = trans->c;
1918         struct btree_insert_entry *i;
1919         struct btree_iter *split = NULL;
1920         bool cycle_gc_lock = false;
1921         unsigned u64s;
1922         int ret;
1923
1924         trans_for_each_entry(trans, i) {
1925                 EBUG_ON(i->iter->level);
1926                 EBUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
1927         }
1928
1929         sort(trans->entries, trans->nr, sizeof(trans->entries[0]),
1930              btree_trans_entry_cmp, NULL);
1931
1932         if (unlikely(!percpu_ref_tryget(&c->writes)))
1933                 return -EROFS;
1934 retry_locks:
1935         ret = -EINTR;
1936         trans_for_each_entry(trans, i)
1937                 if (!bch_btree_iter_set_locks_want(i->iter, 1))
1938                         goto err;
1939 retry:
1940         trans->did_work = false;
1941         u64s = 0;
1942         trans_for_each_entry(trans, i)
1943                 if (!i->done)
1944                         u64s += jset_u64s(i->k->k.u64s + i->extra_res);
1945
1946         memset(&trans->journal_res, 0, sizeof(trans->journal_res));
1947
1948         ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
1949                 ? bch_journal_res_get(&c->journal,
1950                                       &trans->journal_res,
1951                                       u64s, u64s)
1952                 : 0;
1953         if (ret)
1954                 goto err;
1955
1956         multi_lock_write(trans);
1957
1958         u64s = 0;
1959         trans_for_each_entry(trans, i) {
1960                 /* Multiple inserts might go to same leaf: */
1961                 if (!same_leaf_as_prev(trans, i))
1962                         u64s = 0;
1963
1964                 /*
1965                  * bch_btree_node_insert_fits() must be called under write lock:
1966                  * with only an intent lock, another thread can still call
1967                  * bch_btree_node_write(), converting an unwritten bset to a
1968                  * written one
1969                  */
1970                 if (!i->done) {
1971                         u64s += i->k->k.u64s + i->extra_res;
1972                         if (!bch_btree_node_insert_fits(c,
1973                                         i->iter->nodes[0], u64s)) {
1974                                 split = i->iter;
1975                                 goto unlock;
1976                         }
1977                 }
1978         }
1979
1980         ret = 0;
1981         split = NULL;
1982         cycle_gc_lock = false;
1983
1984         trans_for_each_entry(trans, i) {
1985                 if (i->done)
1986                         continue;
1987
1988                 switch (btree_insert_key(trans, i)) {
1989                 case BTREE_INSERT_OK:
1990                         i->done = true;
1991                         break;
1992                 case BTREE_INSERT_JOURNAL_RES_FULL:
1993                 case BTREE_INSERT_NEED_TRAVERSE:
1994                         ret = -EINTR;
1995                         break;
1996                 case BTREE_INSERT_NEED_RESCHED:
1997                         ret = -EAGAIN;
1998                         break;
1999                 case BTREE_INSERT_BTREE_NODE_FULL:
2000                         split = i->iter;
2001                         break;
2002                 case BTREE_INSERT_ENOSPC:
2003                         ret = -ENOSPC;
2004                         break;
2005                 case BTREE_INSERT_NEED_GC_LOCK:
2006                         cycle_gc_lock = true;
2007                         ret = -EINTR;
2008                         break;
2009                 default:
2010                         BUG();
2011                 }
2012
2013                 if (!trans->did_work && (ret || split))
2014                         break;
2015         }
2016 unlock:
2017         multi_unlock_write(trans);
2018         bch_journal_res_put(&c->journal, &trans->journal_res);
2019
2020         if (split)
2021                 goto split;
2022         if (ret)
2023                 goto err;
2024
2025         /*
2026          * hack: iterators are inconsistent when they hit end of leaf, until
2027          * traversed again
2028          */
2029         trans_for_each_entry(trans, i)
2030                 if (i->iter->at_end_of_leaf)
2031                         goto out;
2032
2033         trans_for_each_entry(trans, i)
2034                 if (!same_leaf_as_prev(trans, i)) {
2035                         foreground_maybe_merge(i->iter, btree_prev_sib);
2036                         foreground_maybe_merge(i->iter, btree_next_sib);
2037                 }
2038 out:
2039         /* make sure we didn't lose an error: */
2040         if (!ret && IS_ENABLED(CONFIG_BCACHE_DEBUG))
2041                 trans_for_each_entry(trans, i)
2042                         BUG_ON(!i->done);
2043
2044         percpu_ref_put(&c->writes);
2045         return ret;
2046 split:
2047         /*
2048          * have to drop journal res before splitting, because splitting means
2049          * allocating new btree nodes, and holding a journal reservation
2050          * potentially blocks the allocator:
2051          */
2052         ret = bch_btree_split_leaf(split, trans->flags);
2053         if (ret)
2054                 goto err;
2055         /*
2056          * if the split didn't have to drop locks the insert will still be
2057          * atomic (in the BTREE_INSERT_ATOMIC sense, what the caller peeked()
2058          * and is overwriting won't have changed)
2059          */
2060         goto retry_locks;
2061 err:
2062         if (cycle_gc_lock) {
2063                 down_read(&c->gc_lock);
2064                 up_read(&c->gc_lock);
2065         }
2066
2067         if (ret == -EINTR) {
2068                 trans_for_each_entry(trans, i) {
2069                         int ret2 = bch_btree_iter_traverse(i->iter);
2070                         if (ret2) {
2071                                 ret = ret2;
2072                                 goto out;
2073                         }
2074                 }
2075
2076                 /*
2077                  * BTREE_ITER_ATOMIC means we have to return -EINTR if we
2078                  * dropped locks:
2079                  */
2080                 if (!(trans->flags & BTREE_INSERT_ATOMIC))
2081                         goto retry;
2082         }
2083
2084         goto out;
2085 }
2086
2087 int bch_btree_insert_list_at(struct btree_iter *iter,
2088                              struct keylist *keys,
2089                              struct disk_reservation *disk_res,
2090                              struct extent_insert_hook *hook,
2091                              u64 *journal_seq, unsigned flags)
2092 {
2093         BUG_ON(flags & BTREE_INSERT_ATOMIC);
2094         BUG_ON(bch_keylist_empty(keys));
2095         verify_keys_sorted(keys);
2096
2097         while (!bch_keylist_empty(keys)) {
2098                 /* need to traverse between each insert */
2099                 int ret = bch_btree_iter_traverse(iter);
2100                 if (ret)
2101                         return ret;
2102
2103                 ret = bch_btree_insert_at(iter->c, disk_res, hook,
2104                                 journal_seq, flags,
2105                                 BTREE_INSERT_ENTRY(iter, bch_keylist_front(keys)));
2106                 if (ret)
2107                         return ret;
2108
2109                 bch_keylist_pop_front(keys);
2110         }
2111
2112         return 0;
2113 }
2114
2115 /**
2116  * bch_btree_insert_check_key - insert dummy key into btree
2117  *
2118  * We insert a random key on a cache miss, then compare exchange on it
2119  * once the cache promotion or backing device read completes. This
2120  * ensures that if this key is written to after the read, the read will
2121  * lose and not overwrite the key with stale data.
2122  *
2123  * Return values:
2124  * -EAGAIN: @iter->cl was put on a waitlist waiting for btree node allocation
2125  * -EINTR: btree node was changed while upgrading to write lock
2126  */
2127 int bch_btree_insert_check_key(struct btree_iter *iter,
2128                                struct bkey_i *check_key)
2129 {
2130         struct bpos saved_pos = iter->pos;
2131         struct bkey_i_cookie *cookie;
2132         BKEY_PADDED(key) tmp;
2133         int ret;
2134
2135         BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&check_key->k)));
2136
2137         check_key->k.type = KEY_TYPE_COOKIE;
2138         set_bkey_val_bytes(&check_key->k, sizeof(struct bch_cookie));
2139
2140         cookie = bkey_i_to_cookie(check_key);
2141         get_random_bytes(&cookie->v, sizeof(cookie->v));
2142
2143         bkey_copy(&tmp.key, check_key);
2144
2145         ret = bch_btree_insert_at(iter->c, NULL, NULL, NULL,
2146                                   BTREE_INSERT_ATOMIC,
2147                                   BTREE_INSERT_ENTRY(iter, &tmp.key));
2148
2149         bch_btree_iter_rewind(iter, saved_pos);
2150
2151         return ret;
2152 }
2153
2154 /**
2155  * bch_btree_insert - insert keys into the extent btree
2156  * @c:                  pointer to struct cache_set
2157  * @id:                 btree to insert into
2158  * @insert_keys:        list of keys to insert
2159  * @hook:               insert callback
2160  */
2161 int bch_btree_insert(struct cache_set *c, enum btree_id id,
2162                      struct bkey_i *k,
2163                      struct disk_reservation *disk_res,
2164                      struct extent_insert_hook *hook,
2165                      u64 *journal_seq, int flags)
2166 {
2167         struct btree_iter iter;
2168         int ret, ret2;
2169
2170         bch_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&k->k));
2171
2172         ret = bch_btree_iter_traverse(&iter);
2173         if (unlikely(ret))
2174                 goto out;
2175
2176         ret = bch_btree_insert_at(c, disk_res, hook, journal_seq, flags,
2177                                   BTREE_INSERT_ENTRY(&iter, k));
2178 out:    ret2 = bch_btree_iter_unlock(&iter);
2179
2180         return ret ?: ret2;
2181 }
2182
2183 /**
2184  * bch_btree_update - like bch_btree_insert(), but asserts that we're
2185  * overwriting an existing key
2186  */
2187 int bch_btree_update(struct cache_set *c, enum btree_id id,
2188                      struct bkey_i *k, u64 *journal_seq)
2189 {
2190         struct btree_iter iter;
2191         struct bkey_s_c u;
2192         int ret;
2193
2194         EBUG_ON(id == BTREE_ID_EXTENTS);
2195
2196         bch_btree_iter_init_intent(&iter, c, id, k->k.p);
2197
2198         u = bch_btree_iter_peek_with_holes(&iter);
2199         ret = btree_iter_err(u);
2200         if (ret)
2201                 return ret;
2202
2203         if (bkey_deleted(u.k)) {
2204                 bch_btree_iter_unlock(&iter);
2205                 return -ENOENT;
2206         }
2207
2208         ret = bch_btree_insert_at(c, NULL, NULL, journal_seq, 0,
2209                                   BTREE_INSERT_ENTRY(&iter, k));
2210         bch_btree_iter_unlock(&iter);
2211         return ret;
2212 }
2213
2214 /*
2215  * bch_btree_delete_range - delete everything within a given range
2216  *
2217  * Range is a half open interval - [start, end)
2218  */
2219 int bch_btree_delete_range(struct cache_set *c, enum btree_id id,
2220                            struct bpos start,
2221                            struct bpos end,
2222                            struct bversion version,
2223                            struct disk_reservation *disk_res,
2224                            struct extent_insert_hook *hook,
2225                            u64 *journal_seq)
2226 {
2227         struct btree_iter iter;
2228         struct bkey_s_c k;
2229         int ret = 0;
2230
2231         bch_btree_iter_init_intent(&iter, c, id, start);
2232
2233         while ((k = bch_btree_iter_peek(&iter)).k &&
2234                !(ret = btree_iter_err(k))) {
2235                 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
2236                 /* really shouldn't be using a bare, unpadded bkey_i */
2237                 struct bkey_i delete;
2238
2239                 if (bkey_cmp(iter.pos, end) >= 0)
2240                         break;
2241
2242                 bkey_init(&delete.k);
2243
2244                 /*
2245                  * For extents, iter.pos won't necessarily be the same as
2246                  * bkey_start_pos(k.k) (for non extents they always will be the
2247                  * same). It's important that we delete starting from iter.pos
2248                  * because the range we want to delete could start in the middle
2249                  * of k.
2250                  *
2251                  * (bch_btree_iter_peek() does guarantee that iter.pos >=
2252                  * bkey_start_pos(k.k)).
2253                  */
2254                 delete.k.p = iter.pos;
2255                 delete.k.version = version;
2256
2257                 if (iter.is_extents) {
2258                         /*
2259                          * The extents btree is special - KEY_TYPE_DISCARD is
2260                          * used for deletions, not KEY_TYPE_DELETED. This is an
2261                          * internal implementation detail that probably
2262                          * shouldn't be exposed (internally, KEY_TYPE_DELETED is
2263                          * used as a proxy for k->size == 0):
2264                          */
2265                         delete.k.type = KEY_TYPE_DISCARD;
2266
2267                         /* create the biggest key we can */
2268                         bch_key_resize(&delete.k, max_sectors);
2269                         bch_cut_back(end, &delete.k);
2270                 }
2271
2272                 ret = bch_btree_insert_at(c, disk_res, hook, journal_seq,
2273                                           BTREE_INSERT_NOFAIL,
2274                                           BTREE_INSERT_ENTRY(&iter, &delete));
2275                 if (ret)
2276                         break;
2277
2278                 bch_btree_iter_cond_resched(&iter);
2279         }
2280
2281         bch_btree_iter_unlock(&iter);
2282         return ret;
2283 }
2284
2285 /**
2286  * bch_btree_node_rewrite - Rewrite/move a btree node
2287  *
2288  * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e.
2289  * btree_check_reserve() has to wait)
2290  */
2291 int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
2292                            struct closure *cl)
2293 {
2294         struct cache_set *c = iter->c;
2295         struct btree *n, *parent = iter->nodes[b->level + 1];
2296         struct btree_reserve *reserve;
2297         struct btree_interior_update *as;
2298         unsigned flags = BTREE_INSERT_NOFAIL;
2299
2300         /*
2301          * if caller is going to wait if allocating reserve fails, then this is
2302          * a rewrite that must succeed:
2303          */
2304         if (cl)
2305                 flags |= BTREE_INSERT_USE_RESERVE;
2306
2307         if (!bch_btree_iter_set_locks_want(iter, U8_MAX))
2308                 return -EINTR;
2309
2310         reserve = bch_btree_reserve_get(c, b, 0, flags, cl);
2311         if (IS_ERR(reserve)) {
2312                 trace_bcache_btree_gc_rewrite_node_fail(c, b);
2313                 return PTR_ERR(reserve);
2314         }
2315
2316         as = bch_btree_interior_update_alloc(c);
2317
2318         bch_btree_interior_update_will_free_node(c, as, b);
2319
2320         n = btree_node_alloc_replacement(c, b, reserve);
2321
2322         bch_btree_build_aux_trees(n);
2323         six_unlock_write(&n->lock);
2324
2325         trace_bcache_btree_gc_rewrite_node(c, b);
2326
2327         bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
2328
2329         if (parent) {
2330                 bch_btree_insert_node(parent, iter,
2331                                       &keylist_single(&n->key),
2332                                       reserve, as);
2333         } else {
2334                 bch_btree_set_root(iter, n, as, reserve);
2335         }
2336
2337         btree_open_bucket_put(c, n);
2338
2339         bch_btree_node_free_inmem(iter, b);
2340
2341         BUG_ON(!bch_btree_iter_node_replace(iter, n));
2342
2343         bch_btree_reserve_put(c, reserve);
2344         return 0;
2345 }