4 #include "bkey_methods.h"
5 #include "btree_cache.h"
7 #include "btree_update.h"
9 #include "btree_iter.h"
10 #include "btree_locking.h"
17 #include <linux/random.h>
18 #include <linux/sort.h>
19 #include <trace/events/bcachefs.h>
21 static void btree_interior_update_updated_root(struct bch_fs *,
22 struct btree_interior_update *,
25 /* Calculate ideal packed bkey format for new btree nodes: */
27 void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
29 struct bkey_packed *k;
33 bch2_bkey_format_add_pos(s, b->data->min_key);
36 for (k = btree_bkey_first(b, t);
37 k != btree_bkey_last(b, t);
39 if (!bkey_whiteout(k)) {
40 uk = bkey_unpack_key(b, k);
41 bch2_bkey_format_add_key(s, &uk);
45 static struct bkey_format bch2_btree_calc_format(struct btree *b)
47 struct bkey_format_state s;
49 bch2_bkey_format_init(&s);
50 __bch2_btree_calc_format(&s, b);
52 return bch2_bkey_format_done(&s);
55 static size_t btree_node_u64s_with_format(struct btree *b,
56 struct bkey_format *new_f)
58 struct bkey_format *old_f = &b->format;
60 /* stupid integer promotion rules */
62 (((int) new_f->key_u64s - old_f->key_u64s) *
63 (int) b->nr.packed_keys) +
64 (((int) new_f->key_u64s - BKEY_U64s) *
65 (int) b->nr.unpacked_keys);
67 BUG_ON(delta + b->nr.live_u64s < 0);
69 return b->nr.live_u64s + delta;
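/*
 * Worked example for the delta computation above (illustrative numbers,
 * not from a real filesystem): with 100 packed keys, 10 unpacked keys,
 * old key_u64s == 2 and new key_u64s == 3, we get
 *
 *	delta = (3 - 2) * 100 + (3 - BKEY_U64s) * 10
 *
 * - every currently-packed key grows by one u64, and every unpacked key
 * (currently stored at the full BKEY_U64s) is assumed to pack down to
 * the new format's key_u64s. Adding delta to nr.live_u64s gives the
 * projected size of the node's keys under the new format.
 */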
73 * bch2_btree_node_format_fits - check if we could rewrite node with a new format
75 * This assumes all keys can pack with the new format -- it just checks if
76 * the re-packed keys would fit inside the node itself.
78 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
79 struct bkey_format *new_f)
81 size_t u64s = btree_node_u64s_with_format(b, new_f);
83 return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
86 /* Btree node freeing/allocation: */
89 * We're doing the index update that makes @b unreachable, update stuff to reflect that.
92 * Must be called _before_ btree_interior_update_updated_root() or
93 * btree_interior_update_updated_btree:
95 static void bch2_btree_node_free_index(struct bch_fs *c, struct btree *b,
96 enum btree_id id, struct bkey_s_c k,
97 struct bch_fs_usage *stats)
99 struct btree_interior_update *as;
100 struct pending_btree_node_free *d;
102 mutex_lock(&c->btree_interior_update_lock);
104 for_each_pending_btree_node_free(c, as, d)
105 if (!bkey_cmp(k.k->p, d->key.k.p) &&
106 bkey_val_bytes(k.k) == bkey_val_bytes(&d->key.k) &&
107 !memcmp(k.v, &d->key.v, bkey_val_bytes(k.k)))
112 d->index_update_done = true;
115 * Btree nodes are accounted as freed in bch_alloc_stats when they're
116 * freed from the index:
118 stats->s[S_COMPRESSED][S_META] -= c->sb.btree_node_size;
119 stats->s[S_UNCOMPRESSED][S_META] -= c->sb.btree_node_size;
122 * We're dropping @k from the btree, but it's still live until the
123 * index update is persistent so we need to keep a reference around for
124 * mark and sweep to find - that's primarily what the
125 * btree_node_pending_free list is for.
127 * So here (when we set index_update_done = true), we're moving an
128 * existing reference to a different part of the larger "gc keyspace" -
129 * and the new position comes after the old position, since GC marks
130 * the pending free list after it walks the btree.
132 * If we move the reference while mark and sweep is _between_ the old
133 * and the new position, mark and sweep will see the reference twice
134 * and it'll get double accounted - so check for that here and subtract
135 * to cancel out one of mark and sweep's markings if necessary:
139 * bch2_mark_key() compares the current gc pos to the pos we're
140 * moving this reference from, hence one comparison here:
142 if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
143 struct bch_fs_usage tmp = { 0 };
145 bch2_mark_key(c, bkey_i_to_s_c(&d->key),
146 -c->sb.btree_node_size, true, b
147 ? gc_pos_btree_node(b)
148 : gc_pos_btree_root(id),
151 * Don't apply tmp - pending deletes aren't tracked in
156 mutex_unlock(&c->btree_interior_update_lock);
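/*
 * Timeline sketch of the cancellation above (descriptive, derived from
 * the comments in this function): mark and sweep walks the btree first
 * and the pending-free list (GC_PHASE_PENDING_DELETE) last. The
 * gc_pos_cmp() check here tells us GC hasn't reached the pending-free
 * list yet, so it will mark this reference there later;
 * bch2_mark_key() internally compares against the position we're
 * moving the reference from to tell whether GC already counted it in
 * the btree. When both are true the key would be counted twice, hence
 * the single compensating negative mark.
 */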
159 static void __btree_node_free(struct bch_fs *c, struct btree *b,
160 struct btree_iter *iter)
162 trace_btree_node_free(c, b);
164 BUG_ON(b == btree_node_root(c, b));
166 BUG_ON(!list_empty(&b->write_blocked));
168 six_lock_write(&b->lock);
170 if (btree_node_dirty(b))
171 bch2_btree_complete_write(c, b, btree_current_write(b));
172 clear_btree_node_dirty(b);
174 bch2_btree_node_hash_remove(c, b);
176 mutex_lock(&c->btree_cache_lock);
177 list_move(&b->list, &c->btree_cache_freeable);
178 mutex_unlock(&c->btree_cache_lock);
181 * By using six_unlock_write() directly instead of
182 * bch2_btree_node_unlock_write(), we don't update the iterator's
183 * sequence numbers and cause future bch2_btree_node_relock() calls to fail:
186 six_unlock_write(&b->lock);
189 void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
191 struct open_bucket *ob = b->ob;
195 __btree_node_free(c, b, NULL);
197 bch2_open_bucket_put(c, ob);
200 void bch2_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
202 bch2_btree_iter_node_drop_linked(iter, b);
204 __btree_node_free(iter->c, b, iter);
206 bch2_btree_iter_node_drop(iter, b);
209 static void bch2_btree_node_free_ondisk(struct bch_fs *c,
210 struct pending_btree_node_free *pending)
212 struct bch_fs_usage stats = { 0 };
214 BUG_ON(!pending->index_update_done);
216 bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
217 -c->sb.btree_node_size, true,
218 gc_phase(GC_PHASE_PENDING_DELETE),
221 * Don't apply stats - pending deletes aren't tracked in
226 void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b)
228 bch2_open_bucket_put(c, b->ob);
232 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
234 struct disk_reservation *res,
238 struct open_bucket *ob;
240 unsigned reserve = use_reserve ? 0 : BTREE_NODE_RESERVE;
242 mutex_lock(&c->btree_reserve_cache_lock);
243 if (c->btree_reserve_cache_nr > reserve) {
244 struct btree_alloc *a =
245 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
248 bkey_copy(&tmp.k, &a->k);
249 mutex_unlock(&c->btree_reserve_cache_lock);
252 mutex_unlock(&c->btree_reserve_cache_lock);
255 /* alloc_sectors is weird, I suppose */
256 bkey_extent_init(&tmp.k);
257 tmp.k.k.size = c->sb.btree_node_size;
259 ob = bch2_alloc_sectors(c, &c->btree_write_point,
260 bkey_i_to_extent(&tmp.k),
262 c->opts.metadata_replicas_required,
263 use_reserve ? RESERVE_BTREE : RESERVE_NONE,
268 if (tmp.k.k.size < c->sb.btree_node_size) {
269 bch2_open_bucket_put(c, ob);
273 b = bch2_btree_node_mem_alloc(c);
275 /* we hold cannibalize_lock: */
279 bkey_copy(&b->key, &tmp.k);
286 static struct btree *bch2_btree_node_alloc(struct bch_fs *c,
287 unsigned level, enum btree_id id,
288 struct btree_reserve *reserve)
292 BUG_ON(!reserve->nr);
294 b = reserve->b[--reserve->nr];
296 BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
298 set_btree_node_accessed(b);
299 set_btree_node_dirty(b);
301 bch2_bset_init_first(b, &b->data->keys);
302 memset(&b->nr, 0, sizeof(b->nr));
303 b->data->magic = cpu_to_le64(bset_magic(c));
305 SET_BTREE_NODE_ID(b->data, id);
306 SET_BTREE_NODE_LEVEL(b->data, level);
307 b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
309 bch2_btree_build_aux_trees(b);
311 bch2_check_mark_super(c, &b->key, true);
313 trace_btree_node_alloc(c, b);
317 struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *c,
319 struct bkey_format format,
320 struct btree_reserve *reserve)
324 n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve);
326 n->data->min_key = b->data->min_key;
327 n->data->max_key = b->data->max_key;
328 n->data->format = format;
330 btree_node_set_format(n, format);
332 bch2_btree_sort_into(c, n, b);
334 btree_node_reset_sib_u64s(n);
336 n->key.k.p = b->key.k.p;
340 static struct btree *bch2_btree_node_alloc_replacement(struct bch_fs *c,
342 struct btree_reserve *reserve)
344 struct bkey_format new_f = bch2_btree_calc_format(b);
347 * The keys might expand with the new format - if they wouldn't fit in
348 * the btree node anymore, use the old format for now:
350 if (!bch2_btree_node_format_fits(c, b, &new_f))
353 return __bch2_btree_node_alloc_replacement(c, b, new_f, reserve);
356 static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
357 struct btree_reserve *btree_reserve)
359 struct btree *old = btree_node_root(c, b);
361 /* Root nodes cannot be reaped */
362 mutex_lock(&c->btree_cache_lock);
363 list_del_init(&b->list);
364 mutex_unlock(&c->btree_cache_lock);
366 mutex_lock(&c->btree_root_lock);
367 btree_node_root(c, b) = b;
368 mutex_unlock(&c->btree_root_lock);
372 * New allocation (we're not being called because we're in
373 * bch2_btree_root_read()) - do marking while holding
376 struct bch_fs_usage stats = { 0 };
378 bch2_mark_key(c, bkey_i_to_s_c(&b->key),
379 c->sb.btree_node_size, true,
380 gc_pos_btree_root(b->btree_id),
384 bch2_btree_node_free_index(c, NULL, old->btree_id,
385 bkey_i_to_s_c(&old->key),
387 bch2_fs_usage_apply(c, &stats, &btree_reserve->disk_res,
388 gc_pos_btree_root(b->btree_id));
391 bch2_recalc_btree_reserve(c);
394 static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
396 struct btree_root *r = &c->btree_roots[b->btree_id];
398 mutex_lock(&c->btree_root_lock);
401 bkey_copy(&r->key, &b->key);
405 mutex_unlock(&c->btree_root_lock);
409 * Only for filesystem bringup, when first reading the btree roots or allocating
410 * btree roots when initializing a new filesystem:
412 void bch2_btree_set_root_initial(struct bch_fs *c, struct btree *b,
413 struct btree_reserve *btree_reserve)
415 BUG_ON(btree_node_root(c, b));
417 bch2_btree_set_root_inmem(c, b, btree_reserve);
418 bch2_btree_set_root_ondisk(c, b);
422 * bch2_btree_set_root - update the root in memory and on disk
424 * To ensure forward progress, the current task must not be holding any
425 * btree node write locks. However, you must hold an intent lock on the
428 * Note: This allocates a journal entry but doesn't add any keys to
429 * it. All the btree roots are part of every journal write, so there
430 * is nothing new to be done. This just guarantees that there is a journal write.
433 static void bch2_btree_set_root(struct btree_iter *iter, struct btree *b,
434 struct btree_interior_update *as,
435 struct btree_reserve *btree_reserve)
437 struct bch_fs *c = iter->c;
440 trace_btree_set_root(c, b);
443 old = btree_node_root(c, b);
446 * Ensure no one is using the old root while we switch to the
449 bch2_btree_node_lock_write(old, iter);
451 bch2_btree_set_root_inmem(c, b, btree_reserve);
453 btree_interior_update_updated_root(c, as, iter->btree_id);
456 * Unlock old root after new root is visible:
458 * The new root isn't persistent, but that's ok: we still have
459 * an intent lock on the new root, and any updates that would
460 * depend on the new root would have to update the new root.
462 bch2_btree_node_unlock_write(old, iter);
465 static struct btree *__btree_root_alloc(struct bch_fs *c, unsigned level,
467 struct btree_reserve *reserve)
469 struct btree *b = bch2_btree_node_alloc(c, level, id, reserve);
471 b->data->min_key = POS_MIN;
472 b->data->max_key = POS_MAX;
473 b->data->format = bch2_btree_calc_format(b);
474 b->key.k.p = POS_MAX;
476 btree_node_set_format(b, b->data->format);
477 bch2_btree_build_aux_trees(b);
479 six_unlock_write(&b->lock);
484 void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
486 bch2_disk_reservation_put(c, &reserve->disk_res);
488 mutex_lock(&c->btree_reserve_cache_lock);
490 while (reserve->nr) {
491 struct btree *b = reserve->b[--reserve->nr];
493 six_unlock_write(&b->lock);
495 if (c->btree_reserve_cache_nr <
496 ARRAY_SIZE(c->btree_reserve_cache)) {
497 struct btree_alloc *a =
498 &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
502 bkey_copy(&a->k, &b->key);
504 bch2_open_bucket_put(c, b->ob);
508 __btree_node_free(c, b, NULL);
510 six_unlock_intent(&b->lock);
513 mutex_unlock(&c->btree_reserve_cache_lock);
515 mempool_free(reserve, &c->btree_reserve_pool);
518 static struct btree_reserve *__bch2_btree_reserve_get(struct bch_fs *c,
523 struct btree_reserve *reserve;
525 struct disk_reservation disk_res = { 0, 0 };
526 unsigned sectors = nr_nodes * c->sb.btree_node_size;
527 int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD|
528 BCH_DISK_RESERVATION_METADATA;
530 if (flags & BTREE_INSERT_NOFAIL)
531 disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
534 * This check isn't necessary for correctness - it's just to potentially
535 * prevent us from doing a lot of work that'll end up being wasted:
537 ret = bch2_journal_error(&c->journal);
541 if (bch2_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
542 return ERR_PTR(-ENOSPC);
544 BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
547 * Protects reaping from the btree node cache and using the btree node
548 * open bucket reserve:
550 ret = bch2_btree_node_cannibalize_lock(c, cl);
552 bch2_disk_reservation_put(c, &disk_res);
556 reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
558 reserve->disk_res = disk_res;
561 while (reserve->nr < nr_nodes) {
562 b = __bch2_btree_node_alloc(c, flags & BTREE_INSERT_USE_RESERVE,
569 reserve->b[reserve->nr++] = b;
572 bch2_btree_node_cannibalize_unlock(c);
575 bch2_btree_reserve_put(c, reserve);
576 bch2_btree_node_cannibalize_unlock(c);
577 trace_btree_reserve_get_fail(c, nr_nodes, cl);
581 struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
583 unsigned extra_nodes,
587 unsigned depth = btree_node_root(c, b)->level - b->level;
588 unsigned nr_nodes = btree_reserve_required_nodes(depth) + extra_nodes;
590 return __bch2_btree_reserve_get(c, nr_nodes, flags, cl);
594 int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id,
595 struct closure *writes)
598 struct btree_reserve *reserve;
601 closure_init_stack(&cl);
604 /* XXX haven't calculated capacity yet :/ */
605 reserve = __bch2_btree_reserve_get(c, 1, 0, &cl);
606 if (!IS_ERR(reserve))
609 if (PTR_ERR(reserve) == -ENOSPC)
610 return PTR_ERR(reserve);
615 b = __btree_root_alloc(c, 0, id, reserve);
617 bch2_btree_node_write(c, b, writes, SIX_LOCK_intent, -1);
619 bch2_btree_set_root_initial(c, b, reserve);
620 bch2_btree_open_bucket_put(c, b);
621 six_unlock_intent(&b->lock);
623 bch2_btree_reserve_put(c, reserve);
628 static void bch2_insert_fixup_btree_ptr(struct btree_iter *iter,
630 struct bkey_i *insert,
631 struct btree_node_iter *node_iter,
632 struct disk_reservation *disk_res)
634 struct bch_fs *c = iter->c;
635 struct bch_fs_usage stats = { 0 };
636 struct bkey_packed *k;
639 if (bkey_extent_is_data(&insert->k))
640 bch2_mark_key(c, bkey_i_to_s_c(insert),
641 c->sb.btree_node_size, true,
642 gc_pos_btree_node(b), &stats, 0);
644 while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
645 !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
646 bch2_btree_node_iter_advance(node_iter, b);
649 * If we're overwriting, look up pending delete and mark so that gc
650 * marks it on the pending delete list:
652 if (k && !bkey_cmp_packed(b, k, &insert->k))
653 bch2_btree_node_free_index(c, b, iter->btree_id,
654 bkey_disassemble(b, k, &tmp),
657 bch2_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b));
659 bch2_btree_bset_insert_key(iter, b, node_iter, insert);
660 set_btree_node_dirty(b);
663 /* Inserting into a given leaf node (last stage of insert): */
665 /* Handle overwrites and do insert, for non extents: */
666 bool bch2_btree_bset_insert_key(struct btree_iter *iter,
668 struct btree_node_iter *node_iter,
669 struct bkey_i *insert)
671 const struct bkey_format *f = &b->format;
672 struct bkey_packed *k;
674 unsigned clobber_u64s;
676 EBUG_ON(btree_node_just_written(b));
677 EBUG_ON(bset_written(b, btree_bset_last(b)));
678 EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
679 EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
680 bkey_cmp(insert->k.p, b->data->max_key) > 0);
681 BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b));
683 k = bch2_btree_node_iter_peek_all(node_iter, b);
684 if (k && !bkey_cmp_packed(b, k, &insert->k)) {
685 BUG_ON(bkey_whiteout(k));
687 t = bch2_bkey_to_bset(b, k);
689 if (bset_unwritten(b, bset(b, t)) &&
690 bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
691 BUG_ON(bkey_whiteout(k) != bkey_whiteout(&insert->k));
693 k->type = insert->k.type;
694 memcpy_u64s(bkeyp_val(f, k), &insert->v,
695 bkey_val_u64s(&insert->k));
699 insert->k.needs_whiteout = k->needs_whiteout;
701 btree_keys_account_key_drop(&b->nr, t - b->set, k);
703 if (t == bset_tree_last(b)) {
704 clobber_u64s = k->u64s;
707 * If we're deleting, and the key we're deleting doesn't
708 * need a whiteout (it wasn't overwriting a key that had
709 * been written to disk) - just delete it:
711 if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
712 bch2_bset_delete(b, k, clobber_u64s);
713 bch2_btree_node_iter_fix(iter, b, node_iter, t,
721 k->type = KEY_TYPE_DELETED;
722 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
725 if (bkey_whiteout(&insert->k)) {
726 reserve_whiteout(b, t, k);
729 k->needs_whiteout = false;
733 * Deleting, but the key to delete wasn't found - nothing to do:
735 if (bkey_whiteout(&insert->k))
738 insert->k.needs_whiteout = false;
741 t = bset_tree_last(b);
742 k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
745 bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
746 if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
747 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
748 clobber_u64s, k->u64s);
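/*
 * Recap of the overwrite cases handled above (descriptive only):
 *
 * - Same-position key in an unwritten bset with a same-size value:
 *   overwrite type and value in place.
 * - Otherwise the old key is dropped from the key accounting; if it's
 *   in the last (unwritten) bset and we're deleting a key that needs
 *   no whiteout, it's removed outright, else it becomes
 *   KEY_TYPE_DELETED or a reserved whiteout.
 * - No key at that position: plain bch2_bset_insert() - unless the
 *   insert was itself a deletion, which is then a no-op.
 */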
752 static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
755 struct bch_fs *c = container_of(j, struct bch_fs, journal);
756 struct btree_write *w = container_of(pin, struct btree_write, journal);
757 struct btree *b = container_of(w, struct btree, writes[i]);
759 six_lock_read(&b->lock);
761 * Reusing a btree node can race with the journal reclaim code calling
762 * the journal pin flush fn, and there's no good fix for this: we don't
763 * really want journal_pin_drop() to block until the flush fn is no
764 * longer running, because journal_pin_drop() is called from the btree
765 * node write endio function, and we can't wait on the flush fn to
766 * finish running in mca_reap() - where we make reused btree nodes ready
767 * to use again - because there, we're holding the lock this function takes.
770 * So, the b->level check is a hack so we don't try to write nodes we shouldn't:
774 bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, i);
775 six_unlock_read(&b->lock);
778 static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin)
780 return __btree_node_flush(j, pin, 0);
783 static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin)
785 return __btree_node_flush(j, pin, 1);
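/*
 * Each btree node carries two btree_write structs (b->writes[0] and
 * b->writes[1]) so that one write can be in flight while the next is
 * filling up; that's why there are two flush callbacks here, and why
 * bch2_btree_journal_key() below selects between them with
 * btree_node_write_idx(b).
 */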
788 void bch2_btree_journal_key(struct btree_insert *trans,
789 struct btree_iter *iter,
790 struct bkey_i *insert)
792 struct bch_fs *c = trans->c;
793 struct journal *j = &c->journal;
794 struct btree *b = iter->nodes[0];
795 struct btree_write *w = btree_current_write(b);
797 EBUG_ON(iter->level || b->level);
798 EBUG_ON(!trans->journal_res.ref &&
799 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
801 if (!journal_pin_active(&w->journal))
802 bch2_journal_pin_add(j, &w->journal,
803 btree_node_write_idx(b) == 0
805 : btree_node_flush1);
807 if (trans->journal_res.ref) {
808 u64 seq = trans->journal_res.seq;
809 bool needs_whiteout = insert->k.needs_whiteout;
812 * have a bug where we're seeing an extent with an invalid crc
813 * entry in the journal, trying to track it down:
815 BUG_ON(bch2_bkey_invalid(c, b->btree_id, bkey_i_to_s_c(insert)));
818 insert->k.needs_whiteout = false;
819 bch2_journal_add_keys(j, &trans->journal_res,
820 b->btree_id, insert);
821 insert->k.needs_whiteout = needs_whiteout;
823 if (trans->journal_seq)
824 *trans->journal_seq = seq;
825 btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
828 if (!btree_node_dirty(b))
829 set_btree_node_dirty(b);
832 static enum btree_insert_ret
833 bch2_insert_fixup_key(struct btree_insert *trans,
834 struct btree_insert_entry *insert)
836 struct btree_iter *iter = insert->iter;
840 if (bch2_btree_bset_insert_key(iter,
842 &iter->node_iters[0],
844 bch2_btree_journal_key(trans, iter, insert->k);
846 trans->did_work = true;
847 return BTREE_INSERT_OK;
850 static void verify_keys_sorted(struct keylist *l)
852 #ifdef CONFIG_BCACHEFS_DEBUG
855 for_each_keylist_key(l, k)
856 BUG_ON(bkey_next(k) != l->top &&
857 bkey_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
861 static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter)
863 struct bch_fs *c = iter->c;
865 bch2_btree_node_lock_write(b, iter);
867 if (btree_node_just_written(b) &&
868 bch2_btree_post_write_cleanup(c, b))
869 bch2_btree_iter_reinit_node(iter, b);
872 * If the last bset has been written, or if it's gotten too big - start
873 * a new bset to insert into:
875 if (want_new_bset(c, b))
876 bch2_btree_init_next(c, b, iter);
879 /* Asynchronous interior node update machinery */
881 struct btree_interior_update *
882 bch2_btree_interior_update_alloc(struct bch_fs *c)
884 struct btree_interior_update *as;
886 as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
887 memset(as, 0, sizeof(*as));
888 closure_init(&as->cl, &c->cl);
890 as->mode = BTREE_INTERIOR_NO_UPDATE;
892 bch2_keylist_init(&as->parent_keys, as->inline_keys,
893 ARRAY_SIZE(as->inline_keys));
895 mutex_lock(&c->btree_interior_update_lock);
896 list_add(&as->list, &c->btree_interior_update_list);
897 mutex_unlock(&c->btree_interior_update_lock);
902 static void btree_interior_update_free(struct closure *cl)
904 struct btree_interior_update *as = container_of(cl, struct btree_interior_update, cl);
906 mempool_free(as, &as->c->btree_interior_update_pool);
909 static void btree_interior_update_nodes_reachable(struct closure *cl)
911 struct btree_interior_update *as =
912 container_of(cl, struct btree_interior_update, cl);
913 struct bch_fs *c = as->c;
916 bch2_journal_pin_drop(&c->journal, &as->journal);
918 mutex_lock(&c->btree_interior_update_lock);
920 for (i = 0; i < as->nr_pending; i++)
921 bch2_btree_node_free_ondisk(c, &as->pending[i]);
924 mutex_unlock(&c->btree_interior_update_lock);
926 mutex_lock(&c->btree_interior_update_lock);
928 mutex_unlock(&c->btree_interior_update_lock);
930 closure_wake_up(&as->wait);
932 closure_return_with_destructor(cl, btree_interior_update_free);
935 static void btree_interior_update_nodes_written(struct closure *cl)
937 struct btree_interior_update *as =
938 container_of(cl, struct btree_interior_update, cl);
939 struct bch_fs *c = as->c;
942 if (bch2_journal_error(&c->journal)) {
946 /* XXX: missing error handling, damnit */
948 /* check for journal error, bail out if we flushed */
951 * We did an update to a parent node where the pointers we added pointed
952 * to child nodes that weren't written yet: now, the child nodes have
953 * been written so we can write out the update to the interior node.
956 mutex_lock(&c->btree_interior_update_lock);
958 case BTREE_INTERIOR_NO_UPDATE:
960 case BTREE_INTERIOR_UPDATING_NODE:
961 /* The usual case: */
962 b = READ_ONCE(as->b);
964 if (!six_trylock_read(&b->lock)) {
965 mutex_unlock(&c->btree_interior_update_lock);
966 six_lock_read(&b->lock);
967 six_unlock_read(&b->lock);
971 BUG_ON(!btree_node_dirty(b));
972 closure_wait(&btree_current_write(b)->wait, cl);
974 list_del(&as->write_blocked_list);
976 if (list_empty(&b->write_blocked))
977 bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, -1);
978 six_unlock_read(&b->lock);
981 case BTREE_INTERIOR_UPDATING_AS:
983 * The btree node we originally updated has been freed and is
984 * being rewritten - so we don't need to write anything here, we just
985 * need to signal to that btree_interior_update that it's ok to make the
986 * new replacement node visible:
988 closure_put(&as->parent_as->cl);
991 * and then we have to wait on that btree_interior_update to finish:
993 closure_wait(&as->parent_as->wait, cl);
996 case BTREE_INTERIOR_UPDATING_ROOT:
997 /* b is the new btree root: */
998 b = READ_ONCE(as->b);
1000 if (!six_trylock_read(&b->lock)) {
1001 mutex_unlock(&c->btree_interior_update_lock);
1002 six_lock_read(&b->lock);
1003 six_unlock_read(&b->lock);
1007 BUG_ON(c->btree_roots[b->btree_id].as != as);
1008 c->btree_roots[b->btree_id].as = NULL;
1010 bch2_btree_set_root_ondisk(c, b);
1013 * We don't have to wait on anything here (before
1014 * btree_interior_update_nodes_reachable frees the old nodes
1015 * ondisk) - we've ensured that the very next journal write will
1016 * have the pointer to the new root, and before the allocator
1017 * can reuse the old nodes it'll have to do a journal commit:
1019 six_unlock_read(&b->lock);
1021 mutex_unlock(&c->btree_interior_update_lock);
1023 continue_at(cl, btree_interior_update_nodes_reachable, system_wq);
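/*
 * State machine recap (derived from the switch above): an update starts
 * in BTREE_INTERIOR_NO_UPDATE and moves to UPDATING_NODE when it
 * inserts into an existing interior node, or to UPDATING_ROOT when it
 * installs a new root; if the node it updated is itself freed before
 * the update completes, it's redirected to UPDATING_AS and waits on its
 * parent_as instead. Every path funnels through
 * btree_interior_update_nodes_written() and finishes in
 * btree_interior_update_nodes_reachable(), which drops the journal pin
 * and frees the replaced nodes on disk.
 */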
1027 * We're updating @b with pointers to nodes that haven't finished writing yet:
1028 * block @b from being written until @as completes
1030 static void btree_interior_update_updated_btree(struct bch_fs *c,
1031 struct btree_interior_update *as,
1034 mutex_lock(&c->btree_interior_update_lock);
1036 BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1037 BUG_ON(!btree_node_dirty(b));
1039 as->mode = BTREE_INTERIOR_UPDATING_NODE;
1041 list_add(&as->write_blocked_list, &b->write_blocked);
1043 mutex_unlock(&c->btree_interior_update_lock);
1045 bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
1047 continue_at(&as->cl, btree_interior_update_nodes_written,
1048 system_freezable_wq);
1051 static void btree_interior_update_updated_root(struct bch_fs *c,
1052 struct btree_interior_update *as,
1053 enum btree_id btree_id)
1055 struct btree_root *r = &c->btree_roots[btree_id];
1057 mutex_lock(&c->btree_interior_update_lock);
1059 BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1062 * Old root might not be persistent yet - if so, redirect its
1063 * btree_interior_update operation to point to us:
1066 BUG_ON(r->as->mode != BTREE_INTERIOR_UPDATING_ROOT);
1069 r->as->mode = BTREE_INTERIOR_UPDATING_AS;
1070 r->as->parent_as = as;
1071 closure_get(&as->cl);
1074 as->mode = BTREE_INTERIOR_UPDATING_ROOT;
1078 mutex_unlock(&c->btree_interior_update_lock);
1080 bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
1082 continue_at(&as->cl, btree_interior_update_nodes_written,
1083 system_freezable_wq);
1086 static void interior_update_flush(struct journal *j, struct journal_entry_pin *pin)
1088 struct btree_interior_update *as =
1089 container_of(pin, struct btree_interior_update, journal);
1091 bch2_journal_flush_seq_async(j, as->journal_seq, NULL);
1095 * @b is being split/rewritten: it may have pointers to not-yet-written btree
1096 * nodes and thus outstanding btree_interior_updates - redirect @b's
1097 * btree_interior_updates to point to this btree_interior_update:
1099 void bch2_btree_interior_update_will_free_node(struct bch_fs *c,
1100 struct btree_interior_update *as,
1103 struct btree_interior_update *p, *n;
1104 struct pending_btree_node_free *d;
1105 struct bset_tree *t;
1108 * Does this node have data that hasn't been written in the journal?
1110 * If so, we have to wait for the corresponding journal entry to be
1111 * written before making the new nodes reachable - we can't just carry
1112 * over the bset->journal_seq tracking, since we'll be mixing those keys
1113 * in with keys that aren't in the journal anymore:
1116 as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
1119 * Does this node have unwritten data that has a pin on the journal?
1121 * If so, transfer that pin to the btree_interior_update operation -
1122 * note that if we're freeing multiple nodes, we only need to keep the
1123 * oldest pin of any of the nodes we're freeing. We'll release the pin
1124 * when the new nodes are persistent and reachable on disk:
1126 bch2_journal_pin_add_if_older(&c->journal,
1127 &b->writes[0].journal,
1128 &as->journal, interior_update_flush);
1129 bch2_journal_pin_add_if_older(&c->journal,
1130 &b->writes[1].journal,
1131 &as->journal, interior_update_flush);
1133 mutex_lock(&c->btree_interior_update_lock);
1136 * Does this node have any btree_interior_update operations preventing
1137 * it from being written?
1139 * If so, redirect them to point to this btree_interior_update: we can
1140 * write out our new nodes, but we won't make them visible until those
1141 * operations complete
1143 list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
1144 BUG_ON(p->mode != BTREE_INTERIOR_UPDATING_NODE);
1146 p->mode = BTREE_INTERIOR_UPDATING_AS;
1147 list_del(&p->write_blocked_list);
1150 closure_get(&as->cl);
1153 /* Add this node to the list of nodes being freed: */
1154 BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
1156 d = &as->pending[as->nr_pending++];
1157 d->index_update_done = false;
1158 d->seq = b->data->keys.seq;
1159 d->btree_id = b->btree_id;
1160 d->level = b->level;
1161 bkey_copy(&d->key, &b->key);
1163 mutex_unlock(&c->btree_interior_update_lock);
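/*
 * Note on the pin transfer above: bch2_journal_pin_add_if_older() only
 * moves the btree_interior_update's single journal pin backwards when a
 * node's write pin is older than whatever the update already pins -
 * which is how "only keep the oldest pin of any of the nodes we're
 * freeing" falls out without tracking the pins individually.
 */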
1166 static void btree_node_interior_verify(struct btree *b)
1168 struct btree_node_iter iter;
1169 struct bkey_packed *k;
1173 bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
1175 BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
1176 bkey_cmp_left_packed(b, k, &b->key.k.p));
1178 BUG_ON((bch2_btree_node_iter_advance(&iter, b),
1179 !bch2_btree_node_iter_end(&iter)));
1184 k = bch2_btree_node_iter_peek(&iter, b);
1188 msg = "isn't what it should be";
1189 if (bkey_cmp_left_packed(b, k, &b->key.k.p))
1192 bch2_btree_node_iter_advance(&iter, b);
1194 msg = "isn't last key";
1195 if (!bch2_btree_node_iter_end(&iter))
1199 bch2_dump_btree_node(b);
1200 printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
1201 b->key.k.p.offset, msg);
1206 static enum btree_insert_ret
1207 bch2_btree_insert_keys_interior(struct btree *b,
1208 struct btree_iter *iter,
1209 struct keylist *insert_keys,
1210 struct btree_interior_update *as,
1211 struct btree_reserve *res)
1213 struct bch_fs *c = iter->c;
1214 struct btree_iter *linked;
1215 struct btree_node_iter node_iter;
1216 struct bkey_i *insert = bch2_keylist_front(insert_keys);
1217 struct bkey_packed *k;
1219 BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1221 BUG_ON(!as || as->b);
1222 verify_keys_sorted(insert_keys);
1224 btree_node_lock_for_insert(b, iter);
1226 if (bch_keylist_u64s(insert_keys) >
1227 bch_btree_keys_u64s_remaining(c, b)) {
1228 bch2_btree_node_unlock_write(b, iter);
1229 return BTREE_INSERT_BTREE_NODE_FULL;
1232 /* Don't screw up @iter's position: */
1233 node_iter = iter->node_iters[b->level];
1236 * btree_split(), btree_gc_coalesce() will insert keys before
1237 * the iterator's current position - they know the keys go in
1238 * the node the iterator points to:
1240 while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
1241 (bkey_cmp_packed(b, k, &insert->k) >= 0))
1244 while (!bch2_keylist_empty(insert_keys)) {
1245 insert = bch2_keylist_front(insert_keys);
1247 bch2_insert_fixup_btree_ptr(iter, b, insert,
1248 &node_iter, &res->disk_res);
1249 bch2_keylist_pop_front(insert_keys);
1252 btree_interior_update_updated_btree(c, as, b);
1254 for_each_linked_btree_node(iter, b, linked)
1255 bch2_btree_node_iter_peek(&linked->node_iters[b->level],
1257 bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
1259 bch2_btree_iter_verify(iter, b);
1261 if (bch2_maybe_compact_whiteouts(c, b))
1262 bch2_btree_iter_reinit_node(iter, b);
1264 bch2_btree_node_unlock_write(b, iter);
1266 btree_node_interior_verify(b);
1267 return BTREE_INSERT_OK;
1271 * Move keys from n1 (original replacement node, now lower node) to n2 (higher
1274 static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n1,
1275 struct btree_reserve *reserve)
1277 size_t nr_packed = 0, nr_unpacked = 0;
1279 struct bset *set1, *set2;
1280 struct bkey_packed *k, *prev = NULL;
1282 n2 = bch2_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve);
1283 n2->data->max_key = n1->data->max_key;
1284 n2->data->format = n1->format;
1285 n2->key.k.p = n1->key.k.p;
1287 btree_node_set_format(n2, n2->data->format);
1289 set1 = btree_bset_first(n1);
1290 set2 = btree_bset_first(n2);
1293 * Has to be a linear search because we don't have an auxiliary search tree yet
1298 if (bkey_next(k) == vstruct_last(set1))
1300 if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
1314 n1->key.k.p = bkey_unpack_pos(n1, prev);
1315 n1->data->max_key = n1->key.k.p;
1317 btree_type_successor(n1->btree_id, n1->key.k.p);
1319 set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
1320 set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
1322 set_btree_bset_end(n1, n1->set);
1323 set_btree_bset_end(n2, n2->set);
1325 n2->nr.live_u64s = le16_to_cpu(set2->u64s);
1326 n2->nr.bset_u64s[0] = le16_to_cpu(set2->u64s);
1327 n2->nr.packed_keys = n1->nr.packed_keys - nr_packed;
1328 n2->nr.unpacked_keys = n1->nr.unpacked_keys - nr_unpacked;
1330 n1->nr.live_u64s = le16_to_cpu(set1->u64s);
1331 n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s);
1332 n1->nr.packed_keys = nr_packed;
1333 n1->nr.unpacked_keys = nr_unpacked;
1335 BUG_ON(!set1->u64s);
1336 BUG_ON(!set2->u64s);
1338 memcpy_u64s(set2->start,
1340 le16_to_cpu(set2->u64s));
1342 btree_node_reset_sib_u64s(n1);
1343 btree_node_reset_sib_u64s(n2);
1345 bch2_verify_btree_nr_keys(n1);
1346 bch2_verify_btree_nr_keys(n2);
1349 btree_node_interior_verify(n1);
1350 btree_node_interior_verify(n2);
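/*
 * Worked example for the split point above (illustrative numbers): with
 * le16_to_cpu(set1->u64s) == 100, the scan stops at the first key whose
 * offset within the bset reaches 60 u64s, so n1 keeps roughly the first
 * 3/5 of the set by volume and n2 takes the remainder. The cut is made
 * by u64s rather than by key count because packed keys vary in size.
 */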
1357 * For updates to interior nodes, we've got to do the insert before we split
1358 * because the stuff we're inserting has to be inserted atomically. Post split,
1359 * the keys might have to go in different nodes and the split would no longer be atomic.
1362 * Worse, if the insert is from btree node coalescing, if we do the insert after
1363 * we do the split (and pick the pivot) - the pivot we pick might be between
1364 * nodes that were coalesced, and thus in the middle of a child node post
1367 static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
1368 struct keylist *keys,
1369 struct btree_reserve *res)
1371 struct btree_node_iter node_iter;
1372 struct bkey_i *k = bch2_keylist_front(keys);
1373 struct bkey_packed *p;
1376 BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
1378 bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
1380 while (!bch2_keylist_empty(keys)) {
1381 k = bch2_keylist_front(keys);
1383 BUG_ON(bch_keylist_u64s(keys) >
1384 bch_btree_keys_u64s_remaining(iter->c, b));
1385 BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
1386 BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
1388 bch2_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res);
1389 bch2_keylist_pop_front(keys);
1393 * We can't tolerate whiteouts here - with whiteouts there can be
1394 * duplicate keys, and it would be rather bad if we picked a duplicate
1397 i = btree_bset_first(b);
1399 while (p != vstruct_last(i))
1400 if (bkey_deleted(p)) {
1401 le16_add_cpu(&i->u64s, -p->u64s);
1402 set_btree_bset_end(b, b->set);
1403 memmove_u64s_down(p, bkey_next(p),
1404 (u64 *) vstruct_last(i) -
1409 BUG_ON(b->nsets != 1 ||
1410 b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
1412 btree_node_interior_verify(b);
1415 static void btree_split(struct btree *b, struct btree_iter *iter,
1416 struct keylist *insert_keys,
1417 struct btree_reserve *reserve,
1418 struct btree_interior_update *as)
1420 struct bch_fs *c = iter->c;
1421 struct btree *parent = iter->nodes[b->level + 1];
1422 struct btree *n1, *n2 = NULL, *n3 = NULL;
1423 u64 start_time = local_clock();
1425 BUG_ON(!parent && (b != btree_node_root(c, b)));
1426 BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1428 bch2_btree_interior_update_will_free_node(c, as, b);
1430 n1 = bch2_btree_node_alloc_replacement(c, b, reserve);
1432 btree_split_insert_keys(iter, n1, insert_keys, reserve);
1434 if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
1435 trace_btree_node_split(c, b, b->nr.live_u64s);
1437 n2 = __btree_split_node(iter, n1, reserve);
1439 bch2_btree_build_aux_trees(n2);
1440 bch2_btree_build_aux_trees(n1);
1441 six_unlock_write(&n2->lock);
1442 six_unlock_write(&n1->lock);
1444 bch2_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent, -1);
1447 * Note that on recursive parent_keys == insert_keys, so we
1448 * can't start adding new keys to parent_keys before emptying it
1449 * out (which we did with btree_split_insert_keys() above)
1451 bch2_keylist_add(&as->parent_keys, &n1->key);
1452 bch2_keylist_add(&as->parent_keys, &n2->key);
1455 /* Depth increases, make a new root */
1456 n3 = __btree_root_alloc(c, b->level + 1,
1459 n3->sib_u64s[0] = U16_MAX;
1460 n3->sib_u64s[1] = U16_MAX;
1462 btree_split_insert_keys(iter, n3, &as->parent_keys,
1464 bch2_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent, -1);
1467 trace_btree_node_compact(c, b, b->nr.live_u64s);
1469 bch2_btree_build_aux_trees(n1);
1470 six_unlock_write(&n1->lock);
1472 bch2_keylist_add(&as->parent_keys, &n1->key);
1475 bch2_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent, -1);
1477 /* New nodes all written, now make them visible: */
1480 /* Split a non root node */
1481 bch2_btree_insert_node(parent, iter, &as->parent_keys,
1484 bch2_btree_set_root(iter, n3, as, reserve);
1486 /* Root filled up but didn't need to be split */
1487 bch2_btree_set_root(iter, n1, as, reserve);
1490 bch2_btree_open_bucket_put(c, n1);
1492 bch2_btree_open_bucket_put(c, n2);
1494 bch2_btree_open_bucket_put(c, n3);
1497 * Note - at this point other linked iterators could still have @b read
1498 * locked; we're depending on the bch2_btree_iter_node_replace() calls
1499 * below removing all references to @b so we don't return with other
1500 * iterators pointing to a node they have locked that's been freed.
1502 * We have to free the node first because the bch2_iter_node_replace()
1503 * calls will drop _our_ iterator's reference - and intent lock - to @b.
1505 bch2_btree_node_free_inmem(iter, b);
1507 /* Successful split, update the iterator to point to the new nodes: */
1510 bch2_btree_iter_node_replace(iter, n3);
1512 bch2_btree_iter_node_replace(iter, n2);
1513 bch2_btree_iter_node_replace(iter, n1);
1515 bch2_time_stats_update(&c->btree_split_time, start_time);
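/*
 * Note the recursion above: inserting @as->parent_keys into the parent
 * goes through bch2_btree_insert_node(), which may itself call
 * btree_split() on the parent - and on that recursive call parent_keys
 * == insert_keys, which is why btree_split_insert_keys() has to drain
 * the keylist before anything new is added to it (see the comment near
 * the bch2_keylist_add() calls above).
 */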
1519 * bch2_btree_insert_node - insert bkeys into a given btree node
1521 * @iter: btree iterator
1522 * @insert_keys: list of keys to insert
1523 * @reserve: btree reserve the new nodes are allocated from
1524 * @as: the btree_interior_update operation in progress
1526 * Inserts as many keys as it can into a given btree node, splitting it if full.
1527 * If a split occurred, this function will return early. This can only happen
1528 * for leaf nodes -- inserts into interior nodes have to be atomic.
1530 void bch2_btree_insert_node(struct btree *b,
1531 struct btree_iter *iter,
1532 struct keylist *insert_keys,
1533 struct btree_reserve *reserve,
1534 struct btree_interior_update *as)
1537 BUG_ON(!reserve || !as);
1539 switch (bch2_btree_insert_keys_interior(b, iter, insert_keys,
1541 case BTREE_INSERT_OK:
1543 case BTREE_INSERT_BTREE_NODE_FULL:
1544 btree_split(b, iter, insert_keys, reserve, as);
1551 static int bch2_btree_split_leaf(struct btree_iter *iter, unsigned flags)
1553 struct bch_fs *c = iter->c;
1554 struct btree *b = iter->nodes[0];
1555 struct btree_reserve *reserve;
1556 struct btree_interior_update *as;
1560 closure_init_stack(&cl);
1562 /* Hack, because gc and splitting nodes doesn't mix yet: */
1563 if (!down_read_trylock(&c->gc_lock)) {
1564 bch2_btree_iter_unlock(iter);
1565 down_read(&c->gc_lock);
1569 * XXX: figure out how far we might need to split,
1570 * instead of locking/reserving all the way to the root:
1572 if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1577 reserve = bch2_btree_reserve_get(c, b, 0, flags, &cl);
1578 if (IS_ERR(reserve)) {
1579 ret = PTR_ERR(reserve);
1580 if (ret == -EAGAIN) {
1581 bch2_btree_iter_unlock(iter);
1582 up_read(&c->gc_lock);
1589 as = bch2_btree_interior_update_alloc(c);
1591 btree_split(b, iter, NULL, reserve, as);
1592 bch2_btree_reserve_put(c, reserve);
1594 bch2_btree_iter_set_locks_want(iter, 1);
1596 up_read(&c->gc_lock);
1600 enum btree_node_sibling {
1605 static struct btree *btree_node_get_sibling(struct btree_iter *iter,
1607 enum btree_node_sibling sib)
1609 struct btree *parent;
1610 struct btree_node_iter node_iter;
1611 struct bkey_packed *k;
1614 unsigned level = b->level;
1616 parent = iter->nodes[level + 1];
1620 if (!bch2_btree_node_relock(iter, level + 1)) {
1621 bch2_btree_iter_set_locks_want(iter, level + 2);
1622 return ERR_PTR(-EINTR);
1625 node_iter = iter->node_iters[parent->level];
1627 k = bch2_btree_node_iter_peek_all(&node_iter, parent);
1628 BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
1631 k = sib == btree_prev_sib
1632 ? bch2_btree_node_iter_prev_all(&node_iter, parent)
1633 : (bch2_btree_node_iter_advance(&node_iter, parent),
1634 bch2_btree_node_iter_peek_all(&node_iter, parent));
1637 } while (bkey_deleted(k));
1639 bch2_bkey_unpack(parent, &tmp.k, k);
1641 ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1643 if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
1644 btree_node_unlock(iter, level);
1645 ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1648 if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) {
1649 six_unlock_intent(&ret->lock);
1650 ret = ERR_PTR(-EINTR);
1656 static int __foreground_maybe_merge(struct btree_iter *iter,
1657 enum btree_node_sibling sib)
1659 struct bch_fs *c = iter->c;
1660 struct btree_reserve *reserve;
1661 struct btree_interior_update *as;
1662 struct bkey_format_state new_s;
1663 struct bkey_format new_f;
1664 struct bkey_i delete;
1665 struct btree *b, *m, *n, *prev, *next, *parent;
1670 closure_init_stack(&cl);
1672 if (!bch2_btree_node_relock(iter, iter->level))
1675 b = iter->nodes[iter->level];
1677 parent = iter->nodes[b->level + 1];
1681 if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1684 /* XXX: can't be holding read locks */
1685 m = btree_node_get_sibling(iter, b, sib);
1691 /* NULL means no sibling: */
1693 b->sib_u64s[sib] = U16_MAX;
1697 if (sib == btree_prev_sib) {
1705 bch2_bkey_format_init(&new_s);
1706 __bch2_btree_calc_format(&new_s, b);
1707 __bch2_btree_calc_format(&new_s, m);
1708 new_f = bch2_bkey_format_done(&new_s);
1710 sib_u64s = btree_node_u64s_with_format(b, &new_f) +
1711 btree_node_u64s_with_format(m, &new_f);
1713 if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
1714 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1716 sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1719 sib_u64s = min(sib_u64s, btree_max_u64s(c));
1720 b->sib_u64s[sib] = sib_u64s;
1722 if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
1723 six_unlock_intent(&m->lock);
1727 /* We're changing btree topology, doesn't mix with gc: */
1728 if (!down_read_trylock(&c->gc_lock)) {
1729 six_unlock_intent(&m->lock);
1730 bch2_btree_iter_unlock(iter);
1732 down_read(&c->gc_lock);
1733 up_read(&c->gc_lock);
1738 if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1743 reserve = bch2_btree_reserve_get(c, b, 0,
1744 BTREE_INSERT_NOFAIL|
1745 BTREE_INSERT_USE_RESERVE,
1747 if (IS_ERR(reserve)) {
1748 ret = PTR_ERR(reserve);
1752 as = bch2_btree_interior_update_alloc(c);
1754 bch2_btree_interior_update_will_free_node(c, as, b);
1755 bch2_btree_interior_update_will_free_node(c, as, m);
1757 n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve);
1758 n->data->min_key = prev->data->min_key;
1759 n->data->max_key = next->data->max_key;
1760 n->data->format = new_f;
1761 n->key.k.p = next->key.k.p;
1763 btree_node_set_format(n, new_f);
1765 bch2_btree_sort_into(c, n, prev);
1766 bch2_btree_sort_into(c, n, next);
1768 bch2_btree_build_aux_trees(n);
1769 six_unlock_write(&n->lock);
1771 bkey_init(&delete.k);
1772 delete.k.p = prev->key.k.p;
1773 bch2_keylist_add(&as->parent_keys, &delete);
1774 bch2_keylist_add(&as->parent_keys, &n->key);
1776 bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
1778 bch2_btree_insert_node(parent, iter, &as->parent_keys, reserve, as);
1780 bch2_btree_open_bucket_put(c, n);
1781 bch2_btree_node_free_inmem(iter, b);
1782 bch2_btree_node_free_inmem(iter, m);
1783 bch2_btree_iter_node_replace(iter, n);
1785 bch2_btree_iter_verify(iter, n);
1787 bch2_btree_reserve_put(c, reserve);
1789 if (ret != -EINTR && ret != -EAGAIN)
1790 bch2_btree_iter_set_locks_want(iter, 1);
1791 six_unlock_intent(&m->lock);
1792 up_read(&c->gc_lock);
1794 if (ret == -EAGAIN || ret == -EINTR) {
1795 bch2_btree_iter_unlock(iter);
1801 if (ret == -EINTR) {
1802 ret = bch2_btree_iter_traverse(iter);
1810 static inline int foreground_maybe_merge(struct btree_iter *iter,
1811 enum btree_node_sibling sib)
1813 struct bch_fs *c = iter->c;
1816 if (!btree_node_locked(iter, iter->level))
1819 b = iter->nodes[iter->level];
1820 if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1823 return __foreground_maybe_merge(iter, sib);
1827 * btree_insert_key - insert a key into a leaf node
1829 static enum btree_insert_ret
1830 btree_insert_key(struct btree_insert *trans,
1831 struct btree_insert_entry *insert)
1833 struct bch_fs *c = trans->c;
1834 struct btree_iter *iter = insert->iter;
1835 struct btree *b = iter->nodes[0];
1836 enum btree_insert_ret ret;
1837 int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
1838 int old_live_u64s = b->nr.live_u64s;
1839 int live_u64s_added, u64s_added;
1841 ret = !btree_node_is_extents(b)
1842 ? bch2_insert_fixup_key(trans, insert)
1843 : bch2_insert_fixup_extent(trans, insert);
1845 live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
1846 u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
1848 if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
1849 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
1850 if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
1851 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
1853 if (u64s_added > live_u64s_added &&
1854 bch2_maybe_compact_whiteouts(iter->c, b))
1855 bch2_btree_iter_reinit_node(iter, b);
1857 trace_btree_insert_key(c, b, insert->k);
1861 static bool same_leaf_as_prev(struct btree_insert *trans,
1862 struct btree_insert_entry *i)
1865 * Because we sorted the transaction entries, if multiple iterators
1866 * point to the same leaf node they'll always be adjacent now:
1868 return i != trans->entries &&
1869 i[0].iter->nodes[0] == i[-1].iter->nodes[0];
1872 #define trans_for_each_entry(trans, i) \
1873 for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
1875 static void multi_lock_write(struct btree_insert *trans)
1877 struct btree_insert_entry *i;
1879 trans_for_each_entry(trans, i)
1880 if (!same_leaf_as_prev(trans, i))
1881 btree_node_lock_for_insert(i->iter->nodes[0], i->iter);
1884 static void multi_unlock_write(struct btree_insert *trans)
1886 struct btree_insert_entry *i;
1888 trans_for_each_entry(trans, i)
1889 if (!same_leaf_as_prev(trans, i))
1890 bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter);
1893 static int btree_trans_entry_cmp(const void *_l, const void *_r)
1895 const struct btree_insert_entry *l = _l;
1896 const struct btree_insert_entry *r = _r;
1898 return btree_iter_cmp(l->iter, r->iter);
1901 /* Normal update interface: */
1904 * __bch2_btree_insert_at - insert keys at given iterator positions
1906 * This is the main entry point for btree updates.
1909 * -EINTR: locking changed, this function should be called again. Only returned
1910 * if passed BTREE_INSERT_ATOMIC.
1911 * -EROFS: filesystem read only
1912 * -EIO: journal or btree node IO error
1914 int __bch2_btree_insert_at(struct btree_insert *trans)
1916 struct bch_fs *c = trans->c;
1917 struct btree_insert_entry *i;
1918 struct btree_iter *split = NULL;
1919 bool cycle_gc_lock = false;
1923 trans_for_each_entry(trans, i) {
1924 EBUG_ON(i->iter->level);
1925 EBUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
1928 sort(trans->entries, trans->nr, sizeof(trans->entries[0]),
1929 btree_trans_entry_cmp, NULL);
1931 if (unlikely(!percpu_ref_tryget(&c->writes)))
1935 trans_for_each_entry(trans, i)
1936 if (!bch2_btree_iter_set_locks_want(i->iter, 1))
1939 trans->did_work = false;
1941 trans_for_each_entry(trans, i)
1943 u64s += jset_u64s(i->k->k.u64s + i->extra_res);
1945 memset(&trans->journal_res, 0, sizeof(trans->journal_res));
1947 ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
1948 ? bch2_journal_res_get(&c->journal,
1949 &trans->journal_res,
1955 multi_lock_write(trans);
1958 trans_for_each_entry(trans, i) {
1959 /* Multiple inserts might go to same leaf: */
1960 if (!same_leaf_as_prev(trans, i))
1964 * bch2_btree_node_insert_fits() must be called under write lock:
1965 * with only an intent lock, another thread can still call
1966 * bch2_btree_node_write(), converting an unwritten bset to a written one
1970 u64s += i->k->k.u64s + i->extra_res;
1971 if (!bch2_btree_node_insert_fits(c,
1972 i->iter->nodes[0], u64s)) {
1981 cycle_gc_lock = false;
1983 trans_for_each_entry(trans, i) {
1987 switch (btree_insert_key(trans, i)) {
1988 case BTREE_INSERT_OK:
1991 case BTREE_INSERT_JOURNAL_RES_FULL:
1992 case BTREE_INSERT_NEED_TRAVERSE:
1995 case BTREE_INSERT_NEED_RESCHED:
1998 case BTREE_INSERT_BTREE_NODE_FULL:
2001 case BTREE_INSERT_ENOSPC:
2004 case BTREE_INSERT_NEED_GC_LOCK:
2005 cycle_gc_lock = true;
2012 if (!trans->did_work && (ret || split))
2016 multi_unlock_write(trans);
2017 bch2_journal_res_put(&c->journal, &trans->journal_res);
2025 * hack: iterators are inconsistent when they hit end of leaf, until traversed again:
2028 trans_for_each_entry(trans, i)
2029 if (i->iter->at_end_of_leaf)
2032 trans_for_each_entry(trans, i)
2033 if (!same_leaf_as_prev(trans, i)) {
2034 foreground_maybe_merge(i->iter, btree_prev_sib);
2035 foreground_maybe_merge(i->iter, btree_next_sib);
2038 /* make sure we didn't lose an error: */
2039 if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
2040 trans_for_each_entry(trans, i)
2043 percpu_ref_put(&c->writes);
2047 * have to drop journal res before splitting, because splitting means
2048 * allocating new btree nodes, and holding a journal reservation
2049 * potentially blocks the allocator:
2051 ret = bch2_btree_split_leaf(split, trans->flags);
2055 * if the split didn't have to drop locks the insert will still be
2056 * atomic (in the BTREE_INSERT_ATOMIC sense, what the caller peeked()
2057 * and is overwriting won't have changed)
2061 if (cycle_gc_lock) {
2062 down_read(&c->gc_lock);
2063 up_read(&c->gc_lock);
2066 if (ret == -EINTR) {
2067 trans_for_each_entry(trans, i) {
2068 int ret2 = bch2_btree_iter_traverse(i->iter);
2076 * BTREE_ITER_ATOMIC means we have to return -EINTR if we
2079 if (!(trans->flags & BTREE_INSERT_ATOMIC))
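/*
 * Minimal usage sketch for this interface (hypothetical caller; iter,
 * k and journal_seq are illustrative names, not from this file):
 */
#if 0
	struct btree_iter iter;
	u64 journal_seq = 0;
	int ret;

	bch2_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS,
				    bkey_start_pos(&k->k));

	ret = bch2_btree_iter_traverse(&iter);
	if (!ret)
		ret = bch2_btree_insert_at(c, NULL, NULL, &journal_seq, 0,
					   BTREE_INSERT_ENTRY(&iter, k));
	bch2_btree_iter_unlock(&iter);
#endif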
2086 int bch2_btree_insert_list_at(struct btree_iter *iter,
2087 struct keylist *keys,
2088 struct disk_reservation *disk_res,
2089 struct extent_insert_hook *hook,
2090 u64 *journal_seq, unsigned flags)
2092 BUG_ON(flags & BTREE_INSERT_ATOMIC);
2093 BUG_ON(bch2_keylist_empty(keys));
2094 verify_keys_sorted(keys);
2096 while (!bch2_keylist_empty(keys)) {
2097 /* need to traverse between each insert */
2098 int ret = bch2_btree_iter_traverse(iter);
2102 ret = bch2_btree_insert_at(iter->c, disk_res, hook,
2104 BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
2108 bch2_keylist_pop_front(keys);
2115 * bch2_btree_insert_check_key - insert dummy key into btree
2117 * We insert a random key on a cache miss, then compare exchange on it
2118 * once the cache promotion or backing device read completes. This
2119 * ensures that if this key is written to after the read, the read will
2120 * lose and not overwrite the key with stale data.
2123 * -EAGAIN: @iter->cl was put on a waitlist waiting for btree node allocation
2124 * -EINTR: btree node was changed while upgrading to write lock
2126 int bch2_btree_insert_check_key(struct btree_iter *iter,
2127 struct bkey_i *check_key)
2129 struct bpos saved_pos = iter->pos;
2130 struct bkey_i_cookie *cookie;
2131 BKEY_PADDED(key) tmp;
2134 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&check_key->k)));
2136 check_key->k.type = KEY_TYPE_COOKIE;
2137 set_bkey_val_bytes(&check_key->k, sizeof(struct bch_cookie));
2139 cookie = bkey_i_to_cookie(check_key);
2140 get_random_bytes(&cookie->v, sizeof(cookie->v));
2142 bkey_copy(&tmp.key, check_key);
2144 ret = bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
2145 BTREE_INSERT_ATOMIC,
2146 BTREE_INSERT_ENTRY(iter, &tmp.key));
2148 bch2_btree_iter_rewind(iter, saved_pos);
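/*
 * Typical flow around this helper (sketch, based on the comment above):
 * on a cache miss the caller inserts the random cookie and kicks off
 * the read; when the read completes it re-checks the key, and only if
 * the cookie is still in place is the read data still current - if the
 * key changed, a write raced with the read and the stale data is
 * dropped.
 */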
2154 * bch2_btree_insert - insert keys into a given btree
2155 * @c: pointer to struct bch_fs
2156 * @id: btree to insert into
2157 * @k: key to insert
2158 * @hook: insert callback
2160 int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
2162 struct disk_reservation *disk_res,
2163 struct extent_insert_hook *hook,
2164 u64 *journal_seq, int flags)
2166 struct btree_iter iter;
2169 bch2_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&k->k));
2171 ret = bch2_btree_iter_traverse(&iter);
2175 ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
2176 BTREE_INSERT_ENTRY(&iter, k));
2177 out: ret2 = bch2_btree_iter_unlock(&iter);
2183 * bch2_btree_update - like bch2_btree_insert(), but asserts that we're
2184 * overwriting an existing key
2186 int bch2_btree_update(struct bch_fs *c, enum btree_id id,
2187 struct bkey_i *k, u64 *journal_seq)
2189 struct btree_iter iter;
2193 EBUG_ON(id == BTREE_ID_EXTENTS);
2195 bch2_btree_iter_init_intent(&iter, c, id, k->k.p);
2197 u = bch2_btree_iter_peek_with_holes(&iter);
2198 ret = btree_iter_err(u);
2202 if (bkey_deleted(u.k)) {
2203 bch2_btree_iter_unlock(&iter);
2207 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0,
2208 BTREE_INSERT_ENTRY(&iter, k));
2209 bch2_btree_iter_unlock(&iter);
2214 * bch2_btree_delete_range - delete everything within a given range
2216 * Range is a half open interval - [start, end)
2218 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
2221 struct bversion version,
2222 struct disk_reservation *disk_res,
2223 struct extent_insert_hook *hook,
2226 struct btree_iter iter;
2230 bch2_btree_iter_init_intent(&iter, c, id, start);
2232 while ((k = bch2_btree_iter_peek(&iter)).k &&
2233 !(ret = btree_iter_err(k))) {
2234 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
2235 /* really shouldn't be using a bare, unpadded bkey_i */
2236 struct bkey_i delete;
2238 if (bkey_cmp(iter.pos, end) >= 0)
2241 bkey_init(&delete.k);
2244 * For extents, iter.pos won't necessarily be the same as
2245 * bkey_start_pos(k.k) (for non extents they always will be the
2246 * same). It's important that we delete starting from iter.pos
2247 * because the range we want to delete could start in the middle
2250 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
2251 * bkey_start_pos(k.k)).
2253 delete.k.p = iter.pos;
2254 delete.k.version = version;
2256 if (iter.is_extents) {
2258 * The extents btree is special - KEY_TYPE_DISCARD is
2259 * used for deletions, not KEY_TYPE_DELETED. This is an
2260 * internal implementation detail that probably
2261 * shouldn't be exposed (internally, KEY_TYPE_DELETED is
2262 * used as a proxy for k->size == 0):
2264 delete.k.type = KEY_TYPE_DISCARD;
2266 /* create the biggest key we can */
2267 bch2_key_resize(&delete.k, max_sectors);
2268 bch2_cut_back(end, &delete.k);
2271 ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
2272 BTREE_INSERT_NOFAIL,
2273 BTREE_INSERT_ENTRY(&iter, &delete));
2277 bch2_btree_iter_cond_resched(&iter);
2280 bch2_btree_iter_unlock(&iter);
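/*
 * Usage sketch (hypothetical caller - inum, start_sector and end_sector
 * are illustrative; POS() and ZERO_VERSION are assumed from the
 * bcachefs headers, not defined in this file):
 */
#if 0
	int ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
					  POS(inum, start_sector),
					  POS(inum, end_sector),
					  ZERO_VERSION, NULL, NULL, NULL);
#endif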
2285 * bch2_btree_node_rewrite - Rewrite/move a btree node
2287 * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e.
2288 * btree_check_reserve() has to wait)
2290 int bch2_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
2293 struct bch_fs *c = iter->c;
2294 struct btree *n, *parent = iter->nodes[b->level + 1];
2295 struct btree_reserve *reserve;
2296 struct btree_interior_update *as;
2297 unsigned flags = BTREE_INSERT_NOFAIL;
2300 * if caller is going to wait if allocating reserve fails, then this is
2301 * a rewrite that must succeed:
2304 flags |= BTREE_INSERT_USE_RESERVE;
2306 if (!bch2_btree_iter_set_locks_want(iter, U8_MAX))
2309 reserve = bch2_btree_reserve_get(c, b, 0, flags, cl);
2310 if (IS_ERR(reserve)) {
2311 trace_btree_gc_rewrite_node_fail(c, b);
2312 return PTR_ERR(reserve);
2315 as = bch2_btree_interior_update_alloc(c);
2317 bch2_btree_interior_update_will_free_node(c, as, b);
2319 n = bch2_btree_node_alloc_replacement(c, b, reserve);
2321 bch2_btree_build_aux_trees(n);
2322 six_unlock_write(&n->lock);
2324 trace_btree_gc_rewrite_node(c, b);
2326 bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
2329 bch2_btree_insert_node(parent, iter,
2330 &keylist_single(&n->key),
2333 bch2_btree_set_root(iter, n, as, reserve);
2336 bch2_btree_open_bucket_put(c, n);
2338 bch2_btree_node_free_inmem(iter, b);
2340 BUG_ON(!bch2_btree_iter_node_replace(iter, n));
2342 bch2_btree_reserve_put(c, reserve);