4 #include "bkey_methods.h"
5 #include "btree_cache.h"
7 #include "btree_update.h"
9 #include "btree_iter.h"
10 #include "btree_locking.h"
17 #include <linux/random.h>
18 #include <linux/sort.h>
19 #include <trace/events/bcachefs.h>
static void btree_interior_update_updated_root(struct bch_fs *,
					       struct btree_interior_update *,
					       enum btree_id);
static void btree_interior_update_will_make_reachable(struct bch_fs *,
						       struct btree_interior_update *,
						       struct btree *);
static void btree_interior_update_drop_new_node(struct bch_fs *,
						 struct btree *);
30 /* Calculate ideal packed bkey format for new btree nodes: */
32 void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
34 struct bkey_packed *k;
38 bch2_bkey_format_add_pos(s, b->data->min_key);
41 for (k = btree_bkey_first(b, t);
42 k != btree_bkey_last(b, t);
44 if (!bkey_whiteout(k)) {
45 uk = bkey_unpack_key(b, k);
46 bch2_bkey_format_add_key(s, &uk);
50 static struct bkey_format bch2_btree_calc_format(struct btree *b)
52 struct bkey_format_state s;
54 bch2_bkey_format_init(&s);
55 __bch2_btree_calc_format(&s, b);
57 return bch2_bkey_format_done(&s);
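/*
 * How many key u64s would @b contain if all its keys were repacked with
 * @new_f? Assumes every key can actually be packed with the new format.
 */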
60 static size_t btree_node_u64s_with_format(struct btree *b,
61 struct bkey_format *new_f)
63 struct bkey_format *old_f = &b->format;
65 /* stupid integer promotion rules */
67 (((int) new_f->key_u64s - old_f->key_u64s) *
68 (int) b->nr.packed_keys) +
69 (((int) new_f->key_u64s - BKEY_U64s) *
70 (int) b->nr.unpacked_keys);
72 BUG_ON(delta + b->nr.live_u64s < 0);
74 return b->nr.live_u64s + delta;
 * bch2_btree_node_format_fits - check if we could rewrite node with a new format
80 * This assumes all keys can pack with the new format -- it just checks if
81 * the re-packed keys would fit inside the node itself.
83 bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
84 struct bkey_format *new_f)
86 size_t u64s = btree_node_u64s_with_format(b, new_f);
88 return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
91 /* Btree node freeing/allocation: */
 * We're doing the index update that makes @b unreachable, update stuff to
 * reflect that:
 *
 * Must be called _before_ btree_interior_update_updated_root() or
 * btree_interior_update_updated_btree():
100 static void bch2_btree_node_free_index(struct bch_fs *c, struct btree *b,
101 enum btree_id id, struct bkey_s_c k,
102 struct bch_fs_usage *stats)
104 struct btree_interior_update *as;
105 struct pending_btree_node_free *d;
107 mutex_lock(&c->btree_interior_update_lock);
109 for_each_pending_btree_node_free(c, as, d)
110 if (!bkey_cmp(k.k->p, d->key.k.p) &&
111 bkey_val_bytes(k.k) == bkey_val_bytes(&d->key.k) &&
112 !memcmp(k.v, &d->key.v, bkey_val_bytes(k.k)))
117 d->index_update_done = true;
120 * Btree nodes are accounted as freed in bch_alloc_stats when they're
121 * freed from the index:
123 stats->s[S_COMPRESSED][S_META] -= c->sb.btree_node_size;
124 stats->s[S_UNCOMPRESSED][S_META] -= c->sb.btree_node_size;
127 * We're dropping @k from the btree, but it's still live until the
128 * index update is persistent so we need to keep a reference around for
129 * mark and sweep to find - that's primarily what the
130 * btree_node_pending_free list is for.
132 * So here (when we set index_update_done = true), we're moving an
133 * existing reference to a different part of the larger "gc keyspace" -
134 * and the new position comes after the old position, since GC marks
135 * the pending free list after it walks the btree.
137 * If we move the reference while mark and sweep is _between_ the old
138 * and the new position, mark and sweep will see the reference twice
139 * and it'll get double accounted - so check for that here and subtract
140 * to cancel out one of mark and sweep's markings if necessary:
144 * bch2_mark_key() compares the current gc pos to the pos we're
145 * moving this reference from, hence one comparison here:
147 if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
148 struct bch_fs_usage tmp = { 0 };
150 bch2_mark_key(c, bkey_i_to_s_c(&d->key),
151 -c->sb.btree_node_size, true, b
152 ? gc_pos_btree_node(b)
153 : gc_pos_btree_root(id),
 * Don't apply tmp - pending deletes aren't tracked in bch_alloc_stats:
161 mutex_unlock(&c->btree_interior_update_lock);
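/*
 * Free an in-memory btree node: remove it from the hash table and move it to
 * the freeable list. The node must already be clean and either unreachable or
 * never have been made reachable.
 */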
164 static void __btree_node_free(struct bch_fs *c, struct btree *b,
165 struct btree_iter *iter)
167 trace_btree_node_free(c, b);
169 BUG_ON(btree_node_dirty(b));
170 BUG_ON(btree_node_need_write(b));
171 BUG_ON(b == btree_node_root(c, b));
173 BUG_ON(!list_empty(&b->write_blocked));
174 BUG_ON(b->will_make_reachable);
176 clear_btree_node_noevict(b);
178 six_lock_write(&b->lock);
180 bch2_btree_node_hash_remove(c, b);
182 mutex_lock(&c->btree_cache_lock);
183 list_move(&b->list, &c->btree_cache_freeable);
184 mutex_unlock(&c->btree_cache_lock);
187 * By using six_unlock_write() directly instead of
188 * bch2_btree_node_unlock_write(), we don't update the iterator's
 * sequence numbers and cause future bch2_btree_node_relock() calls to fail:
192 six_unlock_write(&b->lock);
195 void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
197 struct open_bucket *ob = b->ob;
199 btree_interior_update_drop_new_node(c, b);
203 clear_btree_node_dirty(b);
205 __btree_node_free(c, b, NULL);
207 bch2_open_bucket_put(c, ob);
210 void bch2_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
212 bch2_btree_iter_node_drop_linked(iter, b);
214 __btree_node_free(iter->c, b, iter);
216 bch2_btree_iter_node_drop(iter, b);
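/*
 * The index update removing this node is now persistent; release the node's
 * on-disk space by updating bucket marks:
 */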
219 static void bch2_btree_node_free_ondisk(struct bch_fs *c,
220 struct pending_btree_node_free *pending)
222 struct bch_fs_usage stats = { 0 };
224 BUG_ON(!pending->index_update_done);
226 bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
227 -c->sb.btree_node_size, true,
228 gc_phase(GC_PHASE_PENDING_DELETE),
 * Don't apply stats - pending deletes aren't tracked in bch_alloc_stats:
236 void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b)
238 bch2_open_bucket_put(c, b->ob);
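/*
 * Allocate a single btree node on disk: reuse an entry from the btree reserve
 * cache if one is available, otherwise allocate fresh sectors.
 */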
242 static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
243 struct disk_reservation *res,
248 struct open_bucket *ob;
251 enum alloc_reserve alloc_reserve;
253 if (flags & BTREE_INSERT_USE_ALLOC_RESERVE) {
255 alloc_reserve = RESERVE_ALLOC;
256 } else if (flags & BTREE_INSERT_USE_RESERVE) {
257 nr_reserve = BTREE_NODE_RESERVE / 2;
258 alloc_reserve = RESERVE_BTREE;
260 nr_reserve = BTREE_NODE_RESERVE;
261 alloc_reserve = RESERVE_NONE;
264 mutex_lock(&c->btree_reserve_cache_lock);
265 if (c->btree_reserve_cache_nr > nr_reserve) {
266 struct btree_alloc *a =
267 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
270 bkey_copy(&tmp.k, &a->k);
271 mutex_unlock(&c->btree_reserve_cache_lock);
274 mutex_unlock(&c->btree_reserve_cache_lock);
277 /* alloc_sectors is weird, I suppose */
278 bkey_extent_init(&tmp.k);
tmp.k.k.size = c->sb.btree_node_size;
281 ob = bch2_alloc_sectors(c, &c->btree_write_point,
282 bkey_i_to_extent(&tmp.k),
284 c->opts.metadata_replicas_required,
289 if (tmp.k.k.size < c->sb.btree_node_size) {
290 bch2_open_bucket_put(c, ob);
294 b = bch2_btree_node_mem_alloc(c);
296 /* we hold cannibalize_lock: */
300 bkey_copy(&b->key, &tmp.k);
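/*
 * Take a preallocated node from @reserve and initialize it as an empty btree
 * node at the given level and btree id:
 */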
307 static struct btree *bch2_btree_node_alloc(struct bch_fs *c,
308 unsigned level, enum btree_id id,
309 struct btree_interior_update *as,
310 struct btree_reserve *reserve)
314 BUG_ON(!reserve->nr);
316 b = reserve->b[--reserve->nr];
318 BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
320 set_btree_node_accessed(b);
321 set_btree_node_dirty(b);
323 bch2_bset_init_first(b, &b->data->keys);
324 memset(&b->nr, 0, sizeof(b->nr));
325 b->data->magic = cpu_to_le64(bset_magic(c));
327 SET_BTREE_NODE_ID(b->data, id);
328 SET_BTREE_NODE_LEVEL(b->data, level);
329 b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
331 bch2_btree_build_aux_trees(b);
333 btree_interior_update_will_make_reachable(c, as, b);
335 trace_btree_node_alloc(c, b);
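/*
 * Allocate a replacement for @b with the given packed key format, and copy
 * @b's live keys into it:
 */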
339 struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *c,
341 struct bkey_format format,
342 struct btree_interior_update *as,
343 struct btree_reserve *reserve)
347 n = bch2_btree_node_alloc(c, b->level, b->btree_id, as, reserve);
349 n->data->min_key = b->data->min_key;
350 n->data->max_key = b->data->max_key;
351 n->data->format = format;
353 btree_node_set_format(n, format);
355 bch2_btree_sort_into(c, n, b);
357 btree_node_reset_sib_u64s(n);
359 n->key.k.p = b->key.k.p;
363 static struct btree *bch2_btree_node_alloc_replacement(struct bch_fs *c,
365 struct btree_interior_update *as,
366 struct btree_reserve *reserve)
368 struct bkey_format new_f = bch2_btree_calc_format(b);
371 * The keys might expand with the new format - if they wouldn't fit in
372 * the btree node anymore, use the old format for now:
374 if (!bch2_btree_node_format_fits(c, b, &new_f))
377 return __bch2_btree_node_alloc_replacement(c, b, new_f, as, reserve);
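/*
 * Make @b the in-memory root: remove it from the LRU (root nodes can't be
 * reaped) and, for new allocations, account for the new root and the old root
 * it replaces.
 */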
380 static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
381 struct btree_reserve *btree_reserve)
383 struct btree *old = btree_node_root(c, b);
385 /* Root nodes cannot be reaped */
386 mutex_lock(&c->btree_cache_lock);
387 list_del_init(&b->list);
388 mutex_unlock(&c->btree_cache_lock);
390 mutex_lock(&c->btree_root_lock);
391 btree_node_root(c, b) = b;
392 mutex_unlock(&c->btree_root_lock);
 * New allocation (we're not being called because we're in
 * bch2_btree_root_read()) - do marking while holding
 * btree_root_lock:
400 struct bch_fs_usage stats = { 0 };
402 bch2_mark_key(c, bkey_i_to_s_c(&b->key),
403 c->sb.btree_node_size, true,
404 gc_pos_btree_root(b->btree_id),
408 bch2_btree_node_free_index(c, NULL, old->btree_id,
409 bkey_i_to_s_c(&old->key),
411 bch2_fs_usage_apply(c, &stats, &btree_reserve->disk_res,
412 gc_pos_btree_root(b->btree_id));
415 bch2_recalc_btree_reserve(c);
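/*
 * Update the root pointer that goes out with every journal write, making the
 * new root visible on disk:
 */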
418 static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
420 struct btree_root *r = &c->btree_roots[b->btree_id];
422 mutex_lock(&c->btree_root_lock);
425 bkey_copy(&r->key, &b->key);
429 mutex_unlock(&c->btree_root_lock);
433 * Only for filesystem bringup, when first reading the btree roots or allocating
434 * btree roots when initializing a new filesystem:
436 void bch2_btree_set_root_initial(struct bch_fs *c, struct btree *b,
437 struct btree_reserve *btree_reserve)
439 BUG_ON(btree_node_root(c, b));
441 bch2_btree_set_root_inmem(c, b, btree_reserve);
442 bch2_btree_set_root_ondisk(c, b);
 * bch2_btree_set_root - update the root in memory and on disk
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks. However, you must hold an intent lock on the
 * old root.
 *
 * Note: This allocates a journal entry but doesn't add any keys to
 * it. All the btree roots are part of every journal write, so there
 * is nothing new to be done. This just guarantees that there is a
 * journal write.
457 static void bch2_btree_set_root(struct btree_iter *iter, struct btree *b,
458 struct btree_interior_update *as,
459 struct btree_reserve *btree_reserve)
461 struct bch_fs *c = iter->c;
464 trace_btree_set_root(c, b);
467 old = btree_node_root(c, b);
 * Ensure no one is using the old root while we switch to the new root:
473 bch2_btree_node_lock_write(old, iter);
475 bch2_btree_set_root_inmem(c, b, btree_reserve);
477 btree_interior_update_updated_root(c, as, iter->btree_id);
480 * Unlock old root after new root is visible:
482 * The new root isn't persistent, but that's ok: we still have
483 * an intent lock on the new root, and any updates that would
484 * depend on the new root would have to update the new root.
486 bch2_btree_node_unlock_write(old, iter);
489 static struct btree *__btree_root_alloc(struct bch_fs *c, unsigned level,
491 struct btree_interior_update *as,
492 struct btree_reserve *reserve)
494 struct btree *b = bch2_btree_node_alloc(c, level, id, as, reserve);
496 b->data->min_key = POS_MIN;
497 b->data->max_key = POS_MAX;
498 b->data->format = bch2_btree_calc_format(b);
499 b->key.k.p = POS_MAX;
501 btree_node_set_format(b, b->data->format);
502 bch2_btree_build_aux_trees(b);
504 six_unlock_write(&b->lock);
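/*
 * Return unused preallocated nodes: stash them in the btree reserve cache if
 * there's room, otherwise free them.
 */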
509 void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
511 bch2_disk_reservation_put(c, &reserve->disk_res);
513 mutex_lock(&c->btree_reserve_cache_lock);
515 while (reserve->nr) {
516 struct btree *b = reserve->b[--reserve->nr];
518 six_unlock_write(&b->lock);
520 if (c->btree_reserve_cache_nr <
521 ARRAY_SIZE(c->btree_reserve_cache)) {
522 struct btree_alloc *a =
523 &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
527 bkey_copy(&a->k, &b->key);
529 bch2_open_bucket_put(c, b->ob);
533 __btree_node_free(c, b, NULL);
535 six_unlock_intent(&b->lock);
538 mutex_unlock(&c->btree_reserve_cache_lock);
540 mempool_free(reserve, &c->btree_reserve_pool);
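/*
 * Get a disk reservation and preallocate @nr_nodes btree nodes for an upcoming
 * interior node update:
 */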
543 static struct btree_reserve *__bch2_btree_reserve_get(struct bch_fs *c,
548 struct btree_reserve *reserve;
550 struct disk_reservation disk_res = { 0, 0 };
551 unsigned sectors = nr_nodes * c->sb.btree_node_size;
552 int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD|
553 BCH_DISK_RESERVATION_METADATA;
555 if (flags & BTREE_INSERT_NOFAIL)
556 disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
559 * This check isn't necessary for correctness - it's just to potentially
560 * prevent us from doing a lot of work that'll end up being wasted:
562 ret = bch2_journal_error(&c->journal);
566 if (bch2_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
567 return ERR_PTR(-ENOSPC);
569 BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
572 * Protects reaping from the btree node cache and using the btree node
573 * open bucket reserve:
575 ret = bch2_btree_node_cannibalize_lock(c, cl);
577 bch2_disk_reservation_put(c, &disk_res);
581 reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
583 reserve->disk_res = disk_res;
586 while (reserve->nr < nr_nodes) {
587 b = __bch2_btree_node_alloc(c, &disk_res,
588 flags & BTREE_INSERT_NOWAIT
595 ret = bch2_check_mark_super(c, bkey_i_to_s_c_extent(&b->key),
600 reserve->b[reserve->nr++] = b;
603 bch2_btree_node_cannibalize_unlock(c);
606 bch2_btree_reserve_put(c, reserve);
607 bch2_btree_node_cannibalize_unlock(c);
608 trace_btree_reserve_get_fail(c, nr_nodes, cl);
612 struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
614 unsigned extra_nodes,
618 unsigned depth = btree_node_root(c, b)->level - b->level;
619 unsigned nr_nodes = btree_reserve_required_nodes(depth) + extra_nodes;
621 return __bch2_btree_reserve_get(c, nr_nodes, flags, cl);
624 int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id,
625 struct closure *writes)
627 struct btree_interior_update as;
628 struct btree_reserve *reserve;
632 memset(&as, 0, sizeof(as));
633 closure_init_stack(&cl);
636 /* XXX haven't calculated capacity yet :/ */
637 reserve = __bch2_btree_reserve_get(c, 1, 0, &cl);
638 if (!IS_ERR(reserve))
641 if (PTR_ERR(reserve) == -ENOSPC)
642 return PTR_ERR(reserve);
647 b = __btree_root_alloc(c, 0, id, &as, reserve);
649 bch2_btree_node_write(c, b, writes, SIX_LOCK_intent);
651 bch2_btree_set_root_initial(c, b, reserve);
653 btree_interior_update_drop_new_node(c, b);
654 bch2_btree_open_bucket_put(c, b);
655 six_unlock_intent(&b->lock);
657 bch2_btree_reserve_put(c, reserve);
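/*
 * Insert a btree node pointer into interior node @b: mark the new key and, if
 * it overwrites an existing pointer, move that node onto the pending free
 * list:
 */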
662 static void bch2_insert_fixup_btree_ptr(struct btree_iter *iter,
664 struct bkey_i *insert,
665 struct btree_node_iter *node_iter,
666 struct disk_reservation *disk_res)
668 struct bch_fs *c = iter->c;
669 struct bch_fs_usage stats = { 0 };
670 struct bkey_packed *k;
673 if (bkey_extent_is_data(&insert->k))
674 bch2_mark_key(c, bkey_i_to_s_c(insert),
675 c->sb.btree_node_size, true,
676 gc_pos_btree_node(b), &stats, 0);
678 while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
679 !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
680 bch2_btree_node_iter_advance(node_iter, b);
683 * If we're overwriting, look up pending delete and mark so that gc
684 * marks it on the pending delete list:
686 if (k && !bkey_cmp_packed(b, k, &insert->k))
687 bch2_btree_node_free_index(c, b, iter->btree_id,
688 bkey_disassemble(b, k, &tmp),
691 bch2_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b));
693 bch2_btree_bset_insert_key(iter, b, node_iter, insert);
694 set_btree_node_dirty(b);
695 set_btree_node_need_write(b);
698 /* Inserting into a given leaf node (last stage of insert): */
700 /* Handle overwrites and do insert, for non extents: */
701 bool bch2_btree_bset_insert_key(struct btree_iter *iter,
703 struct btree_node_iter *node_iter,
704 struct bkey_i *insert)
706 const struct bkey_format *f = &b->format;
707 struct bkey_packed *k;
709 unsigned clobber_u64s;
711 EBUG_ON(btree_node_just_written(b));
712 EBUG_ON(bset_written(b, btree_bset_last(b)));
713 EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
714 EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
715 bkey_cmp(insert->k.p, b->data->max_key) > 0);
716 BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b));
718 k = bch2_btree_node_iter_peek_all(node_iter, b);
719 if (k && !bkey_cmp_packed(b, k, &insert->k)) {
720 BUG_ON(bkey_whiteout(k));
722 t = bch2_bkey_to_bset(b, k);
724 if (bset_unwritten(b, bset(b, t)) &&
725 bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
726 BUG_ON(bkey_whiteout(k) != bkey_whiteout(&insert->k));
728 k->type = insert->k.type;
729 memcpy_u64s(bkeyp_val(f, k), &insert->v,
730 bkey_val_u64s(&insert->k));
734 insert->k.needs_whiteout = k->needs_whiteout;
736 btree_keys_account_key_drop(&b->nr, t - b->set, k);
738 if (t == bset_tree_last(b)) {
739 clobber_u64s = k->u64s;
742 * If we're deleting, and the key we're deleting doesn't
743 * need a whiteout (it wasn't overwriting a key that had
744 * been written to disk) - just delete it:
746 if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
747 bch2_bset_delete(b, k, clobber_u64s);
748 bch2_btree_node_iter_fix(iter, b, node_iter, t,
756 k->type = KEY_TYPE_DELETED;
757 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
760 if (bkey_whiteout(&insert->k)) {
761 reserve_whiteout(b, t, k);
764 k->needs_whiteout = false;
768 * Deleting, but the key to delete wasn't found - nothing to do:
770 if (bkey_whiteout(&insert->k))
773 insert->k.needs_whiteout = false;
776 t = bset_tree_last(b);
777 k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
780 bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
781 if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
782 bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
783 clobber_u64s, k->u64s);
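/*
 * Journal pin flush callback: write out the btree node whose unwritten data is
 * holding up journal reclaim.
 */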
787 static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
790 struct bch_fs *c = container_of(j, struct bch_fs, journal);
791 struct btree_write *w = container_of(pin, struct btree_write, journal);
792 struct btree *b = container_of(w, struct btree, writes[i]);
794 six_lock_read(&b->lock);
795 bch2_btree_node_write_dirty(c, b, NULL,
796 (btree_current_write(b) == w &&
797 w->journal.pin_list == journal_seq_pin(j, seq)));
798 six_unlock_read(&b->lock);
801 static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
803 return __btree_node_flush(j, pin, 0, seq);
806 static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
808 return __btree_node_flush(j, pin, 1, seq);
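/*
 * Add @insert to the journal, and pin the node's current write on the journal
 * sequence number the key lands in:
 */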
811 void bch2_btree_journal_key(struct btree_insert *trans,
812 struct btree_iter *iter,
813 struct bkey_i *insert)
815 struct bch_fs *c = trans->c;
816 struct journal *j = &c->journal;
817 struct btree *b = iter->nodes[0];
818 struct btree_write *w = btree_current_write(b);
820 EBUG_ON(iter->level || b->level);
821 EBUG_ON(trans->journal_res.ref !=
822 !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
824 if (!journal_pin_active(&w->journal))
825 bch2_journal_pin_add(j, &trans->journal_res,
827 btree_node_write_idx(b) == 0
829 : btree_node_flush1);
831 if (trans->journal_res.ref) {
832 u64 seq = trans->journal_res.seq;
833 bool needs_whiteout = insert->k.needs_whiteout;
836 insert->k.needs_whiteout = false;
837 bch2_journal_add_keys(j, &trans->journal_res,
838 b->btree_id, insert);
839 insert->k.needs_whiteout = needs_whiteout;
841 bch2_journal_set_has_inode(j, &trans->journal_res,
844 if (trans->journal_seq)
845 *trans->journal_seq = seq;
846 btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
849 if (!btree_node_dirty(b))
850 set_btree_node_dirty(b);
853 static enum btree_insert_ret
854 bch2_insert_fixup_key(struct btree_insert *trans,
855 struct btree_insert_entry *insert)
857 struct btree_iter *iter = insert->iter;
861 if (bch2_btree_bset_insert_key(iter,
863 &iter->node_iters[0],
865 bch2_btree_journal_key(trans, iter, insert->k);
867 trans->did_work = true;
868 return BTREE_INSERT_OK;
871 static void verify_keys_sorted(struct keylist *l)
873 #ifdef CONFIG_BCACHEFS_DEBUG
876 for_each_keylist_key(l, k)
877 BUG_ON(bkey_next(k) != l->top &&
878 bkey_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
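/*
 * Write lock @b and get it ready for an insert: finish up any just-completed
 * write, and start a new bset if the current one is full or already written.
 */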
882 static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter)
884 struct bch_fs *c = iter->c;
886 bch2_btree_node_lock_write(b, iter);
888 if (btree_node_just_written(b) &&
889 bch2_btree_post_write_cleanup(c, b))
890 bch2_btree_iter_reinit_node(iter, b);
893 * If the last bset has been written, or if it's gotten too big - start
894 * a new bset to insert into:
896 if (want_new_bset(c, b))
897 bch2_btree_init_next(c, b, iter);
900 /* Asynchronous interior node update machinery */
902 struct btree_interior_update *
903 bch2_btree_interior_update_alloc(struct bch_fs *c)
905 struct btree_interior_update *as;
907 as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
908 memset(as, 0, sizeof(*as));
909 closure_init(&as->cl, &c->cl);
911 as->mode = BTREE_INTERIOR_NO_UPDATE;
912 INIT_LIST_HEAD(&as->write_blocked_list);
914 bch2_keylist_init(&as->parent_keys, as->inline_keys,
915 ARRAY_SIZE(as->inline_keys));
917 mutex_lock(&c->btree_interior_update_lock);
918 list_add(&as->list, &c->btree_interior_update_list);
919 mutex_unlock(&c->btree_interior_update_lock);
924 static void btree_interior_update_free(struct closure *cl)
926 struct btree_interior_update *as =
927 container_of(cl, struct btree_interior_update, cl);
929 mempool_free(as, &as->c->btree_interior_update_pool);
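/*
 * All the new nodes from this interior update are now on disk and reachable:
 * drop our journal pin, write out any new nodes that are still dirty, and free
 * the replaced nodes' on-disk space.
 */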
932 static void btree_interior_update_nodes_reachable(struct closure *cl)
934 struct btree_interior_update *as =
935 container_of(cl, struct btree_interior_update, cl);
936 struct bch_fs *c = as->c;
938 bch2_journal_pin_drop(&c->journal, &as->journal);
940 mutex_lock(&c->btree_interior_update_lock);
942 while (as->nr_new_nodes) {
943 struct btree *b = as->new_nodes[--as->nr_new_nodes];
945 BUG_ON(b->will_make_reachable != as);
946 b->will_make_reachable = NULL;
947 mutex_unlock(&c->btree_interior_update_lock);
949 six_lock_read(&b->lock);
950 bch2_btree_node_write_dirty(c, b, NULL, btree_node_need_write(b));
951 six_unlock_read(&b->lock);
952 mutex_lock(&c->btree_interior_update_lock);
955 while (as->nr_pending)
956 bch2_btree_node_free_ondisk(c, &as->pending[--as->nr_pending]);
959 mutex_unlock(&c->btree_interior_update_lock);
961 closure_wake_up(&as->wait);
963 closure_return_with_destructor(cl, btree_interior_update_free);
966 static void btree_interior_update_nodes_written(struct closure *cl)
968 struct btree_interior_update *as =
969 container_of(cl, struct btree_interior_update, cl);
970 struct bch_fs *c = as->c;
973 if (bch2_journal_error(&c->journal)) {
975 /* we don't want to free the nodes on disk, that's what */
978 /* XXX: missing error handling, damnit */
980 /* check for journal error, bail out if we flushed */
983 * We did an update to a parent node where the pointers we added pointed
984 * to child nodes that weren't written yet: now, the child nodes have
985 * been written so we can write out the update to the interior node.
988 mutex_lock(&c->btree_interior_update_lock);
990 case BTREE_INTERIOR_NO_UPDATE:
992 case BTREE_INTERIOR_UPDATING_NODE:
993 /* The usual case: */
994 b = READ_ONCE(as->b);
996 if (!six_trylock_read(&b->lock)) {
997 mutex_unlock(&c->btree_interior_update_lock);
998 six_lock_read(&b->lock);
999 six_unlock_read(&b->lock);
1003 BUG_ON(!btree_node_dirty(b));
1004 closure_wait(&btree_current_write(b)->wait, cl);
1006 list_del(&as->write_blocked_list);
1007 mutex_unlock(&c->btree_interior_update_lock);
1009 bch2_btree_node_write_dirty(c, b, NULL,
1010 btree_node_need_write(b));
1011 six_unlock_read(&b->lock);
1014 case BTREE_INTERIOR_UPDATING_AS:
 * The btree node we originally updated has been freed and is
 * being rewritten - so we don't need to write anything here; we just
 * need to signal to that btree_interior_update that it's ok to make the
 * new replacement node visible:
1021 closure_put(&as->parent_as->cl);
1024 * and then we have to wait on that btree_interior_update to finish:
1026 closure_wait(&as->parent_as->wait, cl);
1027 mutex_unlock(&c->btree_interior_update_lock);
1030 case BTREE_INTERIOR_UPDATING_ROOT:
1031 /* b is the new btree root: */
1032 b = READ_ONCE(as->b);
1034 if (!six_trylock_read(&b->lock)) {
1035 mutex_unlock(&c->btree_interior_update_lock);
1036 six_lock_read(&b->lock);
1037 six_unlock_read(&b->lock);
1041 BUG_ON(c->btree_roots[b->btree_id].as != as);
1042 c->btree_roots[b->btree_id].as = NULL;
1044 bch2_btree_set_root_ondisk(c, b);
 * We don't have to wait on anything here (before
 * btree_interior_update_nodes_reachable frees the old nodes
 * on disk) - we've ensured that the very next journal write will
 * have the pointer to the new root, and before the allocator
 * can reuse the old nodes it'll have to do a journal commit:
1053 six_unlock_read(&b->lock);
1054 mutex_unlock(&c->btree_interior_update_lock);
 * Bit of funny circularity going on here that we have to break:
 *
 * We have to drop our journal pin before writing the journal
 * entry that points to the new btree root: else, we could
 * deadlock if the journal currently happens to be full.
 *
 * This means we're dropping the journal pin _before_ the new
1064 * nodes are technically reachable - but this is safe, because
1065 * after the bch2_btree_set_root_ondisk() call above they will
1066 * be reachable as of the very next journal write:
1068 bch2_journal_pin_drop(&c->journal, &as->journal);
 * And, do a journal write to write the pointer to the new root,
 * then wait for it to complete before freeing the nodes we
 * replaced:
bch2_journal_meta_async(&c->journal, cl);
1079 continue_at(cl, btree_interior_update_nodes_reachable, system_wq);
1083 * We're updating @b with pointers to nodes that haven't finished writing yet:
1084 * block @b from being written until @as completes
1086 static void btree_interior_update_updated_btree(struct bch_fs *c,
1087 struct btree_interior_update *as,
1090 mutex_lock(&c->btree_interior_update_lock);
1092 BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1093 BUG_ON(!btree_node_dirty(b));
1095 as->mode = BTREE_INTERIOR_UPDATING_NODE;
1097 list_add(&as->write_blocked_list, &b->write_blocked);
1099 mutex_unlock(&c->btree_interior_update_lock);
1102 * In general, when you're staging things in a journal that will later
1103 * be written elsewhere, and you also want to guarantee ordering: that
1104 * is, if you have updates a, b, c, after a crash you should never see c
1105 * and not a or b - there's a problem:
 * If the final destination of the update(s) (i.e. btree node) can be
 * written/flushed _before_ the relevant journal entry - oops, that
 * breaks ordering, since the various leaf nodes can be written in any
 * order.
1112 * Normally we use bset->journal_seq to deal with this - if during
1113 * recovery we find a btree node write that's newer than the newest
1114 * journal entry, we just ignore it - we don't need it, anything we're
1115 * supposed to have (that we reported as completed via fsync()) will
1116 * still be in the journal, and as far as the state of the journal is
1117 * concerned that btree node write never happened.
1119 * That breaks when we're rewriting/splitting/merging nodes, since we're
1120 * mixing btree node writes that haven't happened yet with previously
1121 * written data that has been reported as completed to the journal.
 * Thus, before making the new nodes reachable, we have to wait for the
 * newest journal sequence number we have data for to be written (if it
 * hasn't been already):
1127 bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
1129 continue_at(&as->cl, btree_interior_update_nodes_written,
1130 system_freezable_wq);
1133 static void interior_update_flush(struct journal *j,
1134 struct journal_entry_pin *pin, u64 seq)
1136 struct btree_interior_update *as =
1137 container_of(pin, struct btree_interior_update, journal);
1139 bch2_journal_flush_seq_async(j, as->journal_seq, NULL);
1142 static void btree_interior_update_reparent(struct bch_fs *c,
1143 struct btree_interior_update *as,
1144 struct btree_interior_update *child)
1147 child->mode = BTREE_INTERIOR_UPDATING_AS;
1148 child->parent_as = as;
1149 closure_get(&as->cl);
1152 * When we write a new btree root, we have to drop our journal pin
1153 * _before_ the new nodes are technically reachable; see
1154 * btree_interior_update_nodes_written().
1156 * This goes for journal pins that are recursively blocked on us - so,
1157 * just transfer the journal pin to the new interior update so
1158 * btree_interior_update_nodes_written() can drop it.
1160 bch2_journal_pin_add_if_older(&c->journal, &child->journal,
1161 &as->journal, interior_update_flush);
1162 bch2_journal_pin_drop(&c->journal, &child->journal);
1164 as->journal_seq = max(as->journal_seq, child->journal_seq);
1167 static void btree_interior_update_updated_root(struct bch_fs *c,
1168 struct btree_interior_update *as,
1169 enum btree_id btree_id)
1171 struct btree_root *r = &c->btree_roots[btree_id];
1173 mutex_lock(&c->btree_interior_update_lock);
1175 BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
1178 * Old root might not be persistent yet - if so, redirect its
1179 * btree_interior_update operation to point to us:
1182 btree_interior_update_reparent(c, as, r->as);
1184 as->mode = BTREE_INTERIOR_UPDATING_ROOT;
1188 mutex_unlock(&c->btree_interior_update_lock);
1191 * When we're rewriting nodes and updating interior nodes, there's an
1192 * issue with updates that haven't been written in the journal getting
 * mixed together with older data - see btree_interior_update_updated_btree()
1194 * for the explanation.
1196 * However, this doesn't affect us when we're writing a new btree root -
1197 * because to make that new root reachable we have to write out a new
1198 * journal entry, which must necessarily be newer than as->journal_seq.
1201 continue_at(&as->cl, btree_interior_update_nodes_written,
1202 system_freezable_wq);
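/*
 * Record that @b is a new node whose reachability depends on @as completing,
 * so it can be written out (or dropped) when the update finishes:
 */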
1205 static void btree_interior_update_will_make_reachable(struct bch_fs *c,
1206 struct btree_interior_update *as,
1209 mutex_lock(&c->btree_interior_update_lock);
1210 BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
1211 BUG_ON(b->will_make_reachable);
1213 as->new_nodes[as->nr_new_nodes++] = b;
1214 b->will_make_reachable = as;
1215 mutex_unlock(&c->btree_interior_update_lock);
1218 static void __btree_interior_update_drop_new_node(struct btree *b)
1220 struct btree_interior_update *as = b->will_make_reachable;
1225 for (i = 0; i < as->nr_new_nodes; i++)
1226 if (as->new_nodes[i] == b)
1232 memmove(&as->new_nodes[i],
1233 &as->new_nodes[i + 1],
1234 sizeof(struct btree *) * (as->nr_new_nodes - i));
1235 b->will_make_reachable = NULL;
1238 static void btree_interior_update_drop_new_node(struct bch_fs *c,
1241 mutex_lock(&c->btree_interior_update_lock);
1242 __btree_interior_update_drop_new_node(b);
1243 mutex_unlock(&c->btree_interior_update_lock);
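/*
 * Remember the key for a node we're about to free, so mark and sweep can still
 * find it until the index update making it unreachable is persistent:
 */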
1246 static void bch2_btree_interior_update_add_node_reference(struct bch_fs *c,
1247 struct btree_interior_update *as,
1250 struct pending_btree_node_free *d;
1252 mutex_lock(&c->btree_interior_update_lock);
1254 /* Add this node to the list of nodes being freed: */
1255 BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
1257 d = &as->pending[as->nr_pending++];
1258 d->index_update_done = false;
1259 d->seq = b->data->keys.seq;
1260 d->btree_id = b->btree_id;
1261 d->level = b->level;
1262 bkey_copy(&d->key, &b->key);
1264 mutex_unlock(&c->btree_interior_update_lock);
1268 * @b is being split/rewritten: it may have pointers to not-yet-written btree
1269 * nodes and thus outstanding btree_interior_updates - redirect @b's
1270 * btree_interior_updates to point to this btree_interior_update:
1272 void bch2_btree_interior_update_will_free_node(struct bch_fs *c,
1273 struct btree_interior_update *as,
1276 struct closure *cl, *cl_n;
1277 struct btree_interior_update *p, *n;
1278 struct btree_write *w;
1279 struct bset_tree *t;
1281 bch2_btree_interior_update_add_node_reference(c, as, b);
1284 * Does this node have data that hasn't been written in the journal?
1286 * If so, we have to wait for the corresponding journal entry to be
1287 * written before making the new nodes reachable - we can't just carry
1288 * over the bset->journal_seq tracking, since we'll be mixing those keys
1289 * in with keys that aren't in the journal anymore:
1292 as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
1294 mutex_lock(&c->btree_interior_update_lock);
1297 * Does this node have any btree_interior_update operations preventing
1298 * it from being written?
1300 * If so, redirect them to point to this btree_interior_update: we can
1301 * write out our new nodes, but we won't make them visible until those
1302 * operations complete
1304 list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
1305 list_del(&p->write_blocked_list);
1306 btree_interior_update_reparent(c, as, p);
1309 clear_btree_node_dirty(b);
1310 clear_btree_node_need_write(b);
1311 w = btree_current_write(b);
1313 llist_for_each_entry_safe(cl, cl_n, llist_del_all(&w->wait.list), list)
1314 llist_add(&cl->list, &as->wait.list);
1317 * Does this node have unwritten data that has a pin on the journal?
1319 * If so, transfer that pin to the btree_interior_update operation -
1320 * note that if we're freeing multiple nodes, we only need to keep the
1321 * oldest pin of any of the nodes we're freeing. We'll release the pin
1322 * when the new nodes are persistent and reachable on disk:
1324 bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1325 &as->journal, interior_update_flush);
1326 bch2_journal_pin_drop(&c->journal, &w->journal);
1328 w = btree_prev_write(b);
1329 bch2_journal_pin_add_if_older(&c->journal, &w->journal,
1330 &as->journal, interior_update_flush);
1331 bch2_journal_pin_drop(&c->journal, &w->journal);
1333 if (b->will_make_reachable)
1334 __btree_interior_update_drop_new_node(b);
1336 mutex_unlock(&c->btree_interior_update_lock);
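/*
 * Debug check: in an interior node, the last key must be at the node's max key
 * (b->key.k.p):
 */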
1339 static void btree_node_interior_verify(struct btree *b)
1341 struct btree_node_iter iter;
1342 struct bkey_packed *k;
1346 bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
1348 BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
1349 bkey_cmp_left_packed(b, k, &b->key.k.p));
1351 BUG_ON((bch2_btree_node_iter_advance(&iter, b),
1352 !bch2_btree_node_iter_end(&iter)));
1357 k = bch2_btree_node_iter_peek(&iter, b);
1361 msg = "isn't what it should be";
1362 if (bkey_cmp_left_packed(b, k, &b->key.k.p))
1365 bch2_btree_node_iter_advance(&iter, b);
1367 msg = "isn't last key";
1368 if (!bch2_btree_node_iter_end(&iter))
1372 bch2_dump_btree_node(b);
1373 printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
1374 b->key.k.p.offset, msg);
1380 bch2_btree_insert_keys_interior(struct btree *b,
1381 struct btree_iter *iter,
1382 struct keylist *insert_keys,
1383 struct btree_interior_update *as,
1384 struct btree_reserve *res)
1386 struct bch_fs *c = iter->c;
1387 struct btree_iter *linked;
1388 struct btree_node_iter node_iter;
1389 struct bkey_i *insert = bch2_keylist_front(insert_keys);
1390 struct bkey_packed *k;
1392 BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1394 BUG_ON(!as || as->b);
1395 verify_keys_sorted(insert_keys);
1397 btree_node_lock_for_insert(b, iter);
1399 if (bch_keylist_u64s(insert_keys) >
1400 bch_btree_keys_u64s_remaining(c, b)) {
1401 bch2_btree_node_unlock_write(b, iter);
1405 /* Don't screw up @iter's position: */
1406 node_iter = iter->node_iters[b->level];
1409 * btree_split(), btree_gc_coalesce() will insert keys before
1410 * the iterator's current position - they know the keys go in
1411 * the node the iterator points to:
1413 while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
1414 (bkey_cmp_packed(b, k, &insert->k) >= 0))
1417 while (!bch2_keylist_empty(insert_keys)) {
1418 insert = bch2_keylist_front(insert_keys);
1420 bch2_insert_fixup_btree_ptr(iter, b, insert,
1421 &node_iter, &res->disk_res);
1422 bch2_keylist_pop_front(insert_keys);
1425 btree_interior_update_updated_btree(c, as, b);
1427 for_each_linked_btree_node(iter, b, linked)
1428 bch2_btree_node_iter_peek(&linked->node_iters[b->level],
1430 bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
1432 bch2_btree_iter_verify(iter, b);
1434 if (bch2_maybe_compact_whiteouts(c, b))
1435 bch2_btree_iter_reinit_node(iter, b);
1437 bch2_btree_node_unlock_write(b, iter);
1439 btree_node_interior_verify(b);
 * Move keys from n1 (original replacement node, now lower node) to n2 (higher node):
1447 static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n1,
1448 struct btree_reserve *reserve,
1449 struct btree_interior_update *as)
1451 struct bch_fs *c = iter->c;
1452 size_t nr_packed = 0, nr_unpacked = 0;
1454 struct bset *set1, *set2;
1455 struct bkey_packed *k, *prev = NULL;
1457 n2 = bch2_btree_node_alloc(c, n1->level, iter->btree_id, as, reserve);
1459 n2->data->max_key = n1->data->max_key;
1460 n2->data->format = n1->format;
1461 n2->key.k.p = n1->key.k.p;
1463 btree_node_set_format(n2, n2->data->format);
1465 set1 = btree_bset_first(n1);
1466 set2 = btree_bset_first(n2);
 * Has to be a linear search because we don't have an auxiliary search tree yet:
1474 if (bkey_next(k) == vstruct_last(set1))
1476 if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
1490 n1->key.k.p = bkey_unpack_pos(n1, prev);
1491 n1->data->max_key = n1->key.k.p;
1493 btree_type_successor(n1->btree_id, n1->key.k.p);
1495 set2->u64s = cpu_to_le16((u64 *) vstruct_end(set1) - (u64 *) k);
1496 set1->u64s = cpu_to_le16(le16_to_cpu(set1->u64s) - le16_to_cpu(set2->u64s));
1498 set_btree_bset_end(n1, n1->set);
1499 set_btree_bset_end(n2, n2->set);
1501 n2->nr.live_u64s = le16_to_cpu(set2->u64s);
1502 n2->nr.bset_u64s[0] = le16_to_cpu(set2->u64s);
1503 n2->nr.packed_keys = n1->nr.packed_keys - nr_packed;
1504 n2->nr.unpacked_keys = n1->nr.unpacked_keys - nr_unpacked;
1506 n1->nr.live_u64s = le16_to_cpu(set1->u64s);
1507 n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s);
1508 n1->nr.packed_keys = nr_packed;
1509 n1->nr.unpacked_keys = nr_unpacked;
1511 BUG_ON(!set1->u64s);
1512 BUG_ON(!set2->u64s);
1514 memcpy_u64s(set2->start,
1516 le16_to_cpu(set2->u64s));
1518 btree_node_reset_sib_u64s(n1);
1519 btree_node_reset_sib_u64s(n2);
1521 bch2_verify_btree_nr_keys(n1);
1522 bch2_verify_btree_nr_keys(n2);
1525 btree_node_interior_verify(n1);
1526 btree_node_interior_verify(n2);
 * For updates to interior nodes, we've got to do the insert before we split
 * because the stuff we're inserting has to be inserted atomically. Post split,
 * the keys might have to go in different nodes and the split would no longer be
 * atomic.
 *
 * Worse, if the insert is from btree node coalescing, if we do the insert after
 * we do the split (and pick the pivot) - the pivot we pick might be between
 * nodes that were coalesced, and thus in the middle of a child node post
 * coalescing:
1543 static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
1544 struct keylist *keys,
1545 struct btree_reserve *res)
1547 struct btree_node_iter node_iter;
1548 struct bkey_i *k = bch2_keylist_front(keys);
1549 struct bkey_packed *p;
1552 BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
1554 bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
1556 while (!bch2_keylist_empty(keys)) {
1557 k = bch2_keylist_front(keys);
1559 BUG_ON(bch_keylist_u64s(keys) >
1560 bch_btree_keys_u64s_remaining(iter->c, b));
1561 BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
1562 BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
1564 bch2_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res);
1565 bch2_keylist_pop_front(keys);
 * We can't tolerate whiteouts here - with whiteouts there can be
 * duplicate keys, and it would be rather bad if we picked a duplicate
 * for the pivot:
1573 i = btree_bset_first(b);
1575 while (p != vstruct_last(i))
1576 if (bkey_deleted(p)) {
1577 le16_add_cpu(&i->u64s, -p->u64s);
1578 set_btree_bset_end(b, b->set);
1579 memmove_u64s_down(p, bkey_next(p),
1580 (u64 *) vstruct_last(i) -
1585 BUG_ON(b->nsets != 1 ||
1586 b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));
1588 btree_node_interior_verify(b);
1591 static void btree_split(struct btree *b, struct btree_iter *iter,
1592 struct keylist *insert_keys,
1593 struct btree_reserve *reserve,
1594 struct btree_interior_update *as)
1596 struct bch_fs *c = iter->c;
1597 struct btree *parent = iter->nodes[b->level + 1];
1598 struct btree *n1, *n2 = NULL, *n3 = NULL;
1599 u64 start_time = local_clock();
1601 BUG_ON(!parent && (b != btree_node_root(c, b)));
1602 BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
1604 bch2_btree_interior_update_will_free_node(c, as, b);
1606 n1 = bch2_btree_node_alloc_replacement(c, b, as, reserve);
1609 btree_split_insert_keys(iter, n1, insert_keys, reserve);
1611 if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
1612 trace_btree_node_split(c, b, b->nr.live_u64s);
1614 n2 = __btree_split_node(iter, n1, reserve, as);
1616 bch2_btree_build_aux_trees(n2);
1617 bch2_btree_build_aux_trees(n1);
1618 six_unlock_write(&n2->lock);
1619 six_unlock_write(&n1->lock);
1621 bch2_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent);
 * Note that on recursive splits, parent_keys == insert_keys, so we
1625 * can't start adding new keys to parent_keys before emptying it
1626 * out (which we did with btree_split_insert_keys() above)
1628 bch2_keylist_add(&as->parent_keys, &n1->key);
1629 bch2_keylist_add(&as->parent_keys, &n2->key);
1632 /* Depth increases, make a new root */
1633 n3 = __btree_root_alloc(c, b->level + 1,
1637 n3->sib_u64s[0] = U16_MAX;
1638 n3->sib_u64s[1] = U16_MAX;
1640 btree_split_insert_keys(iter, n3, &as->parent_keys,
1642 bch2_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent);
1645 trace_btree_node_compact(c, b, b->nr.live_u64s);
1647 bch2_btree_build_aux_trees(n1);
1648 six_unlock_write(&n1->lock);
1650 bch2_keylist_add(&as->parent_keys, &n1->key);
1653 bch2_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent);
1655 /* New nodes all written, now make them visible: */
/* Split a non-root node */
1659 bch2_btree_insert_node(parent, iter, &as->parent_keys,
1662 bch2_btree_set_root(iter, n3, as, reserve);
1664 /* Root filled up but didn't need to be split */
1665 bch2_btree_set_root(iter, n1, as, reserve);
1668 bch2_btree_open_bucket_put(c, n1);
1670 bch2_btree_open_bucket_put(c, n2);
1672 bch2_btree_open_bucket_put(c, n3);
1675 * Note - at this point other linked iterators could still have @b read
1676 * locked; we're depending on the bch2_btree_iter_node_replace() calls
1677 * below removing all references to @b so we don't return with other
1678 * iterators pointing to a node they have locked that's been freed.
 * We have to free the node first because the bch2_btree_iter_node_replace()
1681 * calls will drop _our_ iterator's reference - and intent lock - to @b.
1683 bch2_btree_node_free_inmem(iter, b);
1685 /* Successful split, update the iterator to point to the new nodes: */
1688 bch2_btree_iter_node_replace(iter, n3);
1690 bch2_btree_iter_node_replace(iter, n2);
1691 bch2_btree_iter_node_replace(iter, n1);
1693 bch2_time_stats_update(&c->btree_split_time, start_time);
 * bch2_btree_insert_node - insert bkeys into a given btree node
1699 * @iter: btree iterator
1700 * @insert_keys: list of keys to insert
 * @reserve: btree node reserve
 * @as: btree_interior_update this insert is part of
1704 * Inserts as many keys as it can into a given btree node, splitting it if full.
1705 * If a split occurred, this function will return early. This can only happen
1706 * for leaf nodes -- inserts into interior nodes have to be atomic.
1708 void bch2_btree_insert_node(struct btree *b,
1709 struct btree_iter *iter,
1710 struct keylist *insert_keys,
1711 struct btree_reserve *reserve,
1712 struct btree_interior_update *as)
1715 BUG_ON(!reserve || !as);
1717 if ((as->flags & BTREE_INTERIOR_UPDATE_MUST_REWRITE) ||
1718 bch2_btree_insert_keys_interior(b, iter, insert_keys,
1720 btree_split(b, iter, insert_keys, reserve, as);
1723 static int bch2_btree_split_leaf(struct btree_iter *iter, unsigned flags)
1725 struct bch_fs *c = iter->c;
1726 struct btree *b = iter->nodes[0];
1727 struct btree_reserve *reserve;
1728 struct btree_interior_update *as;
1732 closure_init_stack(&cl);
/* Hack, because gc and splitting nodes don't mix yet: */
1735 if (!down_read_trylock(&c->gc_lock)) {
1736 bch2_btree_iter_unlock(iter);
1737 down_read(&c->gc_lock);
1741 * XXX: figure out how far we might need to split,
1742 * instead of locking/reserving all the way to the root:
1744 if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1749 reserve = bch2_btree_reserve_get(c, b, 0, flags, &cl);
1750 if (IS_ERR(reserve)) {
1751 ret = PTR_ERR(reserve);
1752 if (ret == -EAGAIN) {
1753 bch2_btree_iter_unlock(iter);
1754 up_read(&c->gc_lock);
1761 as = bch2_btree_interior_update_alloc(c);
1763 btree_split(b, iter, NULL, reserve, as);
1764 bch2_btree_reserve_put(c, reserve);
1766 bch2_btree_iter_set_locks_want(iter, 1);
1768 up_read(&c->gc_lock);
1772 enum btree_node_sibling {
1777 static struct btree *btree_node_get_sibling(struct btree_iter *iter,
1779 enum btree_node_sibling sib)
1781 struct btree *parent;
1782 struct btree_node_iter node_iter;
1783 struct bkey_packed *k;
1786 unsigned level = b->level;
1788 parent = iter->nodes[level + 1];
1792 if (!bch2_btree_node_relock(iter, level + 1)) {
1793 bch2_btree_iter_set_locks_want(iter, level + 2);
1794 return ERR_PTR(-EINTR);
1797 node_iter = iter->node_iters[parent->level];
1799 k = bch2_btree_node_iter_peek_all(&node_iter, parent);
1800 BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
1803 k = sib == btree_prev_sib
1804 ? bch2_btree_node_iter_prev_all(&node_iter, parent)
1805 : (bch2_btree_node_iter_advance(&node_iter, parent),
1806 bch2_btree_node_iter_peek_all(&node_iter, parent));
1809 } while (bkey_deleted(k));
1811 bch2_bkey_unpack(parent, &tmp.k, k);
1813 ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1815 if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
1816 btree_node_unlock(iter, level);
1817 ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
1820 if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) {
1821 six_unlock_intent(&ret->lock);
1822 ret = ERR_PTR(-EINTR);
1828 static int __foreground_maybe_merge(struct btree_iter *iter,
1829 enum btree_node_sibling sib)
1831 struct bch_fs *c = iter->c;
1832 struct btree_reserve *reserve;
1833 struct btree_interior_update *as;
1834 struct bkey_format_state new_s;
1835 struct bkey_format new_f;
1836 struct bkey_i delete;
1837 struct btree *b, *m, *n, *prev, *next, *parent;
1842 closure_init_stack(&cl);
1844 if (!bch2_btree_node_relock(iter, iter->level))
1847 b = iter->nodes[iter->level];
1849 parent = iter->nodes[b->level + 1];
1853 if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1856 /* XXX: can't be holding read locks */
1857 m = btree_node_get_sibling(iter, b, sib);
1863 /* NULL means no sibling: */
1865 b->sib_u64s[sib] = U16_MAX;
1869 if (sib == btree_prev_sib) {
1877 bch2_bkey_format_init(&new_s);
1878 __bch2_btree_calc_format(&new_s, b);
1879 __bch2_btree_calc_format(&new_s, m);
1880 new_f = bch2_bkey_format_done(&new_s);
1882 sib_u64s = btree_node_u64s_with_format(b, &new_f) +
1883 btree_node_u64s_with_format(m, &new_f);
1885 if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
1886 sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1888 sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
1891 sib_u64s = min(sib_u64s, btree_max_u64s(c));
1892 b->sib_u64s[sib] = sib_u64s;
1894 if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
1895 six_unlock_intent(&m->lock);
1899 /* We're changing btree topology, doesn't mix with gc: */
1900 if (!down_read_trylock(&c->gc_lock)) {
1901 six_unlock_intent(&m->lock);
1902 bch2_btree_iter_unlock(iter);
1904 down_read(&c->gc_lock);
1905 up_read(&c->gc_lock);
1910 if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
1915 reserve = bch2_btree_reserve_get(c, b, 0,
1916 BTREE_INSERT_NOFAIL|
1917 BTREE_INSERT_USE_RESERVE,
1919 if (IS_ERR(reserve)) {
1920 ret = PTR_ERR(reserve);
1924 as = bch2_btree_interior_update_alloc(c);
1926 bch2_btree_interior_update_will_free_node(c, as, b);
1927 bch2_btree_interior_update_will_free_node(c, as, m);
1929 n = bch2_btree_node_alloc(c, b->level, b->btree_id, as, reserve);
1931 n->data->min_key = prev->data->min_key;
1932 n->data->max_key = next->data->max_key;
1933 n->data->format = new_f;
1934 n->key.k.p = next->key.k.p;
1936 btree_node_set_format(n, new_f);
1938 bch2_btree_sort_into(c, n, prev);
1939 bch2_btree_sort_into(c, n, next);
1941 bch2_btree_build_aux_trees(n);
1942 six_unlock_write(&n->lock);
1944 bkey_init(&delete.k);
1945 delete.k.p = prev->key.k.p;
1946 bch2_keylist_add(&as->parent_keys, &delete);
1947 bch2_keylist_add(&as->parent_keys, &n->key);
1949 bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
1951 bch2_btree_insert_node(parent, iter, &as->parent_keys, reserve, as);
1953 bch2_btree_open_bucket_put(c, n);
1954 bch2_btree_node_free_inmem(iter, b);
1955 bch2_btree_node_free_inmem(iter, m);
1956 bch2_btree_iter_node_replace(iter, n);
1958 bch2_btree_iter_verify(iter, n);
1960 bch2_btree_reserve_put(c, reserve);
1962 if (ret != -EINTR && ret != -EAGAIN)
1963 bch2_btree_iter_set_locks_want(iter, 1);
1964 six_unlock_intent(&m->lock);
1965 up_read(&c->gc_lock);
1967 if (ret == -EAGAIN || ret == -EINTR) {
1968 bch2_btree_iter_unlock(iter);
1974 if (ret == -EINTR) {
1975 ret = bch2_btree_iter_traverse(iter);
1983 static int inline foreground_maybe_merge(struct btree_iter *iter,
1984 enum btree_node_sibling sib)
1986 struct bch_fs *c = iter->c;
1989 if (!btree_node_locked(iter, iter->level))
1992 b = iter->nodes[iter->level];
1993 if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
1996 return __foreground_maybe_merge(iter, sib);
 * btree_insert_key - insert a single key into a leaf node
2002 static enum btree_insert_ret
2003 btree_insert_key(struct btree_insert *trans,
2004 struct btree_insert_entry *insert)
2006 struct bch_fs *c = trans->c;
2007 struct btree_iter *iter = insert->iter;
2008 struct btree *b = iter->nodes[0];
2009 enum btree_insert_ret ret;
2010 int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
2011 int old_live_u64s = b->nr.live_u64s;
2012 int live_u64s_added, u64s_added;
2014 iter->flags &= ~BTREE_ITER_UPTODATE;
2016 ret = !btree_node_is_extents(b)
2017 ? bch2_insert_fixup_key(trans, insert)
2018 : bch2_insert_fixup_extent(trans, insert);
2020 live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
2021 u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
2023 if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
2024 b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
2025 if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
2026 b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
2028 if (u64s_added > live_u64s_added &&
2029 bch2_maybe_compact_whiteouts(iter->c, b))
2030 bch2_btree_iter_reinit_node(iter, b);
2032 trace_btree_insert_key(c, b, insert->k);
2036 static bool same_leaf_as_prev(struct btree_insert *trans,
2037 struct btree_insert_entry *i)
2040 * Because we sorted the transaction entries, if multiple iterators
2041 * point to the same leaf node they'll always be adjacent now:
2043 return i != trans->entries &&
2044 i[0].iter->nodes[0] == i[-1].iter->nodes[0];
2047 #define trans_for_each_entry(trans, i) \
2048 for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
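/* Write lock the leaf node for each transaction entry, once per distinct leaf: */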
2050 static void multi_lock_write(struct btree_insert *trans)
2052 struct btree_insert_entry *i;
2054 trans_for_each_entry(trans, i)
2055 if (!same_leaf_as_prev(trans, i))
2056 btree_node_lock_for_insert(i->iter->nodes[0], i->iter);
2059 static void multi_unlock_write(struct btree_insert *trans)
2061 struct btree_insert_entry *i;
2063 trans_for_each_entry(trans, i)
2064 if (!same_leaf_as_prev(trans, i))
2065 bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter);
2068 static int btree_trans_entry_cmp(const void *_l, const void *_r)
2070 const struct btree_insert_entry *l = _l;
2071 const struct btree_insert_entry *r = _r;
2073 return btree_iter_cmp(l->iter, r->iter);
2076 /* Normal update interface: */
 * __bch2_btree_insert_at - insert keys at given iterator positions
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
2084 * -EINTR: locking changed, this function should be called again. Only returned
2085 * if passed BTREE_INSERT_ATOMIC.
2086 * -EROFS: filesystem read only
2087 * -EIO: journal or btree node IO error
2089 int __bch2_btree_insert_at(struct btree_insert *trans)
2091 struct bch_fs *c = trans->c;
2092 struct btree_insert_entry *i;
2093 struct btree_iter *split = NULL;
2094 bool cycle_gc_lock = false;
2098 trans_for_each_entry(trans, i) {
2099 BUG_ON(i->iter->level);
2100 BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
2103 sort(trans->entries, trans->nr, sizeof(trans->entries[0]),
2104 btree_trans_entry_cmp, NULL);
2106 if (unlikely(!percpu_ref_tryget(&c->writes)))
2110 trans_for_each_entry(trans, i)
2111 if (!bch2_btree_iter_set_locks_want(i->iter, 1))
2114 trans->did_work = false;
2116 trans_for_each_entry(trans, i)
2118 u64s += jset_u64s(i->k->k.u64s + i->extra_res);
2120 memset(&trans->journal_res, 0, sizeof(trans->journal_res));
2122 ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
2123 ? bch2_journal_res_get(&c->journal,
2124 &trans->journal_res,
2130 multi_lock_write(trans);
2133 trans_for_each_entry(trans, i) {
/* Multiple inserts might go to the same leaf: */
2135 if (!same_leaf_as_prev(trans, i))
2139 * bch2_btree_node_insert_fits() must be called under write lock:
2140 * with only an intent lock, another thread can still call
 * bch2_btree_node_write(), converting an unwritten bset to a written one:
2145 u64s += i->k->k.u64s + i->extra_res;
2146 if (!bch2_btree_node_insert_fits(c,
2147 i->iter->nodes[0], u64s)) {
2156 cycle_gc_lock = false;
2158 trans_for_each_entry(trans, i) {
2162 switch (btree_insert_key(trans, i)) {
2163 case BTREE_INSERT_OK:
2166 case BTREE_INSERT_JOURNAL_RES_FULL:
2167 case BTREE_INSERT_NEED_TRAVERSE:
2170 case BTREE_INSERT_NEED_RESCHED:
2173 case BTREE_INSERT_BTREE_NODE_FULL:
2176 case BTREE_INSERT_ENOSPC:
2179 case BTREE_INSERT_NEED_GC_LOCK:
2180 cycle_gc_lock = true;
2187 if (!trans->did_work && (ret || split))
2191 multi_unlock_write(trans);
2192 bch2_journal_res_put(&c->journal, &trans->journal_res);
 * hack: iterators are inconsistent when they hit the end of a leaf, until traversed again:
2203 trans_for_each_entry(trans, i)
2204 if (i->iter->flags & BTREE_ITER_AT_END_OF_LEAF)
2207 trans_for_each_entry(trans, i)
2208 if (!same_leaf_as_prev(trans, i)) {
2209 foreground_maybe_merge(i->iter, btree_prev_sib);
2210 foreground_maybe_merge(i->iter, btree_next_sib);
2213 /* make sure we didn't lose an error: */
2214 if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
2215 trans_for_each_entry(trans, i)
2218 percpu_ref_put(&c->writes);
2222 * have to drop journal res before splitting, because splitting means
2223 * allocating new btree nodes, and holding a journal reservation
2224 * potentially blocks the allocator:
2226 ret = bch2_btree_split_leaf(split, trans->flags);
2230 * if the split didn't have to drop locks the insert will still be
2231 * atomic (in the BTREE_INSERT_ATOMIC sense, what the caller peeked()
2232 * and is overwriting won't have changed)
2236 if (cycle_gc_lock) {
2237 down_read(&c->gc_lock);
2238 up_read(&c->gc_lock);
2241 if (ret == -EINTR) {
2242 trans_for_each_entry(trans, i) {
2243 int ret2 = bch2_btree_iter_traverse(i->iter);
 * BTREE_INSERT_ATOMIC means we have to return -EINTR if we dropped locks:
2254 if (!(trans->flags & BTREE_INSERT_ATOMIC))
2261 int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
2268 return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
2269 BTREE_INSERT_NOFAIL|
2270 BTREE_INSERT_USE_RESERVE|flags,
2271 BTREE_INSERT_ENTRY(iter, &k));
2274 int bch2_btree_insert_list_at(struct btree_iter *iter,
2275 struct keylist *keys,
2276 struct disk_reservation *disk_res,
2277 struct extent_insert_hook *hook,
2278 u64 *journal_seq, unsigned flags)
2280 BUG_ON(flags & BTREE_INSERT_ATOMIC);
2281 BUG_ON(bch2_keylist_empty(keys));
2282 verify_keys_sorted(keys);
2284 while (!bch2_keylist_empty(keys)) {
2285 /* need to traverse between each insert */
2286 int ret = bch2_btree_iter_traverse(iter);
2290 ret = bch2_btree_insert_at(iter->c, disk_res, hook,
2292 BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
2296 bch2_keylist_pop_front(keys);
2303 * bch2_btree_insert - insert a single key into btree @id
2304 * @c: pointer to struct bch_fs
2305 * @id: btree to insert into
2306 * @k: key to insert
2307 * @hook: insert callback
2309 int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
2311 struct disk_reservation *disk_res,
2312 struct extent_insert_hook *hook,
2313 u64 *journal_seq, int flags)
2315 struct btree_iter iter;
2318 bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
2321 ret = bch2_btree_iter_traverse(&iter);
2325 ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
2326 BTREE_INSERT_ENTRY(&iter, k));
2327 out: ret2 = bch2_btree_iter_unlock(&iter);
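/*
 * Illustrative sketch, not part of the original file: a typical caller of
 * bch2_btree_insert() that needs neither a disk reservation nor an insert
 * hook simply passes NULL for both.  The helper name is an assumption for
 * illustration; the argument order follows the declaration above, with the
 * key as the third parameter.
 */
static int example_single_insert(struct bch_fs *c, enum btree_id id,
				 struct bkey_i *k, u64 *journal_seq)
{
	return bch2_btree_insert(c, id, k, NULL, NULL, journal_seq,
				 BTREE_INSERT_NOFAIL);
}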
2333 * bch2_btree_update - like bch2_btree_insert(), but asserts that we're
2334 * overwriting an existing key
2336 int bch2_btree_update(struct bch_fs *c, enum btree_id id,
2337 struct bkey_i *k, u64 *journal_seq)
2339 struct btree_iter iter;
2343 EBUG_ON(id == BTREE_ID_EXTENTS);
2345 bch2_btree_iter_init(&iter, c, id, k->k.p,
2348 u = bch2_btree_iter_peek_with_holes(&iter);
2349 ret = btree_iter_err(u);
2353 if (bkey_deleted(u.k)) {
2354 bch2_btree_iter_unlock(&iter);
2358 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0,
2359 BTREE_INSERT_ENTRY(&iter, k));
2360 bch2_btree_iter_unlock(&iter);
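/*
 * Illustrative sketch, not part of the original file: bch2_btree_update() is
 * for keys that must already exist; a nonzero return presumably means either
 * that the key wasn't found or that the insert itself failed.  The helper
 * name is an assumption for illustration only.
 */
static int example_overwrite_existing(struct bch_fs *c, enum btree_id id,
				      struct bkey_i *k, u64 *journal_seq)
{
	/* not legal for the extents btree, per the EBUG_ON() above: */
	return bch2_btree_update(c, id, k, journal_seq);
}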
2365 * bch2_btree_delete_range - delete everything within a given range
2367 * Range is a half-open interval - [start, end)
2369 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
2372 struct bversion version,
2373 struct disk_reservation *disk_res,
2374 struct extent_insert_hook *hook,
2377 struct btree_iter iter;
2381 bch2_btree_iter_init(&iter, c, id, start,
2384 while ((k = bch2_btree_iter_peek(&iter)).k &&
2385 !(ret = btree_iter_err(k))) {
2386 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
2387 /* really shouldn't be using a bare, unpadded bkey_i */
2388 struct bkey_i delete;
2390 if (bkey_cmp(iter.pos, end) >= 0)
2393 bkey_init(&delete.k);
2396 * For extents, iter.pos won't necessarily be the same as
2397 * bkey_start_pos(k.k) (for non-extents they always will be the
2398 * same). It's important that we delete starting from iter.pos
2399 * because the range we want to delete could start in the middle
2400 * of k.
2402 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
2403 * bkey_start_pos(k.k)).
2405 delete.k.p = iter.pos;
2406 delete.k.version = version;
2408 if (iter.flags & BTREE_ITER_IS_EXTENTS) {
2410 * The extents btree is special - KEY_TYPE_DISCARD is
2411 * used for deletions, not KEY_TYPE_DELETED. This is an
2412 * internal implementation detail that probably
2413 * shouldn't be exposed (internally, KEY_TYPE_DELETED is
2414 * used as a proxy for k->size == 0):
2416 delete.k.type = KEY_TYPE_DISCARD;
2418 /* create the biggest key we can */
2419 bch2_key_resize(&delete.k, max_sectors);
2420 bch2_cut_back(end, &delete.k);
2423 ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
2424 BTREE_INSERT_NOFAIL,
2425 BTREE_INSERT_ENTRY(&iter, &delete));
2429 bch2_btree_iter_cond_resched(&iter);
2432 bch2_btree_iter_unlock(&iter);
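/*
 * Illustrative sketch, not part of the original file: deleting every key
 * belonging to one "object" by passing a half-open [start, end) interval.
 * POS(), the inode-style positions and the zeroed bversion are assumptions
 * for illustration only.
 */
static int example_delete_object(struct bch_fs *c, enum btree_id id,
				 u64 inum, u64 *journal_seq)
{
	struct bversion version = { 0 };

	return bch2_btree_delete_range(c, id,
				       POS(inum, 0),
				       POS(inum + 1, 0),
				       version, NULL, NULL, journal_seq);
}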
2436 static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
2437 struct btree *b, unsigned flags,
2440 struct btree *n, *parent = iter->nodes[b->level + 1];
2441 struct btree_reserve *reserve;
2442 struct btree_interior_update *as;
2444 reserve = bch2_btree_reserve_get(c, b, 0, flags, cl);
2445 if (IS_ERR(reserve)) {
2446 trace_btree_gc_rewrite_node_fail(c, b);
2447 return PTR_ERR(reserve);
2450 as = bch2_btree_interior_update_alloc(c);
2452 bch2_btree_interior_update_will_free_node(c, as, b);
2454 n = bch2_btree_node_alloc_replacement(c, b, as, reserve);
2456 bch2_btree_build_aux_trees(n);
2457 six_unlock_write(&n->lock);
2459 trace_btree_gc_rewrite_node(c, b);
2461 bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
2464 bch2_btree_insert_node(parent, iter,
2465 &keylist_single(&n->key),
2468 bch2_btree_set_root(iter, n, as, reserve);
2471 bch2_btree_open_bucket_put(c, n);
2473 bch2_btree_node_free_inmem(iter, b);
2475 BUG_ON(!bch2_btree_iter_node_replace(iter, n));
2477 bch2_btree_reserve_put(c, reserve);
2482 * bch2_btree_node_rewrite - Rewrite/move a btree node
2484 * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e.
2485 * bch2_btree_reserve_get() has to wait)
2487 int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
2488 __le64 seq, unsigned flags)
2490 unsigned locks_want = iter->locks_want;
2495 flags |= BTREE_INSERT_NOFAIL;
2497 closure_init_stack(&cl);
2499 bch2_btree_iter_set_locks_want(iter, U8_MAX);
2501 if (!(flags & BTREE_INSERT_GC_LOCK_HELD)) {
2502 if (!down_read_trylock(&c->gc_lock)) {
2503 bch2_btree_iter_unlock(iter);
2504 down_read(&c->gc_lock);
2509 ret = bch2_btree_iter_traverse(iter);
2513 b = bch2_btree_iter_peek_node(iter);
2514 if (!b || b->data->keys.seq != seq)
2517 ret = __btree_node_rewrite(c, iter, b, flags, &cl);
2518 if (ret != -EAGAIN &&
2522 bch2_btree_iter_unlock(iter);
2526 bch2_btree_iter_set_locks_want(iter, locks_want);
2528 if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
2529 up_read(&c->gc_lock);
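/*
 * Illustrative sketch, not part of the original file: a caller records the
 * node's sequence number while it still holds the node, so that
 * bch2_btree_node_rewrite() can notice if the node was replaced or freed
 * once locks were dropped and skip the rewrite.  The helper name is an
 * assumption for illustration only.
 */
static int example_rewrite_pointed_node(struct bch_fs *c,
					struct btree_iter *iter,
					unsigned flags)
{
	struct btree *b = bch2_btree_iter_peek_node(iter);

	if (!b)
		return 0;

	return bch2_btree_node_rewrite(c, iter, b->data->keys.seq, flags);
}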
2535 int bch2_btree_node_update_key(struct bch_fs *c, struct btree *b,
2536 struct bkey_i_extent *new_key)
2538 struct btree_interior_update *as;
2539 struct btree_reserve *reserve = NULL;
2540 struct btree *parent, *new_hash = NULL;
2541 struct btree_iter iter;
2543 bool must_rewrite_parent = false;
2546 __bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
2549 closure_init_stack(&cl);
2551 if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
2552 /* bch2_btree_reserve_get will unlock */
2554 ret = bch2_btree_node_cannibalize_lock(c, &cl);
2556 } while (ret == -EAGAIN);
2560 new_hash = bch2_btree_node_mem_alloc(c);
2563 reserve = bch2_btree_reserve_get(c, b, 0,
2564 BTREE_INSERT_NOFAIL|
2565 BTREE_INSERT_USE_RESERVE|
2566 BTREE_INSERT_USE_ALLOC_RESERVE,
2569 if (IS_ERR(reserve)) {
2570 ret = PTR_ERR(reserve);
2571 if (ret == -EAGAIN || ret == -EINTR)
2576 down_read(&c->gc_lock);
2578 ret = bch2_btree_iter_traverse(&iter);
2582 mutex_lock(&c->btree_interior_update_lock);
2585 * Two corner cases that need to be thought about here:
2587 * @b may not be reachable yet - there might be another interior update
2588 * operation waiting on @b to be written, and we're going to deliver the
2589 * write completion to that interior update operation _before_
2590 * persisting the new_key update
2592 * That ends up working without us having to do anything special here:
2593 * the reason is, we do kick off (and do the in memory updates) for the
2594 * update for @new_key before we return, creating a new interior_update
2597 * The new interior update operation here will in effect override the
2598 * previous one. The previous one was going to terminate - make @b
2599 * reachable - in one of two ways:
2600 * - updating the btree root pointer
2602 * That reasoning doesn't hold in all cases, which is why must_rewrite_parent is set below:
2605 if (b->will_make_reachable)
2606 must_rewrite_parent = true;
2608 /* other case: btree node being freed */
2609 if (iter.nodes[b->level] != b) {
2610 /* node has been freed: */
2611 BUG_ON(btree_node_hashed(b));
2612 mutex_unlock(&c->btree_interior_update_lock);
2616 mutex_unlock(&c->btree_interior_update_lock);
2618 ret = bch2_check_mark_super(c, extent_i_to_s_c(new_key), BCH_DATA_BTREE);
2622 as = bch2_btree_interior_update_alloc(c);
2624 if (must_rewrite_parent)
2625 as->flags |= BTREE_INTERIOR_UPDATE_MUST_REWRITE;
2627 bch2_btree_interior_update_add_node_reference(c, as, b);
2630 bkey_copy(&new_hash->key, &new_key->k_i);
2631 BUG_ON(bch2_btree_node_hash_insert(c, new_hash,
2632 b->level, b->btree_id));
2635 parent = iter.nodes[b->level + 1];
2637 bch2_btree_insert_node(parent, &iter,
2638 &keylist_single(&b->key),
2641 bch2_btree_set_root(&iter, b, as, reserve);
2645 mutex_lock(&c->btree_cache_lock);
2646 bch2_btree_node_hash_remove(c, b);
2648 bkey_copy(&b->key, &new_key->k_i);
2649 __bch2_btree_node_hash_insert(c, b);
2651 bch2_btree_node_hash_remove(c, new_hash);
2652 mutex_unlock(&c->btree_cache_lock);
2654 bkey_copy(&b->key, &new_key->k_i);
2657 if (!IS_ERR_OR_NULL(reserve))
2658 bch2_btree_reserve_put(c, reserve);
2660 mutex_lock(&c->btree_cache_lock);
2661 list_move(&b->list, &c->btree_cache_freeable);
2662 mutex_unlock(&c->btree_cache_lock);
2664 six_unlock_write(&new_hash->lock);
2665 six_unlock_intent(&new_hash->lock);
2667 bch2_btree_iter_unlock(&iter);
2668 up_read(&c->gc_lock);