// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "journal_reclaim.h"
#include "keylist.h"

#include <linux/random.h>
#include <trace/events/bcachefs.h>
static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
				   struct btree_iter *, struct btree *,
				   struct keylist *, unsigned);

/*
 * Verify that child nodes correctly span parent node's range:
 */
static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bpos next_node = b->data->min_key;
	struct btree_node_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_btree_ptr_v2 bp;
	struct bkey unpacked;
	char buf1[100], buf2[100];

	if (!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags))
		return;

	bch2_btree_node_iter_init_from_start(&iter, b);

	while (1) {
		k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked);
		if (k.k->type != KEY_TYPE_btree_ptr_v2)
			break;
		bp = bkey_s_c_to_btree_ptr_v2(k);

		if (bpos_cmp(next_node, bp.v->min_key)) {
			bch2_dump_btree_node(c, b);
			panic("expected next min_key %s got %s\n",
			      (bch2_bpos_to_text(&PBUF(buf1), next_node), buf1),
			      (bch2_bpos_to_text(&PBUF(buf2), bp.v->min_key), buf2));
		}

		bch2_btree_node_iter_advance(&iter, b);

		if (bch2_btree_node_iter_end(&iter)) {
			if (bpos_cmp(k.k->p, b->key.k.p)) {
				bch2_dump_btree_node(c, b);
				panic("expected end %s got %s\n",
				      (bch2_bpos_to_text(&PBUF(buf1), b->key.k.p), buf1),
				      (bch2_bpos_to_text(&PBUF(buf2), k.k->p), buf2));
			}
			break;
		}

		next_node = bpos_successor(k.k->p);
	}
#endif
}
/* Calculate ideal packed bkey format for new btree nodes: */

void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
{
	struct bkey_packed *k;
	struct bset_tree *t;
	struct bkey uk;

	bset_tree_for_each_key(b, t, k)
		if (!bkey_deleted(k)) {
			uk = bkey_unpack_key(b, k);
			bch2_bkey_format_add_key(s, &uk);
		}
}
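
/*
 * The helper below builds a format that can represent the node's min/max
 * positions as well as every live key it currently contains, so that any of
 * them can be repacked:
 */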
static struct bkey_format bch2_btree_calc_format(struct btree *b)
{
	struct bkey_format_state s;

	bch2_bkey_format_init(&s);
	bch2_bkey_format_add_pos(&s, b->data->min_key);
	bch2_bkey_format_add_pos(&s, b->data->max_key);
	__bch2_btree_calc_format(&s, b);

	return bch2_bkey_format_done(&s);
}
static size_t btree_node_u64s_with_format(struct btree *b,
					  struct bkey_format *new_f)
{
	struct bkey_format *old_f = &b->format;

	/* stupid integer promotion rules */
	ssize_t delta =
	    (((int) new_f->key_u64s - old_f->key_u64s) *
	     (int) b->nr.packed_keys) +
	    (((int) new_f->key_u64s - BKEY_U64s) *
	     (int) b->nr.unpacked_keys);

	BUG_ON(delta + b->nr.live_u64s < 0);

	return b->nr.live_u64s + delta;
}
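
/*
 * Worked example for the delta computation above (illustrative numbers only,
 * not from the source): repacking 100 packed keys from key_u64s == 8 to
 * key_u64s == 6 contributes (6 - 8) * 100 = -200 u64s, and 10 unpacked keys
 * (stored at BKEY_U64s) contribute (6 - BKEY_U64s) * 10; the node's size with
 * the new format is live_u64s plus that (possibly negative) delta.
 */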
/*
 * bch2_btree_node_format_fits - check if we could rewrite node with a new format
 *
 * This assumes all keys can pack with the new format -- it just checks if
 * the re-packed keys would fit inside the node itself.
 */
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
				 struct bkey_format *new_f)
{
	size_t u64s = btree_node_u64s_with_format(b, new_f);

	return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
}
/* Btree node freeing/allocation: */

static void __btree_node_free(struct bch_fs *c, struct btree *b)
{
	trace_btree_node_free(c, b);

	BUG_ON(btree_node_dirty(b));
	BUG_ON(btree_node_need_write(b));
	BUG_ON(b == btree_node_root(c, b));
	BUG_ON(!list_empty(&b->write_blocked));
	BUG_ON(b->will_make_reachable);

	clear_btree_node_noevict(b);

	bch2_btree_node_hash_remove(&c->btree_cache, b);

	mutex_lock(&c->btree_cache.lock);
	list_move(&b->list, &c->btree_cache.freeable);
	mutex_unlock(&c->btree_cache.lock);
}
void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
{
	struct open_buckets ob = b->ob;

	b->ob.nr = 0;

	clear_btree_node_dirty(c, b);

	btree_node_lock_type(c, b, SIX_LOCK_write);
	__btree_node_free(c, b);
	six_unlock_write(&b->c.lock);

	bch2_open_buckets_put(c, &ob);
}
void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
				struct btree_iter *iter)
{
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		BUG_ON(linked->l[b->c.level].b == b);

	six_lock_write(&b->c.lock, NULL, NULL);
	__btree_node_free(c, b);
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);
}
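
/*
 * Allocate a new btree node on disk: the node is taken from the
 * btree_reserve_cache when possible, otherwise fresh sectors are requested
 * from the allocator:
 */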
static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
					     struct disk_reservation *res,
					     struct closure *cl,
					     unsigned flags)
{
	struct write_point *wp;
	struct btree *b;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
	struct open_buckets ob = { .nr = 0 };
	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
	unsigned nr_reserve;
	enum alloc_reserve alloc_reserve;

	if (flags & BTREE_INSERT_USE_RESERVE) {
		nr_reserve	= 0;
		alloc_reserve	= RESERVE_BTREE_MOVINGGC;
	} else {
		nr_reserve	= BTREE_NODE_RESERVE;
		alloc_reserve	= RESERVE_BTREE;
	}

	mutex_lock(&c->btree_reserve_cache_lock);
	if (c->btree_reserve_cache_nr > nr_reserve) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		ob = a->ob;
		bkey_copy(&tmp.k, &a->k);
		mutex_unlock(&c->btree_reserve_cache_lock);
		goto mem_alloc;
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

retry:
	wp = bch2_alloc_sectors_start(c,
				      c->opts.metadata_target ?:
				      c->opts.foreground_target,
				      0,
				      writepoint_ptr(&c->btree_write_point),
				      &devs_have,
				      res->nr_replicas,
				      c->opts.metadata_replicas_required,
				      alloc_reserve, 0, cl);
	if (IS_ERR(wp))
		return ERR_CAST(wp);

	if (wp->sectors_free < c->opts.btree_node_size) {
		struct open_bucket *ob;
		unsigned i;

		open_bucket_for_each(c, &wp->ptrs, ob, i)
			if (ob->sectors_free < c->opts.btree_node_size)
				ob->sectors_free = 0;

		bch2_alloc_sectors_done(c, wp);
		goto retry;
	}

	bkey_btree_ptr_v2_init(&tmp.k);
	bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, c->opts.btree_node_size);

	bch2_open_bucket_get(c, wp, &ob);
	bch2_alloc_sectors_done(c, wp);
mem_alloc:
	b = bch2_btree_node_mem_alloc(c);

	/* we hold cannibalize_lock: */
	BUG_ON(IS_ERR(b));
	BUG_ON(b->ob.nr);

	bkey_copy(&b->key, &tmp.k);
	b->ob = ob;

	return b;
}
static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned level)
{
	struct bch_fs *c = as->c;
	struct btree *b;
	int ret;

	BUG_ON(level >= BTREE_MAX_DEPTH);
	BUG_ON(!as->nr_prealloc_nodes);

	b = as->prealloc_nodes[--as->nr_prealloc_nodes];

	set_btree_node_accessed(b);
	set_btree_node_dirty(c, b);
	set_btree_node_need_write(b);

	bch2_bset_init_first(b, &b->data->keys);
	b->c.level	= level;
	b->c.btree_id	= as->btree_id;
	b->version_ondisk = c->sb.version;

	memset(&b->nr, 0, sizeof(b->nr));
	b->data->magic = cpu_to_le64(bset_magic(c));
	memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));
	SET_BTREE_NODE_ID(b->data, as->btree_id);
	SET_BTREE_NODE_LEVEL(b->data, level);

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key);

		bp->v.mem_ptr		= 0;
		bp->v.seq		= b->data->keys.seq;
		bp->v.sectors_written	= 0;
	}

	SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true);

	bch2_btree_build_aux_trees(b);

	ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
	BUG_ON(ret);

	trace_btree_node_alloc(c, b);
	return b;
}
static void btree_set_min(struct btree *b, struct bpos pos)
{
	if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos;
	b->data->min_key = pos;
}

static void btree_set_max(struct btree *b, struct bpos pos)
{
	b->key.k.p = pos;
	b->data->max_key = pos;
}
struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
						  struct btree *b,
						  struct bkey_format format)
{
	struct btree *n;

	n = bch2_btree_node_alloc(as, b->c.level);

	SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);

	btree_set_min(n, b->data->min_key);
	btree_set_max(n, b->data->max_key);

	n->data->format		= format;
	btree_node_set_format(n, format);

	bch2_btree_sort_into(as->c, n, b);

	btree_node_reset_sib_u64s(n);

	n->key.k.p = b->key.k.p;
	return n;
}
static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
						       struct btree *b)
{
	struct bkey_format new_f = bch2_btree_calc_format(b);

	/*
	 * The keys might expand with the new format - if they wouldn't fit in
	 * the btree node anymore, use the old format for now:
	 */
	if (!bch2_btree_node_format_fits(as->c, b, &new_f))
		new_f = b->format;

	return __bch2_btree_node_alloc_replacement(as, b, new_f);
}
static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
{
	struct btree *b = bch2_btree_node_alloc(as, level);

	btree_set_min(b, POS_MIN);
	btree_set_max(b, SPOS_MAX);
	b->data->format = bch2_btree_calc_format(b);

	btree_node_set_format(b, b->data->format);
	bch2_btree_build_aux_trees(b);

	bch2_btree_update_add_new_node(as, b);
	six_unlock_write(&b->c.lock);

	return b;
}
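
/*
 * Put back any preallocated-but-unused nodes: their keys and open buckets are
 * stashed in the btree_reserve_cache when there's room, otherwise the nodes
 * are freed:
 */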
static void bch2_btree_reserve_put(struct btree_update *as)
{
	struct bch_fs *c = as->c;

	mutex_lock(&c->btree_reserve_cache_lock);

	while (as->nr_prealloc_nodes) {
		struct btree *b = as->prealloc_nodes[--as->nr_prealloc_nodes];

		six_unlock_write(&b->c.lock);

		if (c->btree_reserve_cache_nr <
		    ARRAY_SIZE(c->btree_reserve_cache)) {
			struct btree_alloc *a =
				&c->btree_reserve_cache[c->btree_reserve_cache_nr++];

			a->ob = b->ob;
			b->ob.nr = 0;
			bkey_copy(&a->k, &b->key);
		} else {
			bch2_open_buckets_put(c, &b->ob);
		}

		btree_node_lock_type(c, b, SIX_LOCK_write);
		__btree_node_free(c, b);
		six_unlock_write(&b->c.lock);

		six_unlock_intent(&b->c.lock);
	}

	mutex_unlock(&c->btree_reserve_cache_lock);
}
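
/*
 * Preallocate the nodes an interior update might need, holding the
 * cannibalize lock so allocation can always make progress by reaping from
 * the btree node cache:
 */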
static int bch2_btree_reserve_get(struct btree_update *as, unsigned nr_nodes,
				  unsigned flags, struct closure *cl)
{
	struct bch_fs *c = as->c;
	struct btree *b;
	int ret;

	BUG_ON(nr_nodes > BTREE_RESERVE_MAX);

	/*
	 * Protects reaping from the btree node cache and using the btree node
	 * open bucket reserve:
	 */
	ret = bch2_btree_cache_cannibalize_lock(c, cl);
	if (ret)
		return ret;

	while (as->nr_prealloc_nodes < nr_nodes) {
		b = __bch2_btree_node_alloc(c, &as->disk_res,
					    flags & BTREE_INSERT_NOWAIT
					    ? NULL : cl, flags);
		if (IS_ERR(b)) {
			ret = PTR_ERR(b);
			goto err_free;
		}

		as->prealloc_nodes[as->nr_prealloc_nodes++] = b;
	}

	bch2_btree_cache_cannibalize_unlock(c);
	return 0;
err_free:
	bch2_btree_cache_cannibalize_unlock(c);
	trace_btree_reserve_get_fail(c, nr_nodes, cl);
	return ret;
}
/* Asynchronous interior node update machinery */

static void bch2_btree_update_free(struct btree_update *as)
{
	struct bch_fs *c = as->c;

	if (as->took_gc_lock)
		up_read(&c->gc_lock);
	as->took_gc_lock = false;

	bch2_journal_preres_put(&c->journal, &as->journal_preres);
	bch2_journal_pin_drop(&c->journal, &as->journal);
	bch2_journal_pin_flush(&c->journal, &as->journal);
	bch2_disk_reservation_put(c, &as->disk_res);
	bch2_btree_reserve_put(as);

	mutex_lock(&c->btree_interior_update_lock);
	list_del(&as->unwritten_list);
	list_del(&as->list);
	mutex_unlock(&c->btree_interior_update_lock);

	closure_debug_destroy(&as->cl);
	mempool_free(as, &c->btree_interior_update_pool);

	closure_wake_up(&c->btree_interior_update_wait);
}
static void btree_update_will_delete_key(struct btree_update *as,
					 struct bkey_i *k)
{
	BUG_ON(bch2_keylist_u64s(&as->old_keys) + k->k.u64s >
	       ARRAY_SIZE(as->_old_keys));
	bch2_keylist_add(&as->old_keys, k);
}

static void btree_update_will_add_key(struct btree_update *as,
				      struct bkey_i *k)
{
	BUG_ON(bch2_keylist_u64s(&as->new_keys) + k->k.u64s >
	       ARRAY_SIZE(as->_new_keys));
	bch2_keylist_add(&as->new_keys, k);
}
/*
 * The transactional part of an interior btree node update, where we journal the
 * update we did to the interior node and update alloc info:
 */
static int btree_update_nodes_written_trans(struct btree_trans *trans,
					    struct btree_update *as)
{
	struct bkey_i *k;
	int ret;

	trans->extra_journal_entries = (void *) &as->journal_entries[0];
	trans->extra_journal_entry_u64s = as->journal_u64s;
	trans->journal_pin = &as->journal;

	for_each_keylist_key(&as->new_keys, k) {
		ret = bch2_trans_mark_key(trans,
					  bkey_s_c_null,
					  bkey_i_to_s_c(k),
					  BTREE_TRIGGER_INSERT);
		if (ret)
			return ret;
	}

	for_each_keylist_key(&as->old_keys, k) {
		ret = bch2_trans_mark_key(trans,
					  bkey_i_to_s_c(k),
					  bkey_s_c_null,
					  BTREE_TRIGGER_OVERWRITE);
		if (ret)
			return ret;
	}

	return 0;
}
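
/*
 * Completion path for an interior update: runs once all the new nodes the
 * update made reachable have been written, journalling the update and then
 * writing out the updated interior node(s):
 */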
static void btree_update_nodes_written(struct btree_update *as)
{
	struct bch_fs *c = as->c;
	struct btree *b = as->b;
	struct btree_trans trans;
	u64 journal_seq = 0;
	unsigned i;
	int ret;

	/*
	 * If we're already in an error state, it might be because a btree node
	 * was never written, and we might be trying to free that same btree
	 * node here, but it won't have been marked as allocated and we'll see
	 * spurious disk usage inconsistencies in the transactional part below
	 * if we don't skip it:
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	BUG_ON(!journal_pin_active(&as->journal));

	/*
	 * Wait for any in flight writes to finish before we free the old nodes
	 * on disk:
	 */
	for (i = 0; i < as->nr_old_nodes; i++) {
		struct btree *old = as->old_nodes[i];
		__le64 seq;

		six_lock_read(&old->c.lock, NULL, NULL);
		seq = old->data ? old->data->keys.seq : 0;
		six_unlock_read(&old->c.lock);

		if (seq == as->old_nodes_seq[i])
			wait_on_bit_io(&old->flags, BTREE_NODE_write_in_flight_inner,
				       TASK_UNINTERRUPTIBLE);
	}

	/*
	 * We did an update to a parent node where the pointers we added pointed
	 * to child nodes that weren't written yet: now, the child nodes have
	 * been written so we can write out the update to the interior node.
	 */

	/*
	 * We can't call into journal reclaim here: we'd block on the journal
	 * reclaim lock, but we may need to release the open buckets we have
	 * pinned in order for other btree updates to make forward progress, and
	 * journal reclaim does btree updates when flushing bkey_cached entries,
	 * which may require allocations as well.
	 */
	bch2_trans_init(&trans, c, 0, 512);
	ret = __bch2_trans_do(&trans, &as->disk_res, &journal_seq,
			      BTREE_INSERT_NOFAIL|
			      BTREE_INSERT_NOCHECK_RW|
			      BTREE_INSERT_JOURNAL_RECLAIM|
			      BTREE_INSERT_JOURNAL_RESERVED,
			      btree_update_nodes_written_trans(&trans, as));
	bch2_trans_exit(&trans);

	bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
			     "error %i in btree_update_nodes_written()", ret);
err:
	if (b) {
		/*
		 * @b is the node we did the final insert into:
		 *
		 * On failure to get a journal reservation, we still have to
		 * unblock the write and allow most of the write path to happen
		 * so that shutdown works, but the i->journal_seq mechanism
		 * won't work to prevent the btree write from being visible (we
		 * didn't get a journal sequence number) - instead
		 * __bch2_btree_node_write() doesn't do the actual write if
		 * we're in journal error state:
		 */

		btree_node_lock_type(c, b, SIX_LOCK_intent);
		btree_node_lock_type(c, b, SIX_LOCK_write);
		mutex_lock(&c->btree_interior_update_lock);

		list_del(&as->write_blocked_list);

		/*
		 * Node might have been freed, recheck under
		 * btree_interior_update_lock:
		 */
		if (as->b == b) {
			struct bset *i = btree_bset_last(b);

			BUG_ON(!btree_node_dirty(b));

			if (!ret) {
				i->journal_seq = cpu_to_le64(
					max(journal_seq,
					    le64_to_cpu(i->journal_seq)));

				bch2_btree_add_journal_pin(c, b, journal_seq);
			} else {
				/*
				 * If we didn't get a journal sequence number we
				 * can't write this btree node, because recovery
				 * won't know to ignore this write:
				 */
				set_btree_node_never_write(b);
			}
		}

		mutex_unlock(&c->btree_interior_update_lock);
		six_unlock_write(&b->c.lock);

		btree_node_write_if_need(c, b, SIX_LOCK_intent);
		six_unlock_intent(&b->c.lock);
	}

	bch2_journal_pin_drop(&c->journal, &as->journal);

	bch2_journal_preres_put(&c->journal, &as->journal_preres);

	mutex_lock(&c->btree_interior_update_lock);
	for (i = 0; i < as->nr_new_nodes; i++) {
		b = as->new_nodes[i];

		BUG_ON(b->will_make_reachable != (unsigned long) as);
		b->will_make_reachable = 0;
	}
	mutex_unlock(&c->btree_interior_update_lock);

	for (i = 0; i < as->nr_new_nodes; i++) {
		b = as->new_nodes[i];

		btree_node_lock_type(c, b, SIX_LOCK_read);
		btree_node_write_if_need(c, b, SIX_LOCK_read);
		six_unlock_read(&b->c.lock);
	}

	for (i = 0; i < as->nr_open_buckets; i++)
		bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]);

	bch2_btree_update_free(as);
}
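
/*
 * Worker: drains the list of interior updates whose new nodes have all been
 * written, completing them one at a time:
 */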
static void btree_interior_update_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, btree_interior_update_work);
	struct btree_update *as;

	while (1) {
		mutex_lock(&c->btree_interior_update_lock);
		as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
					      struct btree_update, unwritten_list);
		if (as && !as->nodes_written)
			as = NULL;
		mutex_unlock(&c->btree_interior_update_lock);

		if (!as)
			break;

		btree_update_nodes_written(as);
	}
}
static void btree_update_set_nodes_written(struct closure *cl)
{
	struct btree_update *as = container_of(cl, struct btree_update, cl);
	struct bch_fs *c = as->c;

	mutex_lock(&c->btree_interior_update_lock);
	as->nodes_written = true;
	mutex_unlock(&c->btree_interior_update_lock);

	queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
}
/*
 * We're updating @b with pointers to nodes that haven't finished writing yet:
 * block @b from being written until @as completes
 */
static void btree_update_updated_node(struct btree_update *as, struct btree *b)
{
	struct bch_fs *c = as->c;

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);

	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
	BUG_ON(!btree_node_dirty(b));

	as->mode	= BTREE_INTERIOR_UPDATING_NODE;
	as->b		= b;
	list_add(&as->write_blocked_list, &b->write_blocked);

	mutex_unlock(&c->btree_interior_update_lock);
}
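
/*
 * Redirect @child to wait on @as instead: @as takes over @child's journal
 * pin, used when @as is freeing a node that @child was blocked on:
 */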
static void btree_update_reparent(struct btree_update *as,
				  struct btree_update *child)
{
	struct bch_fs *c = as->c;

	lockdep_assert_held(&c->btree_interior_update_lock);

	child->b = NULL;
	child->mode = BTREE_INTERIOR_UPDATING_AS;

	bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL);
}
static void btree_update_updated_root(struct btree_update *as, struct btree *b)
{
	struct bkey_i *insert = &b->key;
	struct bch_fs *c = as->c;

	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);

	BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
	       ARRAY_SIZE(as->journal_entries));

	as->journal_u64s +=
		journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
				  BCH_JSET_ENTRY_btree_root,
				  b->c.btree_id, b->c.level,
				  insert, insert->k.u64s);

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);

	as->mode = BTREE_INTERIOR_UPDATING_ROOT;
	mutex_unlock(&c->btree_interior_update_lock);
}
/*
 * bch2_btree_update_add_new_node:
 *
 * This causes @as to wait on @b to be written, before it gets to
 * bch2_btree_update_nodes_written
 *
 * Additionally, it sets b->will_make_reachable to prevent any additional writes
 * to @b from happening besides the first until @b is reachable on disk
 *
 * And it adds @b to the list of @as's new nodes, so that we can update sector
 * counts in bch2_btree_update_nodes_written:
 */
void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
{
	struct bch_fs *c = as->c;

	closure_get(&as->cl);

	mutex_lock(&c->btree_interior_update_lock);
	BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
	BUG_ON(b->will_make_reachable);

	as->new_nodes[as->nr_new_nodes++] = b;
	b->will_make_reachable = 1UL|(unsigned long) as;

	mutex_unlock(&c->btree_interior_update_lock);

	btree_update_will_add_key(as, &b->key);
}
/*
 * Drop @b from the list of nodes its btree_update was going to make
 * reachable, dropping that update's ref on @b:
 */
static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
{
	struct btree_update *as;
	unsigned long v;
	unsigned i;

	mutex_lock(&c->btree_interior_update_lock);
	/*
	 * When b->will_make_reachable != 0, it owns a ref on as->cl that's
	 * dropped when it gets written by bch2_btree_complete_write - the
	 * xchg() is for synchronization with bch2_btree_complete_write:
	 */
	v = xchg(&b->will_make_reachable, 0);
	as = (struct btree_update *) (v & ~1UL);

	if (!as) {
		mutex_unlock(&c->btree_interior_update_lock);
		return;
	}

	for (i = 0; i < as->nr_new_nodes; i++)
		if (as->new_nodes[i] == b)
			goto found;

	BUG();
found:
	array_remove_item(as->new_nodes, as->nr_new_nodes, i);
	mutex_unlock(&c->btree_interior_update_lock);

	if (v & 1)
		closure_put(&as->cl);
}
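
/*
 * Transfer the open buckets pinned by @b's key to @as, to be released once
 * the update completes and the new nodes are reachable on disk:
 */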
void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
{
	while (b->ob.nr)
		as->open_buckets[as->nr_open_buckets++] =
			b->ob.v[--b->ob.nr];
}
/*
 * @b is being split/rewritten: it may have pointers to not-yet-written btree
 * nodes and thus outstanding btree_updates - redirect @b's
 * btree_updates to point to this btree_update:
 */
void bch2_btree_interior_update_will_free_node(struct btree_update *as,
					       struct btree *b)
{
	struct bch_fs *c = as->c;
	struct btree_update *p, *n;
	struct btree_write *w;

	set_btree_node_dying(b);

	if (btree_node_fake(b))
		return;

	mutex_lock(&c->btree_interior_update_lock);

	/*
	 * Does this node have any btree_update operations preventing
	 * it from being written?
	 *
	 * If so, redirect them to point to this btree_update: we can
	 * write out our new nodes, but we won't make them visible until those
	 * operations complete
	 */
	list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
		list_del_init(&p->write_blocked_list);
		btree_update_reparent(as, p);

		/*
		 * for flush_held_btree_writes() waiting on updates to flush or
		 * nodes to be writeable:
		 */
		closure_wake_up(&c->btree_interior_update_wait);
	}

	clear_btree_node_dirty(c, b);
	clear_btree_node_need_write(b);

	/*
	 * Does this node have unwritten data that has a pin on the journal?
	 *
	 * If so, transfer that pin to the btree_update operation -
	 * note that if we're freeing multiple nodes, we only need to keep the
	 * oldest pin of any of the nodes we're freeing. We'll release the pin
	 * when the new nodes are persistent and reachable on disk:
	 */
	w = btree_current_write(b);
	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
	bch2_journal_pin_drop(&c->journal, &w->journal);

	w = btree_prev_write(b);
	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
	bch2_journal_pin_drop(&c->journal, &w->journal);

	mutex_unlock(&c->btree_interior_update_lock);

	/*
	 * Is this a node that isn't reachable on disk yet?
	 *
	 * Nodes that aren't reachable yet have writes blocked until they're
	 * reachable - now that we've cancelled any pending writes and moved
	 * things waiting on that write to wait on this update, we can drop this
	 * node from the list of nodes that the other update is making
	 * reachable, prior to freeing it:
	 */
	btree_update_drop_new_node(c, b);

	btree_update_will_delete_key(as, &b->key);

	as->old_nodes[as->nr_old_nodes] = b;
	as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq;
	as->nr_old_nodes++;
}
void bch2_btree_update_done(struct btree_update *as)
{
	BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);

	if (as->took_gc_lock)
		up_read(&as->c->gc_lock);
	as->took_gc_lock = false;

	bch2_btree_reserve_put(as);

	continue_at(&as->cl, btree_update_set_nodes_written,
		    as->c->btree_interior_update_worker);
}
struct btree_update *
bch2_btree_update_start(struct btree_iter *iter, unsigned level,
			unsigned nr_nodes, unsigned flags)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct btree_update *as;
	struct closure cl;
	int disk_res_flags = (flags & BTREE_INSERT_NOFAIL)
		? BCH_DISK_RESERVATION_NOFAIL : 0;
	int journal_flags = 0;
	int ret = 0;

	BUG_ON(!iter->should_be_locked);

	if (flags & BTREE_INSERT_JOURNAL_RESERVED)
		journal_flags |= JOURNAL_RES_GET_RESERVED;

	closure_init_stack(&cl);
retry:
	/*
	 * XXX: figure out how far we might need to split,
	 * instead of locking/reserving all the way to the root:
	 */
	if (!bch2_btree_iter_upgrade(iter, U8_MAX)) {
		trace_trans_restart_iter_upgrade(trans->ip, _RET_IP_,
						 iter->btree_id,
						 &iter->real_pos);
		return ERR_PTR(-EINTR);
	}

	if (flags & BTREE_INSERT_GC_LOCK_HELD)
		lockdep_assert_held(&c->gc_lock);
	else if (!down_read_trylock(&c->gc_lock)) {
		if (flags & BTREE_INSERT_NOUNLOCK)
			return ERR_PTR(-EINTR);

		bch2_trans_unlock(trans);
		down_read(&c->gc_lock);
		if (!bch2_trans_relock(trans)) {
			up_read(&c->gc_lock);
			return ERR_PTR(-EINTR);
		}
	}

	as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
	memset(as, 0, sizeof(*as));
	closure_init(&as->cl, NULL);
	as->c		= c;
	as->mode	= BTREE_INTERIOR_NO_UPDATE;
	as->took_gc_lock = !(flags & BTREE_INSERT_GC_LOCK_HELD);
	as->btree_id	= iter->btree_id;
	INIT_LIST_HEAD(&as->list);
	INIT_LIST_HEAD(&as->unwritten_list);
	INIT_LIST_HEAD(&as->write_blocked_list);
	bch2_keylist_init(&as->old_keys, as->_old_keys);
	bch2_keylist_init(&as->new_keys, as->_new_keys);
	bch2_keylist_init(&as->parent_keys, as->inline_keys);

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->list, &c->btree_interior_update_list);
	mutex_unlock(&c->btree_interior_update_lock);

	/*
	 * We don't want to allocate if we're in an error state, that can cause
	 * deadlock on emergency shutdown due to open buckets getting stuck in
	 * the btree_reserve_cache after allocator shutdown has cleared it out.
	 * This check needs to come after adding us to the btree_interior_update
	 * list but before calling bch2_btree_reserve_get, to synchronize with
	 * __bch2_fs_read_only().
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
				      BTREE_UPDATE_JOURNAL_RES,
				      journal_flags|JOURNAL_RES_GET_NONBLOCK);
	if (ret == -EAGAIN) {
		/*
		 * this would be cleaner if bch2_journal_preres_get() took a
		 * closure argument
		 */
		if (flags & BTREE_INSERT_NOUNLOCK) {
			trace_trans_restart_journal_preres_get(trans->ip, _RET_IP_);
			ret = -EINTR;
			goto err;
		}

		bch2_trans_unlock(trans);

		if (flags & BTREE_INSERT_JOURNAL_RECLAIM) {
			bch2_btree_update_free(as);
			return ERR_PTR(ret);
		}

		ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
					      BTREE_UPDATE_JOURNAL_RES,
					      journal_flags);
		if (ret) {
			trace_trans_restart_journal_preres_get(trans->ip, _RET_IP_);
			goto err;
		}

		if (!bch2_trans_relock(trans)) {
			ret = -EINTR;
			goto err;
		}
	}

	ret = bch2_disk_reservation_get(c, &as->disk_res,
			nr_nodes * c->opts.btree_node_size,
			c->opts.metadata_replicas,
			disk_res_flags);
	if (ret)
		goto err;

	ret = bch2_btree_reserve_get(as, nr_nodes, flags,
		!(flags & BTREE_INSERT_NOUNLOCK) ? &cl : NULL);
	if (ret)
		goto err;

	bch2_journal_pin_add(&c->journal,
			     atomic64_read(&c->journal.seq),
			     &as->journal, NULL);

	return as;
err:
	bch2_btree_update_free(as);

	if (ret == -EAGAIN) {
		BUG_ON(flags & BTREE_INSERT_NOUNLOCK);

		bch2_trans_unlock(trans);
		closure_sync(&cl);
		ret = -EINTR;
	}

	if (ret == -EINTR && bch2_trans_relock(trans))
		goto retry;

	return ERR_PTR(ret);
}
/* Btree root updates: */

static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
{
	/* Root nodes cannot be reaped */
	mutex_lock(&c->btree_cache.lock);
	list_del_init(&b->list);
	mutex_unlock(&c->btree_cache.lock);

	if (b->c.level)
		six_lock_pcpu_alloc(&b->c.lock);
	else
		six_lock_pcpu_free(&b->c.lock);

	mutex_lock(&c->btree_root_lock);
	BUG_ON(btree_node_root(c, b) &&
	       (b->c.level < btree_node_root(c, b)->c.level ||
		!btree_node_dying(btree_node_root(c, b))));

	btree_node_root(c, b) = b;
	mutex_unlock(&c->btree_root_lock);

	bch2_recalc_btree_reserve(c);
}
/**
 * bch2_btree_set_root - update the root in memory and on disk
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks. However, you must hold an intent lock on the
 * old root.
 *
 * Note: This allocates a journal entry but doesn't add any keys to
 * it. All the btree roots are part of every journal write, so there
 * is nothing new to be done. This just guarantees that there is a
 * journal write.
 */
static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
				struct btree_iter *iter)
{
	struct bch_fs *c = as->c;
	struct btree *old;

	trace_btree_set_root(c, b);
	BUG_ON(!b->written &&
	       !test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags));

	old = btree_node_root(c, b);

	/*
	 * Ensure no one is using the old root while we switch to the
	 * new root:
	 */
	bch2_btree_node_lock_write(old, iter);

	bch2_btree_set_root_inmem(c, b);

	btree_update_updated_root(as, b);

	/*
	 * Unlock old root after new root is visible:
	 *
	 * The new root isn't persistent, but that's ok: we still have
	 * an intent lock on the new root, and any updates that would
	 * depend on the new root would have to update the new root.
	 */
	bch2_btree_node_unlock_write(old, iter);
}
/* Interior node updates: */

static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
					struct btree_iter *iter,
					struct bkey_i *insert,
					struct btree_node_iter *node_iter)
{
	struct bch_fs *c = as->c;
	struct bkey_packed *k;
	const char *invalid;

	BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
	       !btree_ptr_sectors_written(insert));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?:
		bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert));
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert));
		bch2_fs_inconsistent(c, "inserting invalid bkey %s: %s", buf, invalid);
		dump_stack();
	}

	BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
	       ARRAY_SIZE(as->journal_entries));

	as->journal_u64s +=
		journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
				  BCH_JSET_ENTRY_btree_keys,
				  b->c.btree_id, b->c.level,
				  insert, insert->k.u64s);

	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
	       bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
		bch2_btree_node_iter_advance(node_iter, b);

	bch2_btree_bset_insert_key(iter, b, node_iter, insert);
	set_btree_node_dirty(c, b);
	set_btree_node_need_write(b);
}
static void
__bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
				  struct btree_iter *iter, struct keylist *keys,
				  struct btree_node_iter node_iter)
{
	struct bkey_i *insert = bch2_keylist_front(keys);
	struct bkey_packed *k;

	BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);

	while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
	       (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0))
		;

	while (!bch2_keylist_empty(keys)) {
		bch2_insert_fixup_btree_ptr(as, b, iter,
				bch2_keylist_front(keys), &node_iter);
		bch2_keylist_pop_front(keys);
	}
}
/*
 * Move keys from n1 (original replacement node, now lower node) to n2 (higher
 * node):
 */
static struct btree *__btree_split_node(struct btree_update *as,
					struct btree *n1,
					struct btree_iter *iter)
{
	struct bkey_format_state s;
	size_t nr_packed = 0, nr_unpacked = 0;
	struct btree *n2;
	struct bset *set1, *set2;
	struct bkey_packed *k, *set2_start, *set2_end, *out, *prev = NULL;
	struct bpos n1_pos;

	n2 = bch2_btree_node_alloc(as, n1->c.level);
	bch2_btree_update_add_new_node(as, n2);

	n2->data->max_key	= n1->data->max_key;
	n2->data->format	= n1->format;
	SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data));
	n2->key.k.p = n1->key.k.p;

	set1 = btree_bset_first(n1);
	set2 = btree_bset_first(n2);

	/*
	 * Has to be a linear search because we don't have an auxiliary
	 * search tree yet:
	 */
	k = set1->start;
	while (1) {
		struct bkey_packed *n = bkey_next(k);

		if (n == vstruct_last(set1))
			break;
		if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)
			break;

		if (bkey_packed(k))
			nr_packed++;
		else
			nr_unpacked++;

		prev = k;
		k = n;
	}

	BUG_ON(!prev);

	set2_start	= k;
	set2_end	= vstruct_last(set1);
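
	/*
	 * Note on the split point chosen above: we stop once we're roughly
	 * 3/5 of the way through set1's u64s, e.g. a node holding 1000 u64s
	 * of keys splits near the 600th u64 (illustrative numbers only),
	 * leaving the lower node slightly fuller than the higher one.
	 */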
	set1->u64s = cpu_to_le16((u64 *) set2_start - set1->_data);
	set_btree_bset_end(n1, n1->set);

	n1->nr.live_u64s	= le16_to_cpu(set1->u64s);
	n1->nr.bset_u64s[0]	= le16_to_cpu(set1->u64s);
	n1->nr.packed_keys	= nr_packed;
	n1->nr.unpacked_keys	= nr_unpacked;

	n1_pos = bkey_unpack_pos(n1, prev);
	if (as->c->sb.version < bcachefs_metadata_version_snapshot)
		n1_pos.snapshot = U32_MAX;

	btree_set_max(n1, n1_pos);
	btree_set_min(n2, bpos_successor(n1->key.k.p));

	bch2_bkey_format_init(&s);
	bch2_bkey_format_add_pos(&s, n2->data->min_key);
	bch2_bkey_format_add_pos(&s, n2->data->max_key);

	for (k = set2_start; k != set2_end; k = bkey_next(k)) {
		struct bkey uk = bkey_unpack_key(n1, k);
		bch2_bkey_format_add_key(&s, &uk);
	}

	n2->data->format = bch2_bkey_format_done(&s);
	btree_node_set_format(n2, n2->data->format);

	out = set2->start;
	memset(&n2->nr, 0, sizeof(n2->nr));

	for (k = set2_start; k != set2_end; k = bkey_next(k)) {
		BUG_ON(!bch2_bkey_transform(&n2->format, out, bkey_packed(k)
					   ? &n1->format : &bch2_bkey_format_current, k));
		out->format = KEY_FORMAT_LOCAL_BTREE;
		btree_keys_account_key_add(&n2->nr, 0, out);
		out = bkey_next(out);
	}

	set2->u64s = cpu_to_le16((u64 *) out - set2->_data);
	set_btree_bset_end(n2, n2->set);

	BUG_ON(!set1->u64s);
	BUG_ON(!set2->u64s);

	btree_node_reset_sib_u64s(n1);
	btree_node_reset_sib_u64s(n2);

	bch2_verify_btree_nr_keys(n1);
	bch2_verify_btree_nr_keys(n2);

	if (n1->c.level) {
		btree_node_interior_verify(as->c, n1);
		btree_node_interior_verify(as->c, n2);
	}

	return n2;
}
/*
 * For updates to interior nodes, we've got to do the insert before we split
 * because the stuff we're inserting has to be inserted atomically. Post split,
 * the keys might have to go in different nodes and the split would no longer be
 * atomic.
 *
 * Worse, if the insert is from btree node coalescing, if we do the insert after
 * we do the split (and pick the pivot) - the pivot we pick might be between
 * nodes that were coalesced, and thus in the middle of a child node post
 * coalescing:
 */
static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
				    struct btree_iter *iter,
				    struct keylist *keys)
{
	struct btree_node_iter node_iter;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct bkey_packed *src, *dst, *n;
	struct bset *i;

	bch2_btree_node_iter_init(&node_iter, b, &k->k.p);

	__bch2_btree_insert_keys_interior(as, b, iter, keys, node_iter);

	/*
	 * We can't tolerate whiteouts here - with whiteouts there can be
	 * duplicate keys, and it would be rather bad if we picked a duplicate
	 * for the pivot:
	 */
	i = btree_bset_first(b);
	src = dst = i->start;
	while (src != vstruct_last(i)) {
		n = bkey_next(src);
		if (!bkey_deleted(src)) {
			memmove_u64s_down(dst, src, src->u64s);
			dst = bkey_next(dst);
		}
		src = n;
	}

	/* Also clear out the unwritten whiteouts area: */
	b->whiteout_u64s = 0;

	i->u64s = cpu_to_le16((u64 *) dst - i->_data);
	set_btree_bset_end(b, b->set);

	BUG_ON(b->nsets != 1 ||
	       b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));

	btree_node_interior_verify(as->c, b);
}
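
/*
 * Replace @b with one or two new nodes (splitting if the replacement is too
 * full), write them out, and insert the new pointers into the parent,
 * allocating a new root if the depth needs to increase:
 */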
static void btree_split(struct btree_update *as,
			struct btree_trans *trans, struct btree_iter *iter,
			struct btree *b, struct keylist *keys,
			unsigned flags)
{
	struct bch_fs *c = as->c;
	struct btree *parent = btree_node_parent(iter, b);
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	u64 start_time = local_clock();

	BUG_ON(!parent && (b != btree_node_root(c, b)));
	BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));

	bch2_btree_interior_update_will_free_node(as, b);

	n1 = bch2_btree_node_alloc_replacement(as, b);
	bch2_btree_update_add_new_node(as, n1);

	if (keys)
		btree_split_insert_keys(as, n1, iter, keys);

	if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
		trace_btree_split(c, b);

		n2 = __btree_split_node(as, n1, iter);

		bch2_btree_build_aux_trees(n2);
		bch2_btree_build_aux_trees(n1);
		six_unlock_write(&n2->c.lock);
		six_unlock_write(&n1->c.lock);

		bch2_btree_node_write(c, n1, SIX_LOCK_intent);
		bch2_btree_node_write(c, n2, SIX_LOCK_intent);

		/*
		 * Note that on recursive calls parent_keys == keys, so we
		 * can't start adding new keys to parent_keys before emptying it
		 * out (which we did with btree_split_insert_keys() above)
		 */
		bch2_keylist_add(&as->parent_keys, &n1->key);
		bch2_keylist_add(&as->parent_keys, &n2->key);

		if (!parent) {
			/* Depth increases, make a new root */
			n3 = __btree_root_alloc(as, b->c.level + 1);

			n3->sib_u64s[0] = U16_MAX;
			n3->sib_u64s[1] = U16_MAX;

			btree_split_insert_keys(as, n3, iter, &as->parent_keys);

			bch2_btree_node_write(c, n3, SIX_LOCK_intent);
		}
	} else {
		trace_btree_compact(c, b);

		bch2_btree_build_aux_trees(n1);
		six_unlock_write(&n1->c.lock);

		bch2_btree_node_write(c, n1, SIX_LOCK_intent);

		if (parent)
			bch2_keylist_add(&as->parent_keys, &n1->key);
	}

	/* New nodes all written, now make them visible: */

	if (parent) {
		/* Split a non root node */
		bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags);
	} else if (n3) {
		bch2_btree_set_root(as, n3, iter);
	} else {
		/* Root filled up but didn't need to be split */
		bch2_btree_set_root(as, n1, iter);
	}

	bch2_btree_update_get_open_buckets(as, n1);
	if (n2)
		bch2_btree_update_get_open_buckets(as, n2);
	if (n3)
		bch2_btree_update_get_open_buckets(as, n3);

	/* Successful split, update the iterator to point to the new nodes: */

	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
	bch2_btree_iter_node_drop(iter, b);
	if (n3)
		bch2_btree_iter_node_replace(iter, n3);
	if (n2)
		bch2_btree_iter_node_replace(iter, n2);
	bch2_btree_iter_node_replace(iter, n1);

	/*
	 * The old node must be freed (in memory) _before_ unlocking the new
	 * nodes - else another thread could re-acquire a read lock on the old
	 * node after another thread has locked and updated the new node, thus
	 * seeing stale data:
	 */
	bch2_btree_node_free_inmem(c, b, iter);

	if (n3)
		six_unlock_intent(&n3->c.lock);
	if (n2)
		six_unlock_intent(&n2->c.lock);
	six_unlock_intent(&n1->c.lock);

	bch2_btree_trans_verify_locks(trans);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_split],
			       start_time);
}
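
/*
 * Insert @keys into interior node @b, then fix up the node iterators of any
 * linked iterators pointing at @b so they aren't left pointing at stale
 * positions:
 */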
static void
bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
				struct btree_iter *iter, struct keylist *keys)
{
	struct btree_iter *linked;

	__bch2_btree_insert_keys_interior(as, b, iter, keys, iter->l[b->c.level].iter);

	btree_update_updated_node(as, b);

	trans_for_each_iter_with_node(iter->trans, b, linked)
		bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);

	bch2_btree_trans_verify_iters(iter->trans, b);
}
/**
 * bch2_btree_insert_node - insert bkeys into a given btree node
 *
 * @as:		btree_update this insert is part of
 * @trans:	btree_trans
 * @iter:	btree iterator
 * @b:		node to insert keys into
 * @keys:	list of keys to insert
 * @flags:	BTREE_INSERT_* flags
 *
 * Inserts as many keys as it can into a given btree node, splitting it if full.
 * If a split occurred, this function will return early. This can only happen
 * for leaf nodes -- inserts into interior nodes have to be atomic.
 */
static void bch2_btree_insert_node(struct btree_update *as,
				   struct btree_trans *trans, struct btree_iter *iter,
				   struct btree *b, struct keylist *keys,
				   unsigned flags)
{
	struct bch_fs *c = as->c;
	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	lockdep_assert_held(&c->gc_lock);
	BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
	BUG_ON(!b->c.level);
	BUG_ON(!as || as->b);
	bch2_verify_keylist_sorted(keys);

	bch2_btree_node_lock_for_insert(trans, iter, b);

	if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
		bch2_btree_node_unlock_write(b, iter);
		goto split;
	}

	btree_node_interior_verify(c, b);

	bch2_btree_insert_keys_interior(as, b, iter, keys);

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_btree_iter_reinit_node(iter, b);

	bch2_btree_node_unlock_write(b, iter);

	btree_node_interior_verify(c, b);
	return;
split:
	btree_split(as, trans, iter, b, keys, flags);
}
int bch2_btree_split_leaf(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree *b = iter_l(iter)->b;
	struct btree_update *as;
	unsigned l;
	int ret = 0;

	as = bch2_btree_update_start(iter, iter->level,
		btree_update_reserve_required(c, b), flags);
	if (IS_ERR(as))
		return PTR_ERR(as);

	btree_split(as, trans, iter, b, NULL, flags);
	bch2_btree_update_done(as);

	for (l = iter->level + 1; btree_iter_node(iter, l) && !ret; l++)
		ret = bch2_foreground_maybe_merge(trans, iter, l, flags);

	return ret;
}
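
/*
 * Try to merge the node at @level with its sibling on @sib side: both nodes'
 * contents are measured against a common format, and the merge only proceeds
 * if the combined size is under the foreground merge threshold:
 */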
int __bch2_foreground_maybe_merge(struct btree_trans *trans,
				  struct btree_iter *iter,
				  unsigned level,
				  unsigned flags,
				  enum btree_node_sibling sib)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *sib_iter = NULL;
	struct btree_update *as;
	struct bkey_format_state new_s;
	struct bkey_format new_f;
	struct bkey_i delete;
	struct btree *b, *m, *n, *prev, *next, *parent;
	struct bpos sib_pos;
	size_t sib_u64s;
	int ret = 0, ret2 = 0;

	BUG_ON(!btree_node_locked(iter, level));
retry:
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto err;

	BUG_ON(!btree_node_locked(iter, level));

	b = iter->l[level].b;

	if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
	    (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
		b->sib_u64s[sib] = U16_MAX;
		goto out;
	}

	sib_pos = sib == btree_prev_sib
		? bpos_predecessor(b->data->min_key)
		: bpos_successor(b->data->max_key);

	sib_iter = bch2_trans_get_node_iter(trans, iter->btree_id,
					    sib_pos, U8_MAX, level,
					    BTREE_ITER_INTENT);
	ret = bch2_btree_iter_traverse(sib_iter);
	if (ret)
		goto err;

	m = sib_iter->l[level].b;

	if (btree_node_parent(iter, b) !=
	    btree_node_parent(sib_iter, m)) {
		b->sib_u64s[sib] = U16_MAX;
		goto out;
	}

	if (sib == btree_prev_sib) {
		prev = m;
		next = b;
	} else {
		prev = b;
		next = m;
	}

	if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
		char buf1[100], buf2[100];

		bch2_bpos_to_text(&PBUF(buf1), prev->data->max_key);
		bch2_bpos_to_text(&PBUF(buf2), next->data->min_key);
		bch_err(c,
			"btree topology error in btree merge:\n"
			" prev ends at %s\n"
			" next starts at %s",
			buf1, buf2);
		bch2_topology_error(c);
		ret = -EIO;
		goto err;
	}

	bch2_bkey_format_init(&new_s);
	bch2_bkey_format_add_pos(&new_s, prev->data->min_key);
	__bch2_btree_calc_format(&new_s, prev);
	__bch2_btree_calc_format(&new_s, next);
	bch2_bkey_format_add_pos(&new_s, next->data->max_key);
	new_f = bch2_bkey_format_done(&new_s);

	sib_u64s = btree_node_u64s_with_format(b, &new_f) +
		btree_node_u64s_with_format(m, &new_f);

	if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
		sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
		sib_u64s /= 2;
		sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
	}

	sib_u64s = min(sib_u64s, btree_max_u64s(c));
	sib_u64s = min(sib_u64s, (size_t) U16_MAX - 1);
	b->sib_u64s[sib] = sib_u64s;

	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
		goto out;

	parent = btree_node_parent(iter, b);
	as = bch2_btree_update_start(iter, level,
			 btree_update_reserve_required(c, parent) + 1,
			 flags|
			 BTREE_INSERT_NOFAIL|
			 BTREE_INSERT_USE_RESERVE);
	ret = PTR_ERR_OR_ZERO(as);
	if (ret)
		goto err;

	trace_btree_merge(c, b);

	bch2_btree_interior_update_will_free_node(as, b);
	bch2_btree_interior_update_will_free_node(as, m);

	n = bch2_btree_node_alloc(as, b->c.level);
	bch2_btree_update_add_new_node(as, n);

	btree_set_min(n, prev->data->min_key);
	btree_set_max(n, next->data->max_key);
	n->data->format		= new_f;

	btree_node_set_format(n, new_f);

	bch2_btree_sort_into(c, n, prev);
	bch2_btree_sort_into(c, n, next);

	bch2_btree_build_aux_trees(n);
	six_unlock_write(&n->c.lock);

	bch2_btree_node_write(c, n, SIX_LOCK_intent);

	bkey_init(&delete.k);
	delete.k.p = prev->key.k.p;
	bch2_keylist_add(&as->parent_keys, &delete);
	bch2_keylist_add(&as->parent_keys, &n->key);

	bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags);

	bch2_btree_update_get_open_buckets(as, n);

	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
	six_lock_increment(&m->c.lock, SIX_LOCK_intent);
	bch2_btree_iter_node_drop(iter, b);
	bch2_btree_iter_node_drop(iter, m);

	bch2_btree_iter_node_replace(iter, n);

	bch2_btree_trans_verify_iters(trans, n);

	bch2_btree_node_free_inmem(c, b, iter);
	bch2_btree_node_free_inmem(c, m, iter);

	six_unlock_intent(&n->c.lock);

	bch2_btree_update_done(as);
out:
	bch2_btree_trans_verify_locks(trans);
	bch2_trans_iter_free(trans, sib_iter);

	/*
	 * Don't downgrade locks here: we're called after successful insert,
	 * and the caller will downgrade locks after a successful insert
	 * anyways (in case e.g. a split was required first)
	 *
	 * And we're also called when inserting into interior nodes in the
	 * split path, and downgrading to read locks in there is potentially
	 * confusing:
	 */
	return ret ?: ret2;
err:
	bch2_trans_iter_put(trans, sib_iter);
	sib_iter = NULL;

	if (ret == -EINTR && bch2_trans_relock(trans))
		goto retry;

	if (ret == -EINTR && !(flags & BTREE_INSERT_NOUNLOCK)) {
		ret2 = ret;
		ret = bch2_btree_iter_traverse_all(trans);
		if (!ret)
			goto retry;
	}

	goto out;
}
/**
 * bch2_btree_node_rewrite - Rewrite/move a btree node
 */
int bch2_btree_node_rewrite(struct btree_trans *trans,
			    struct btree_iter *iter,
			    __le64 seq, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree *b, *n, *parent;
	struct btree_update *as;
	int ret;

	flags |= BTREE_INSERT_NOFAIL;
retry:
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto out;

	b = bch2_btree_iter_peek_node(iter);
	if (!b || b->data->keys.seq != seq)
		goto out;

	parent = btree_node_parent(iter, b);
	as = bch2_btree_update_start(iter, b->c.level,
		(parent
		 ? btree_update_reserve_required(c, parent)
		 : 0) + 1,
		flags);
	ret = PTR_ERR_OR_ZERO(as);
	if (ret == -EINTR)
		goto retry;
	if (ret) {
		trace_btree_gc_rewrite_node_fail(c, b);
		goto out;
	}

	bch2_btree_interior_update_will_free_node(as, b);

	n = bch2_btree_node_alloc_replacement(as, b);
	bch2_btree_update_add_new_node(as, n);

	bch2_btree_build_aux_trees(n);
	six_unlock_write(&n->c.lock);

	trace_btree_gc_rewrite_node(c, b);

	bch2_btree_node_write(c, n, SIX_LOCK_intent);

	if (parent) {
		bch2_keylist_add(&as->parent_keys, &n->key);
		bch2_btree_insert_node(as, trans, iter, parent,
				       &as->parent_keys, flags);
	} else {
		bch2_btree_set_root(as, n, iter);
	}

	bch2_btree_update_get_open_buckets(as, n);

	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
	bch2_btree_iter_node_drop(iter, b);
	bch2_btree_iter_node_replace(iter, n);
	bch2_btree_node_free_inmem(c, b, iter);
	six_unlock_intent(&n->c.lock);

	bch2_btree_update_done(as);
out:
	bch2_btree_iter_downgrade(iter);
	return ret;
}
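
/*
 * Deferred rewrite of a btree node, for contexts that can't run a btree
 * update directly: the node's identity is captured and the rewrite is done
 * from a workqueue:
 */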
struct async_btree_rewrite {
	struct bch_fs		*c;
	struct work_struct	work;
	enum btree_id		btree_id;
	unsigned		level;
	struct bpos		pos;
	__le64			seq;
};

void async_btree_node_rewrite_work(struct work_struct *work)
{
	struct async_btree_rewrite *a =
		container_of(work, struct async_btree_rewrite, work);
	struct bch_fs *c = a->c;
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_node_iter(&trans, a->btree_id, a->pos,
					BTREE_MAX_DEPTH, a->level, 0);
	bch2_btree_node_rewrite(&trans, iter, a->seq, 0);
	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);
	percpu_ref_put(&c->writes);
	kfree(a);
}
void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
{
	struct async_btree_rewrite *a;

	if (!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags))
		return;

	if (!percpu_ref_tryget(&c->writes))
		return;

	a = kmalloc(sizeof(*a), GFP_NOFS);
	if (!a) {
		percpu_ref_put(&c->writes);
		return;
	}

	a->c		= c;
	a->btree_id	= b->c.btree_id;
	a->level	= b->c.level;
	a->pos		= b->key.k.p;
	a->seq		= b->data->keys.seq;

	INIT_WORK(&a->work, async_btree_node_rewrite_work);
	queue_work(c->btree_interior_update_worker, &a->work);
}
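
/*
 * Update @b's key in place: the new key goes to the parent node, or to the
 * journal if @b is a root; if the key's hash changes, @b is re-hashed in the
 * btree node cache via @new_hash:
 */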
static int __bch2_btree_node_update_key(struct btree_trans *trans,
					struct btree_iter *iter,
					struct btree *b, struct btree *new_hash,
					struct bkey_i *new_key,
					bool skip_triggers)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter2 = NULL;
	struct btree *parent;
	u64 journal_entries[BKEY_BTREE_PTR_U64s_MAX];
	int ret;

	if (!skip_triggers) {
		ret = bch2_trans_mark_key(trans,
					  bkey_s_c_null,
					  bkey_i_to_s_c(new_key),
					  BTREE_TRIGGER_INSERT);
		if (ret)
			return ret;

		ret = bch2_trans_mark_key(trans,
					  bkey_i_to_s_c(&b->key),
					  bkey_s_c_null,
					  BTREE_TRIGGER_OVERWRITE);
		if (ret)
			return ret;
	}

	if (new_hash) {
		bkey_copy(&new_hash->key, new_key);
		ret = bch2_btree_node_hash_insert(&c->btree_cache,
				new_hash, b->c.level, b->c.btree_id);
		BUG_ON(ret);
	}

	parent = btree_node_parent(iter, b);
	if (parent) {
		iter2 = bch2_trans_copy_iter(trans, iter);

		BUG_ON(iter2->level != b->c.level);
		BUG_ON(bpos_cmp(iter2->pos, new_key->k.p));

		btree_node_unlock(iter2, iter2->level);
		iter2->l[iter2->level].b = BTREE_ITER_NO_NODE_UP;
		iter2->level++;

		ret   = bch2_btree_iter_traverse(iter2) ?:
			bch2_trans_update(trans, iter2, new_key, BTREE_TRIGGER_NORUN);
		if (ret)
			goto err;
	} else {
		BUG_ON(btree_node_root(c, b) != b);

		trans->extra_journal_entries = (void *) &journal_entries[0];
		trans->extra_journal_entry_u64s =
			journal_entry_set((void *) &journal_entries[0],
					  BCH_JSET_ENTRY_btree_root,
					  b->c.btree_id, b->c.level,
					  new_key, new_key->k.u64s);
	}

	ret = bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_JOURNAL_RECLAIM|
				BTREE_INSERT_JOURNAL_RESERVED|
				BTREE_INSERT_NOUNLOCK);
	if (ret)
		goto err;

	bch2_btree_node_lock_write(b, iter);

	if (new_hash) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		bkey_copy(&b->key, new_key);
		ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
		BUG_ON(ret);
		mutex_unlock(&c->btree_cache.lock);
	} else {
		bkey_copy(&b->key, new_key);
	}

	bch2_btree_node_unlock_write(b, iter);
out:
	bch2_trans_iter_put(trans, iter2);
	return ret;
err:
	if (new_hash) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);
	}
	goto out;
}
int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree *b, struct bkey_i *new_key,
			       bool skip_triggers)
{
	struct bch_fs *c = trans->c;
	struct btree *new_hash = NULL;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	/*
	 * check btree_ptr_hash_val() after @b is locked by
	 * btree_iter_traverse():
	 */
	if (btree_ptr_hash_val(new_key) != b->hash_val) {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		if (ret) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			if (!bch2_trans_relock(trans))
				return -EINTR;
		}

		new_hash = bch2_btree_node_mem_alloc(c);
	}

	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
					   new_key, skip_triggers);

	if (new_hash) {
		mutex_lock(&c->btree_cache.lock);
		list_move(&new_hash->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		six_unlock_write(&new_hash->c.lock);
		six_unlock_intent(&new_hash->c.lock);
	}
	closure_sync(&cl);
	bch2_btree_cache_cannibalize_unlock(c);
	return ret;
}
int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
					struct btree *b, struct bkey_i *new_key,
					bool skip_triggers)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_node_iter(trans, b->c.btree_id, b->key.k.p,
					BTREE_MAX_DEPTH, b->c.level,
					BTREE_ITER_INTENT);
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto out;

	/* has node been freed? */
	if (iter->l[b->c.level].b != b) {
		BUG_ON(!btree_node_dying(b));
		goto out;
	}

	BUG_ON(!btree_node_hashed(b));

	ret = bch2_btree_node_update_key(trans, iter, b, new_key, skip_triggers);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}
/*
 * Only for filesystem bringup, when first reading the btree roots or allocating
 * btree roots when initializing a new filesystem:
 */
void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
{
	BUG_ON(btree_node_root(c, b));

	bch2_btree_set_root_inmem(c, b);
}

void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(c);
	bch2_btree_cache_cannibalize_unlock(c);

	set_btree_node_fake(b);
	set_btree_node_need_rewrite(b);
	b->c.level	= 0;
	b->c.btree_id	= id;

	bkey_btree_ptr_init(&b->key);
	b->key.k.p = SPOS_MAX;
	*((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;

	bch2_bset_init_first(b, &b->data->keys);
	bch2_btree_build_aux_trees(b);

	btree_set_min(b, POS_MIN);
	btree_set_max(b, SPOS_MAX);
	b->data->format = bch2_btree_calc_format(b);
	btree_node_set_format(b, b->data->format);

	ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
					  b->c.level, b->c.btree_id);
	BUG_ON(ret);

	bch2_btree_set_root_inmem(c, b);

	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);
}
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_update *as;

	mutex_lock(&c->btree_interior_update_lock);
	list_for_each_entry(as, &c->btree_interior_update_list, list)
		pr_buf(out, "%p m %u w %u r %u j %llu\n",
		       as,
		       as->mode,
		       as->nodes_written,
		       atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK,
		       as->journal.seq);
	mutex_unlock(&c->btree_interior_update_lock);
}
size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *c)
{
	size_t ret = 0;
	struct list_head *i;

	mutex_lock(&c->btree_interior_update_lock);
	list_for_each(i, &c->btree_interior_update_list)
		ret++;
	mutex_unlock(&c->btree_interior_update_lock);

	return ret;
}
void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
{
	struct btree_root *r;
	struct jset_entry *entry;

	mutex_lock(&c->btree_root_lock);

	vstruct_for_each(jset, entry)
		if (entry->type == BCH_JSET_ENTRY_btree_root) {
			r = &c->btree_roots[entry->btree_id];
			r->level = entry->level;
			r->alive = true;
			bkey_copy(&r->key, &entry->start[0]);
		}

	mutex_unlock(&c->btree_root_lock);
}
struct jset_entry *
bch2_btree_roots_to_journal_entries(struct bch_fs *c,
				    struct jset_entry *start,
				    struct jset_entry *end)
{
	struct jset_entry *entry;
	unsigned long have = 0;
	unsigned i;

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root)
			__set_bit(entry->btree_id, &have);

	mutex_lock(&c->btree_root_lock);

	for (i = 0; i < BTREE_ID_NR; i++)
		if (c->btree_roots[i].alive && !test_bit(i, &have)) {
			journal_entry_set(end,
					  BCH_JSET_ENTRY_btree_root,
					  i, c->btree_roots[i].level,
					  &c->btree_roots[i].key,
					  c->btree_roots[i].key.u64s);
			end = vstruct_next(end);
		}

	mutex_unlock(&c->btree_root_lock);

	return end;
}
void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
{
	if (c->btree_interior_update_worker)
		destroy_workqueue(c->btree_interior_update_worker);
	mempool_exit(&c->btree_interior_update_pool);
}
int bch2_fs_btree_interior_update_init(struct bch_fs *c)
{
	mutex_init(&c->btree_reserve_cache_lock);
	INIT_LIST_HEAD(&c->btree_interior_update_list);
	INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
	mutex_init(&c->btree_interior_update_lock);
	INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);

	c->btree_interior_update_worker =
		alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!c->btree_interior_update_worker)
		return -ENOMEM;

	return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
					 sizeof(struct btree_update));
}