// SPDX-License-Identifier: GPL-2.0

#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "journal_reclaim.h"

#include <linux/random.h>
#include <trace/events/bcachefs.h>

static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
				   struct btree_path *, struct btree *,
				   struct keylist *, unsigned);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);

/*
 * Verify that child nodes correctly span parent node's range:
 */
static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bpos next_node = b->data->min_key;
	struct btree_node_iter iter;
	struct bkey_s_c_btree_ptr_v2 bp;
	char buf1[100], buf2[100];

	if (!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags))

	bch2_btree_node_iter_init_from_start(&iter, b);

		k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked);
		if (k.k->type != KEY_TYPE_btree_ptr_v2)
		bp = bkey_s_c_to_btree_ptr_v2(k);

		if (bpos_cmp(next_node, bp.v->min_key)) {
			bch2_dump_btree_node(c, b);
			panic("expected next min_key %s got %s\n",
			      (bch2_bpos_to_text(&PBUF(buf1), next_node), buf1),
			      (bch2_bpos_to_text(&PBUF(buf2), bp.v->min_key), buf2));

		bch2_btree_node_iter_advance(&iter, b);

		if (bch2_btree_node_iter_end(&iter)) {
			if (bpos_cmp(k.k->p, b->key.k.p)) {
				bch2_dump_btree_node(c, b);
				panic("expected end %s got %s\n",
				      (bch2_bpos_to_text(&PBUF(buf1), b->key.k.p), buf1),
				      (bch2_bpos_to_text(&PBUF(buf2), k.k->p), buf2));

		next_node = bpos_successor(k.k->p);

/* Calculate ideal packed bkey format for new btree nodes: */

void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
	struct bkey_packed *k;

	bset_tree_for_each_key(b, t, k)
		if (!bkey_deleted(k)) {
			uk = bkey_unpack_key(b, k);
			bch2_bkey_format_add_key(s, &uk);

static struct bkey_format bch2_btree_calc_format(struct btree *b)
	struct bkey_format_state s;

	bch2_bkey_format_init(&s);
	bch2_bkey_format_add_pos(&s, b->data->min_key);
	bch2_bkey_format_add_pos(&s, b->data->max_key);
	__bch2_btree_calc_format(&s, b);

	return bch2_bkey_format_done(&s);

static size_t btree_node_u64s_with_format(struct btree *b,
					  struct bkey_format *new_f)
	struct bkey_format *old_f = &b->format;

	/* stupid integer promotion rules */
	    (((int) new_f->key_u64s - old_f->key_u64s) *
	     (int) b->nr.packed_keys) +
	    (((int) new_f->key_u64s - BKEY_U64s) *
	     (int) b->nr.unpacked_keys);

	BUG_ON(delta + b->nr.live_u64s < 0);

	return b->nr.live_u64s + delta;
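/*
 * Worked example for the delta computed above, with hypothetical numbers: if
 * the new format shrinks key_u64s from 3 to 2 and the node holds 100 packed
 * keys, delta picks up (2 - 3) * 100 = -100 u64s, so a node with 1000 live
 * u64s would be counted as 900 under the new format.
 */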
/**
 * btree_node_format_fits - check if we could rewrite node with a new format
 *
 * This assumes all keys can pack with the new format -- it just checks if
 * the re-packed keys would fit inside the node itself.
 */
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
				 struct bkey_format *new_f)
	size_t u64s = btree_node_u64s_with_format(b, new_f);

	return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
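/*
 * Example caller: bch2_btree_node_alloc_replacement() below recomputes a
 * node's ideal format and falls back to the existing format when the
 * repacked keys wouldn't fit.
 */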
/* Btree node freeing/allocation: */

static void __btree_node_free(struct bch_fs *c, struct btree *b)
	trace_btree_node_free(c, b);

	BUG_ON(btree_node_dirty(b));
	BUG_ON(btree_node_need_write(b));
	BUG_ON(b == btree_node_root(c, b));
	BUG_ON(!list_empty(&b->write_blocked));
	BUG_ON(b->will_make_reachable);

	clear_btree_node_noevict(b);

	mutex_lock(&c->btree_cache.lock);
	list_move(&b->list, &c->btree_cache.freeable);
	mutex_unlock(&c->btree_cache.lock);

static void bch2_btree_node_free_inmem(struct btree_trans *trans,
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		BUG_ON(path->l[b->c.level].b == b &&
		       path->l[b->c.level].lock_seq == b->c.lock.state.seq);
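	/*
	 * The loop above asserts that no btree_path still has @b linked with
	 * a live lock sequence: @b is about to be freed, so any such path
	 * would be left with a dangling reference.
	 */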
	six_lock_write(&b->c.lock, NULL, NULL);

	bch2_btree_node_hash_remove(&c->btree_cache, b);
	__btree_node_free(c, b);

	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
					     struct disk_reservation *res,
	struct write_point *wp;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
	struct open_buckets ob = { .nr = 0 };
	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
	enum alloc_reserve alloc_reserve;

	if (flags & BTREE_INSERT_USE_RESERVE) {
		alloc_reserve = RESERVE_BTREE_MOVINGGC;
		nr_reserve = BTREE_NODE_RESERVE;
		alloc_reserve = RESERVE_BTREE;

	mutex_lock(&c->btree_reserve_cache_lock);
	if (c->btree_reserve_cache_nr > nr_reserve) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bkey_copy(&tmp.k, &a->k);
		mutex_unlock(&c->btree_reserve_cache_lock);
	mutex_unlock(&c->btree_reserve_cache_lock);

	wp = bch2_alloc_sectors_start(c,
				      c->opts.metadata_target ?:
				      c->opts.foreground_target,
				      writepoint_ptr(&c->btree_write_point),
				      c->opts.metadata_replicas_required,
				      alloc_reserve, 0, cl);

	if (wp->sectors_free < btree_sectors(c)) {
		struct open_bucket *ob;

		open_bucket_for_each(c, &wp->ptrs, ob, i)
			if (ob->sectors_free < btree_sectors(c))
				ob->sectors_free = 0;

		bch2_alloc_sectors_done(c, wp);

	bkey_btree_ptr_v2_init(&tmp.k);
	bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);

	bch2_open_bucket_get(c, wp, &ob);
	bch2_alloc_sectors_done(c, wp);

	b = bch2_btree_node_mem_alloc(c);

	/* we hold cannibalize_lock: */

	bkey_copy(&b->key, &tmp.k);

static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned level)
	struct bch_fs *c = as->c;

	BUG_ON(level >= BTREE_MAX_DEPTH);
	BUG_ON(!as->nr_prealloc_nodes);

	b = as->prealloc_nodes[--as->nr_prealloc_nodes];

	set_btree_node_accessed(b);
	set_btree_node_dirty(c, b);
	set_btree_node_need_write(b);

	bch2_bset_init_first(b, &b->data->keys);

	b->c.btree_id = as->btree_id;
	b->version_ondisk = c->sb.version;

	memset(&b->nr, 0, sizeof(b->nr));
	b->data->magic = cpu_to_le64(bset_magic(c));
	memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));

	SET_BTREE_NODE_ID(b->data, as->btree_id);
	SET_BTREE_NODE_LEVEL(b->data, level);

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key);

		bp->v.seq = b->data->keys.seq;
		bp->v.sectors_written = 0;

	SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true);

	bch2_btree_build_aux_trees(b);

	ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);

	trace_btree_node_alloc(c, b);

static void btree_set_min(struct btree *b, struct bpos pos)
	if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos;
	b->data->min_key = pos;

static void btree_set_max(struct btree *b, struct bpos pos)
	b->data->max_key = pos;

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
						  struct bkey_format format)
	n = bch2_btree_node_alloc(as, b->c.level);

	SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);

	btree_set_min(n, b->data->min_key);
	btree_set_max(n, b->data->max_key);

	n->data->format = format;
	btree_node_set_format(n, format);

	bch2_btree_sort_into(as->c, n, b);

	btree_node_reset_sib_u64s(n);

	n->key.k.p = b->key.k.p;

static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
	struct bkey_format new_f = bch2_btree_calc_format(b);

	/*
	 * The keys might expand with the new format - if they wouldn't fit in
	 * the btree node anymore, use the old format for now:
	 */
	if (!bch2_btree_node_format_fits(as->c, b, &new_f))

	return __bch2_btree_node_alloc_replacement(as, b, new_f);

static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
	struct btree *b = bch2_btree_node_alloc(as, level);

	btree_set_min(b, POS_MIN);
	btree_set_max(b, SPOS_MAX);
	b->data->format = bch2_btree_calc_format(b);

	btree_node_set_format(b, b->data->format);
	bch2_btree_build_aux_trees(b);

	bch2_btree_update_add_new_node(as, b);
	six_unlock_write(&b->c.lock);

static void bch2_btree_reserve_put(struct btree_update *as)
	struct bch_fs *c = as->c;

	mutex_lock(&c->btree_reserve_cache_lock);

	while (as->nr_prealloc_nodes) {
		struct btree *b = as->prealloc_nodes[--as->nr_prealloc_nodes];

		six_unlock_write(&b->c.lock);

		if (c->btree_reserve_cache_nr <
		    ARRAY_SIZE(c->btree_reserve_cache)) {
			struct btree_alloc *a =
				&c->btree_reserve_cache[c->btree_reserve_cache_nr++];

			bkey_copy(&a->k, &b->key);

			bch2_open_buckets_put(c, &b->ob);

		btree_node_lock_type(c, b, SIX_LOCK_write);
		__btree_node_free(c, b);
		six_unlock_write(&b->c.lock);

		six_unlock_intent(&b->c.lock);

	mutex_unlock(&c->btree_reserve_cache_lock);

static int bch2_btree_reserve_get(struct btree_update *as, unsigned nr_nodes,
				  unsigned flags, struct closure *cl)
	struct bch_fs *c = as->c;

	BUG_ON(nr_nodes > BTREE_RESERVE_MAX);

	/*
	 * Protects reaping from the btree node cache and using the btree node
	 * open bucket reserve:
	 */
	ret = bch2_btree_cache_cannibalize_lock(c, cl);

	while (as->nr_prealloc_nodes < nr_nodes) {
		b = __bch2_btree_node_alloc(c, &as->disk_res,
					    flags & BTREE_INSERT_NOWAIT

		as->prealloc_nodes[as->nr_prealloc_nodes++] = b;

	bch2_btree_cache_cannibalize_unlock(c);

	bch2_btree_cache_cannibalize_unlock(c);
	trace_btree_reserve_get_fail(c, nr_nodes, cl);

/* Asynchronous interior node update machinery */

static void bch2_btree_update_free(struct btree_update *as)
	struct bch_fs *c = as->c;

	if (as->took_gc_lock)
		up_read(&c->gc_lock);
	as->took_gc_lock = false;

	bch2_journal_preres_put(&c->journal, &as->journal_preres);

	bch2_journal_pin_drop(&c->journal, &as->journal);
	bch2_journal_pin_flush(&c->journal, &as->journal);
	bch2_disk_reservation_put(c, &as->disk_res);
	bch2_btree_reserve_put(as);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total],

	mutex_lock(&c->btree_interior_update_lock);
	list_del(&as->unwritten_list);

	closure_debug_destroy(&as->cl);
	mempool_free(as, &c->btree_interior_update_pool);

	/*
	 * Have to do the wakeup with btree_interior_update_lock still held,
	 * since being on btree_interior_update_list is our ref on @c:
	 */
	closure_wake_up(&c->btree_interior_update_wait);

	mutex_unlock(&c->btree_interior_update_lock);

static void btree_update_will_delete_key(struct btree_update *as,
	BUG_ON(bch2_keylist_u64s(&as->old_keys) + k->k.u64s >
	       ARRAY_SIZE(as->_old_keys));
	bch2_keylist_add(&as->old_keys, k);

static void btree_update_will_add_key(struct btree_update *as,
	BUG_ON(bch2_keylist_u64s(&as->new_keys) + k->k.u64s >
	       ARRAY_SIZE(as->_new_keys));
	bch2_keylist_add(&as->new_keys, k);
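/*
 * Both helpers above append to fixed-size keylists embedded in struct
 * btree_update (_old_keys/_new_keys); the BUG_ONs guard against overflowing
 * those arrays.
 */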
/*
 * The transactional part of an interior btree node update, where we journal the
 * update we did to the interior node and update alloc info:
 */
static int btree_update_nodes_written_trans(struct btree_trans *trans,
					    struct btree_update *as)
	trans->extra_journal_entries = (void *) &as->journal_entries[0];
	trans->extra_journal_entry_u64s = as->journal_u64s;
	trans->journal_pin = &as->journal;

	for_each_keylist_key(&as->new_keys, k) {
		ret = bch2_trans_mark_key(trans,
					  BTREE_TRIGGER_INSERT);

	for_each_keylist_key(&as->old_keys, k) {
		ret = bch2_trans_mark_key(trans,
					  BTREE_TRIGGER_OVERWRITE);

static void btree_update_nodes_written(struct btree_update *as)
	struct bch_fs *c = as->c;
	struct btree *b = as->b;
	struct btree_trans trans;

	/*
	 * If we're already in an error state, it might be because a btree node
	 * was never written, and we might be trying to free that same btree
	 * node here, but it won't have been marked as allocated and we'll see
	 * spurious disk usage inconsistencies in the transactional part below
	 * if we don't skip it:
	 */
	ret = bch2_journal_error(&c->journal);

	BUG_ON(!journal_pin_active(&as->journal));
	/*
	 * Wait for any in flight writes to finish before we free the old nodes
	 * on disk:
	 */
	for (i = 0; i < as->nr_old_nodes; i++) {
		struct btree *old = as->old_nodes[i];

		six_lock_read(&old->c.lock, NULL, NULL);
		seq = old->data ? old->data->keys.seq : 0;
		six_unlock_read(&old->c.lock);

		if (seq == as->old_nodes_seq[i])
			wait_on_bit_io(&old->flags, BTREE_NODE_write_in_flight_inner,
				       TASK_UNINTERRUPTIBLE);

	/*
	 * We did an update to a parent node where the pointers we added pointed
	 * to child nodes that weren't written yet: now, the child nodes have
	 * been written so we can write out the update to the interior node.
	 *
	 * We can't call into journal reclaim here: we'd block on the journal
	 * reclaim lock, but we may need to release the open buckets we have
	 * pinned in order for other btree updates to make forward progress, and
	 * journal reclaim does btree updates when flushing bkey_cached entries,
	 * which may require allocations as well.
	 */
	bch2_trans_init(&trans, c, 0, 512);
	ret = __bch2_trans_do(&trans, &as->disk_res, &journal_seq,
			      BTREE_INSERT_NOCHECK_RW|
			      BTREE_INSERT_JOURNAL_RECLAIM|
			      BTREE_INSERT_JOURNAL_RESERVED,
			      btree_update_nodes_written_trans(&trans, as));
	bch2_trans_exit(&trans);

	bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
			     "error %i in btree_update_nodes_written()", ret);

		/*
		 * @b is the node we did the final insert into:
		 *
		 * On failure to get a journal reservation, we still have to
		 * unblock the write and allow most of the write path to happen
		 * so that shutdown works, but the i->journal_seq mechanism
		 * won't work to prevent the btree write from being visible (we
		 * didn't get a journal sequence number) - instead
		 * __bch2_btree_node_write() doesn't do the actual write if
		 * we're in journal error state:
		 */
		btree_node_lock_type(c, b, SIX_LOCK_intent);
		btree_node_lock_type(c, b, SIX_LOCK_write);
		mutex_lock(&c->btree_interior_update_lock);

		list_del(&as->write_blocked_list);

		/*
		 * Node might have been freed, recheck under
		 * btree_interior_update_lock:
		 */
			struct bset *i = btree_bset_last(b);

			BUG_ON(!btree_node_dirty(b));

			i->journal_seq = cpu_to_le64(
						le64_to_cpu(i->journal_seq)));

			bch2_btree_add_journal_pin(c, b, journal_seq);

				/*
				 * If we didn't get a journal sequence number we
				 * can't write this btree node, because recovery
				 * won't know to ignore this write:
				 */
				set_btree_node_never_write(b);

		mutex_unlock(&c->btree_interior_update_lock);
		six_unlock_write(&b->c.lock);

		btree_node_write_if_need(c, b, SIX_LOCK_intent);
		six_unlock_intent(&b->c.lock);

	bch2_journal_pin_drop(&c->journal, &as->journal);

	bch2_journal_preres_put(&c->journal, &as->journal_preres);

	mutex_lock(&c->btree_interior_update_lock);
	for (i = 0; i < as->nr_new_nodes; i++) {
		b = as->new_nodes[i];

		BUG_ON(b->will_make_reachable != (unsigned long) as);
		b->will_make_reachable = 0;
	mutex_unlock(&c->btree_interior_update_lock);

	for (i = 0; i < as->nr_new_nodes; i++) {
		b = as->new_nodes[i];

		btree_node_lock_type(c, b, SIX_LOCK_read);
		btree_node_write_if_need(c, b, SIX_LOCK_read);
		six_unlock_read(&b->c.lock);

	for (i = 0; i < as->nr_open_buckets; i++)
		bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]);

	bch2_btree_update_free(as);

static void btree_interior_update_work(struct work_struct *work)
		container_of(work, struct bch_fs, btree_interior_update_work);
	struct btree_update *as;

	mutex_lock(&c->btree_interior_update_lock);
	as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
				      struct btree_update, unwritten_list);
	if (as && !as->nodes_written)
	mutex_unlock(&c->btree_interior_update_lock);

	btree_update_nodes_written(as);

static void btree_update_set_nodes_written(struct closure *cl)
	struct btree_update *as = container_of(cl, struct btree_update, cl);
	struct bch_fs *c = as->c;

	mutex_lock(&c->btree_interior_update_lock);
	as->nodes_written = true;
	mutex_unlock(&c->btree_interior_update_lock);

	queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
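/*
 * Note that interior updates complete strictly in the order they were
 * started: btree_interior_update_work() above only looks at the head of
 * btree_interior_updates_unwritten, and bails if that update's writes
 * haven't all finished yet.
 */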
/*
 * We're updating @b with pointers to nodes that haven't finished writing yet:
 * block @b from being written until @as completes
 */
static void btree_update_updated_node(struct btree_update *as, struct btree *b)
	struct bch_fs *c = as->c;

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);

	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
	BUG_ON(!btree_node_dirty(b));

	as->mode = BTREE_INTERIOR_UPDATING_NODE;

	list_add(&as->write_blocked_list, &b->write_blocked);

	mutex_unlock(&c->btree_interior_update_lock);

static void btree_update_reparent(struct btree_update *as,
				  struct btree_update *child)
	struct bch_fs *c = as->c;

	lockdep_assert_held(&c->btree_interior_update_lock);

	child->mode = BTREE_INTERIOR_UPDATING_AS;

	bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL);

static void btree_update_updated_root(struct btree_update *as, struct btree *b)
	struct bkey_i *insert = &b->key;
	struct bch_fs *c = as->c;

	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);

	BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
	       ARRAY_SIZE(as->journal_entries));

		journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
				  BCH_JSET_ENTRY_btree_root,
				  b->c.btree_id, b->c.level,
				  insert, insert->k.u64s);

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);

	as->mode = BTREE_INTERIOR_UPDATING_ROOT;
	mutex_unlock(&c->btree_interior_update_lock);
/*
 * bch2_btree_update_add_new_node:
 *
 * This causes @as to wait on @b to be written, before it gets to
 * btree_update_nodes_written()
 *
 * Additionally, it sets b->will_make_reachable to prevent any additional writes
 * to @b from happening besides the first until @b is reachable on disk
 *
 * And it adds @b to the list of @as's new nodes, so that we can update sector
 * counts in btree_update_nodes_written():
 */
static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
	struct bch_fs *c = as->c;

	closure_get(&as->cl);

	mutex_lock(&c->btree_interior_update_lock);
	BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
	BUG_ON(b->will_make_reachable);

	as->new_nodes[as->nr_new_nodes++] = b;
	b->will_make_reachable = 1UL|(unsigned long) as;
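	/*
	 * will_make_reachable doubles as a tagged pointer: the btree_update
	 * is recovered with (v & ~1UL) in btree_update_drop_new_node() below,
	 * and the low bit tracks whether @as's closure ref is still owed:
	 */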
	mutex_unlock(&c->btree_interior_update_lock);

	btree_update_will_add_key(as, &b->key);
/*
 * If @b was a new node being made reachable by a btree_update, drop it from
 * that update's list of new nodes and release the update's closure ref:
 */
static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
	struct btree_update *as;

	mutex_lock(&c->btree_interior_update_lock);
	/*
	 * When b->will_make_reachable != 0, it owns a ref on as->cl that's
	 * dropped when it gets written by bch2_btree_complete_write - the
	 * xchg() is for synchronization with bch2_btree_complete_write:
	 */
	v = xchg(&b->will_make_reachable, 0);
	as = (struct btree_update *) (v & ~1UL);

		mutex_unlock(&c->btree_interior_update_lock);

	for (i = 0; i < as->nr_new_nodes; i++)
		if (as->new_nodes[i] == b)

	array_remove_item(as->new_nodes, as->nr_new_nodes, i);
	mutex_unlock(&c->btree_interior_update_lock);

	closure_put(&as->cl);

static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
		as->open_buckets[as->nr_open_buckets++] =

/*
 * @b is being split/rewritten: it may have pointers to not-yet-written btree
 * nodes and thus outstanding btree_updates - redirect @b's
 * btree_updates to point to this btree_update:
 */
static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
	struct bch_fs *c = as->c;
	struct btree_update *p, *n;
	struct btree_write *w;

	set_btree_node_dying(b);

	if (btree_node_fake(b))

	mutex_lock(&c->btree_interior_update_lock);

	/*
	 * Does this node have any btree_update operations preventing
	 * it from being written?
	 *
	 * If so, redirect them to point to this btree_update: we can
	 * write out our new nodes, but we won't make them visible until those
	 * operations complete
	 */
	list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
		list_del_init(&p->write_blocked_list);
		btree_update_reparent(as, p);

		/*
		 * for flush_held_btree_writes() waiting on updates to flush or
		 * nodes to be writeable:
		 */
		closure_wake_up(&c->btree_interior_update_wait);

	clear_btree_node_dirty(c, b);
	clear_btree_node_need_write(b);

	/*
	 * Does this node have unwritten data that has a pin on the journal?
	 *
	 * If so, transfer that pin to the btree_update operation -
	 * note that if we're freeing multiple nodes, we only need to keep the
	 * oldest pin of any of the nodes we're freeing. We'll release the pin
	 * when the new nodes are persistent and reachable on disk:
	 */
	w = btree_current_write(b);
	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
	bch2_journal_pin_drop(&c->journal, &w->journal);

	w = btree_prev_write(b);
	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
	bch2_journal_pin_drop(&c->journal, &w->journal);

	mutex_unlock(&c->btree_interior_update_lock);

	/*
	 * Is this a node that isn't reachable on disk yet?
	 *
	 * Nodes that aren't reachable yet have writes blocked until they're
	 * reachable - now that we've cancelled any pending writes and moved
	 * things waiting on that write to wait on this update, we can drop this
	 * node from the list of nodes that the other update is making
	 * reachable, prior to freeing it:
	 */
	btree_update_drop_new_node(c, b);

	btree_update_will_delete_key(as, &b->key);

	as->old_nodes[as->nr_old_nodes] = b;
	as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq;

static void bch2_btree_update_done(struct btree_update *as)
	struct bch_fs *c = as->c;
	u64 start_time = as->start_time;

	BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);

	if (as->took_gc_lock)
		up_read(&as->c->gc_lock);
	as->took_gc_lock = false;

	bch2_btree_reserve_put(as);

	continue_at(&as->cl, btree_update_set_nodes_written,
		    as->c->btree_interior_update_worker);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_foreground],

static struct btree_update *
bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
			unsigned level, unsigned nr_nodes, unsigned flags)
	struct bch_fs *c = trans->c;
	struct btree_update *as;
	u64 start_time = local_clock();
	int disk_res_flags = (flags & BTREE_INSERT_NOFAIL)
		? BCH_DISK_RESERVATION_NOFAIL : 0;
	int journal_flags = 0;

	BUG_ON(!path->should_be_locked);

	if (flags & BTREE_INSERT_JOURNAL_RESERVED)
		journal_flags |= JOURNAL_RES_GET_RESERVED;

	closure_init_stack(&cl);

	/*
	 * XXX: figure out how far we might need to split,
	 * instead of locking/reserving all the way to the root:
	 */
	if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
		trace_trans_restart_iter_upgrade(trans->ip, _RET_IP_,
						 path->btree_id, &path->pos);
		ret = btree_trans_restart(trans);

	if (flags & BTREE_INSERT_GC_LOCK_HELD)
		lockdep_assert_held(&c->gc_lock);
	else if (!down_read_trylock(&c->gc_lock)) {
		bch2_trans_unlock(trans);
		down_read(&c->gc_lock);
		if (!bch2_trans_relock(trans)) {
			up_read(&c->gc_lock);
			return ERR_PTR(-EINTR);

	as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
	memset(as, 0, sizeof(*as));
	closure_init(&as->cl, NULL);

	as->start_time = start_time;
	as->mode = BTREE_INTERIOR_NO_UPDATE;
	as->took_gc_lock = !(flags & BTREE_INSERT_GC_LOCK_HELD);
	as->btree_id = path->btree_id;
	INIT_LIST_HEAD(&as->list);
	INIT_LIST_HEAD(&as->unwritten_list);
	INIT_LIST_HEAD(&as->write_blocked_list);
	bch2_keylist_init(&as->old_keys, as->_old_keys);
	bch2_keylist_init(&as->new_keys, as->_new_keys);
	bch2_keylist_init(&as->parent_keys, as->inline_keys);

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->list, &c->btree_interior_update_list);
	mutex_unlock(&c->btree_interior_update_lock);

	/*
	 * We don't want to allocate if we're in an error state, that can cause
	 * deadlock on emergency shutdown due to open buckets getting stuck in
	 * the btree_reserve_cache after allocator shutdown has cleared it out.
	 * This check needs to come after adding us to the btree_interior_update
	 * list but before calling bch2_btree_reserve_get, to synchronize with
	 * __bch2_fs_read_only().
	 */
	ret = bch2_journal_error(&c->journal);
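	/*
	 * Journal pre-reservation: first try nonblocking while btree locks
	 * are still held; on -EAGAIN we unlock the transaction, retry
	 * blocking, and relock below:
	 */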
	ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
				      BTREE_UPDATE_JOURNAL_RES,
				      journal_flags|JOURNAL_RES_GET_NONBLOCK);
	if (ret == -EAGAIN) {
		bch2_trans_unlock(trans);

		if (flags & BTREE_INSERT_JOURNAL_RECLAIM) {
			bch2_btree_update_free(as);
			btree_trans_restart(trans);
			return ERR_PTR(ret);

		ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
					      BTREE_UPDATE_JOURNAL_RES,
			trace_trans_restart_journal_preres_get(trans->ip, _RET_IP_);

		if (!bch2_trans_relock(trans)) {

	ret = bch2_disk_reservation_get(c, &as->disk_res,
					nr_nodes * btree_sectors(c),
					c->opts.metadata_replicas,

	ret = bch2_btree_reserve_get(as, nr_nodes, flags, &cl);

	bch2_journal_pin_add(&c->journal,
			     atomic64_read(&c->journal.seq),
			     &as->journal, NULL);

	bch2_btree_update_free(as);

	if (ret == -EAGAIN) {
		bch2_trans_unlock(trans);

	if (ret == -EINTR && bch2_trans_relock(trans))

	return ERR_PTR(ret);

/* Btree root updates: */

static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
	/* Root nodes cannot be reaped */
	mutex_lock(&c->btree_cache.lock);
	list_del_init(&b->list);
	mutex_unlock(&c->btree_cache.lock);

		six_lock_pcpu_alloc(&b->c.lock);
		six_lock_pcpu_free(&b->c.lock);

	mutex_lock(&c->btree_root_lock);
	BUG_ON(btree_node_root(c, b) &&
	       (b->c.level < btree_node_root(c, b)->c.level ||
		!btree_node_dying(btree_node_root(c, b))));

	btree_node_root(c, b) = b;
	mutex_unlock(&c->btree_root_lock);

	bch2_recalc_btree_reserve(c);
/**
 * bch2_btree_set_root - update the root in memory and on disk
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks. However, you must hold an intent lock on the
 * old root.
 *
 * Note: This allocates a journal entry but doesn't add any keys to
 * it. All the btree roots are part of every journal write, so there
 * is nothing new to be done. This just guarantees that there is a
 * journal write.
 */
static void bch2_btree_set_root(struct btree_update *as,
				struct btree_trans *trans,
				struct btree_path *path,
	struct bch_fs *c = as->c;

	trace_btree_set_root(c, b);
	BUG_ON(!b->written &&
	       !test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags));

	old = btree_node_root(c, b);
	/*
	 * Ensure no one is using the old root while we switch to the
	 * new root:
	 */
	bch2_btree_node_lock_write(trans, path, old);

	bch2_btree_set_root_inmem(c, b);

	btree_update_updated_root(as, b);

	/*
	 * Unlock old root after new root is visible:
	 *
	 * The new root isn't persistent, but that's ok: we still have
	 * an intent lock on the new root, and any updates that would
	 * depend on the new root would have to update the new root.
	 */
	bch2_btree_node_unlock_write(trans, path, old);

/* Interior node updates: */

static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
					struct btree_trans *trans,
					struct btree_path *path,
					struct btree_node_iter *node_iter,
					struct bkey_i *insert)
	struct bch_fs *c = as->c;
	struct bkey_packed *k;
	const char *invalid;

	BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
	       !btree_ptr_sectors_written(insert));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?:
		bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert));

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert));
		bch2_fs_inconsistent(c, "inserting invalid bkey %s: %s", buf, invalid);

	BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
	       ARRAY_SIZE(as->journal_entries));

		journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
				  BCH_JSET_ENTRY_btree_keys,
				  b->c.btree_id, b->c.level,
				  insert, insert->k.u64s);

	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
	       bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
		bch2_btree_node_iter_advance(node_iter, b);

	bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
	set_btree_node_dirty(c, b);
	set_btree_node_need_write(b);

__bch2_btree_insert_keys_interior(struct btree_update *as,
				  struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_node_iter node_iter,
				  struct keylist *keys)
	struct bkey_i *insert = bch2_keylist_front(keys);
	struct bkey_packed *k;

	BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);

	while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
	       (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0))

	while (!bch2_keylist_empty(keys)) {
		bch2_insert_fixup_btree_ptr(as, trans, path, b,
					    &node_iter, bch2_keylist_front(keys));
		bch2_keylist_pop_front(keys);
/*
 * Move keys from n1 (original replacement node, now lower node) to n2 (higher
 * node):
 */
static struct btree *__btree_split_node(struct btree_update *as,
	struct bkey_format_state s;
	size_t nr_packed = 0, nr_unpacked = 0;
	struct bset *set1, *set2;
	struct bkey_packed *k, *set2_start, *set2_end, *out, *prev = NULL;

	n2 = bch2_btree_node_alloc(as, n1->c.level);
	bch2_btree_update_add_new_node(as, n2);

	n2->data->max_key = n1->data->max_key;
	n2->data->format = n1->format;
	SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data));
	n2->key.k.p = n1->key.k.p;

	set1 = btree_bset_first(n1);
	set2 = btree_bset_first(n2);
	/*
	 * Has to be a linear search because we don't have an auxiliary
	 * search tree yet - the pivot below is chosen roughly 3/5 of the way
	 * through the node, leaving the lower node slightly fuller:
	 */
		struct bkey_packed *n = bkey_next(k);

		if (n == vstruct_last(set1))
		if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5)

	set2_end = vstruct_last(set1);

	set1->u64s = cpu_to_le16((u64 *) set2_start - set1->_data);
	set_btree_bset_end(n1, n1->set);

	n1->nr.live_u64s = le16_to_cpu(set1->u64s);
	n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s);
	n1->nr.packed_keys = nr_packed;
	n1->nr.unpacked_keys = nr_unpacked;
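	/*
	 * The pivot was unpacked from the old node; on pre-snapshot metadata
	 * versions bpos has no snapshot field, but node boundaries are always
	 * stored with snapshot U32_MAX:
	 */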
	n1_pos = bkey_unpack_pos(n1, prev);
	if (as->c->sb.version < bcachefs_metadata_version_snapshot)
		n1_pos.snapshot = U32_MAX;

	btree_set_max(n1, n1_pos);
	btree_set_min(n2, bpos_successor(n1->key.k.p));

	bch2_bkey_format_init(&s);
	bch2_bkey_format_add_pos(&s, n2->data->min_key);
	bch2_bkey_format_add_pos(&s, n2->data->max_key);

	for (k = set2_start; k != set2_end; k = bkey_next(k)) {
		struct bkey uk = bkey_unpack_key(n1, k);
		bch2_bkey_format_add_key(&s, &uk);

	n2->data->format = bch2_bkey_format_done(&s);
	btree_node_set_format(n2, n2->data->format);

	memset(&n2->nr, 0, sizeof(n2->nr));

	for (k = set2_start; k != set2_end; k = bkey_next(k)) {
		BUG_ON(!bch2_bkey_transform(&n2->format, out, bkey_packed(k)
					    ? &n1->format : &bch2_bkey_format_current, k));
		out->format = KEY_FORMAT_LOCAL_BTREE;
		btree_keys_account_key_add(&n2->nr, 0, out);
		out = bkey_next(out);

	set2->u64s = cpu_to_le16((u64 *) out - set2->_data);
	set_btree_bset_end(n2, n2->set);

	BUG_ON(!set1->u64s);
	BUG_ON(!set2->u64s);

	btree_node_reset_sib_u64s(n1);
	btree_node_reset_sib_u64s(n2);

	bch2_verify_btree_nr_keys(n1);
	bch2_verify_btree_nr_keys(n2);

	btree_node_interior_verify(as->c, n1);
	btree_node_interior_verify(as->c, n2);
/*
 * For updates to interior nodes, we've got to do the insert before we split
 * because the stuff we're inserting has to be inserted atomically. Post split,
 * the keys might have to go in different nodes and the split would no longer
 * be atomic.
 *
 * Worse, if the insert is from btree node coalescing and we do the insert
 * after the split (and pick the pivot), the pivot we pick might be between
 * nodes that were coalesced, and thus in the middle of a child node post
 * coalescing:
 */
static void btree_split_insert_keys(struct btree_update *as,
				    struct btree_trans *trans,
				    struct btree_path *path,
				    struct keylist *keys)
	struct btree_node_iter node_iter;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct bkey_packed *src, *dst, *n;

	bch2_btree_node_iter_init(&node_iter, b, &k->k.p);

	__bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
	/*
	 * We can't tolerate whiteouts here - with whiteouts there can be
	 * duplicate keys, and it would be rather bad if we picked a duplicate
	 * for the pivot:
	 */
	i = btree_bset_first(b);
	src = dst = i->start;
	while (src != vstruct_last(i)) {
		if (!bkey_deleted(src)) {
			memmove_u64s_down(dst, src, src->u64s);
			dst = bkey_next(dst);

	/* Also clear out the unwritten whiteouts area: */
	b->whiteout_u64s = 0;

	i->u64s = cpu_to_le16((u64 *) dst - i->_data);
	set_btree_bset_end(b, b->set);

	BUG_ON(b->nsets != 1 ||
	       b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s));

	btree_node_interior_verify(as->c, b);

static void btree_split(struct btree_update *as, struct btree_trans *trans,
			struct btree_path *path, struct btree *b,
			struct keylist *keys, unsigned flags)
	struct bch_fs *c = as->c;
	struct btree *parent = btree_node_parent(path, b);
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	u64 start_time = local_clock();

	BUG_ON(!parent && (b != btree_node_root(c, b)));
	BUG_ON(!btree_node_intent_locked(path, btree_node_root(c, b)->c.level));

	bch2_btree_interior_update_will_free_node(as, b);

	n1 = bch2_btree_node_alloc_replacement(as, b);
	bch2_btree_update_add_new_node(as, n1);

		btree_split_insert_keys(as, trans, path, n1, keys);

	if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
		trace_btree_split(c, b);

		n2 = __btree_split_node(as, n1);

		bch2_btree_build_aux_trees(n2);
		bch2_btree_build_aux_trees(n1);
		six_unlock_write(&n2->c.lock);
		six_unlock_write(&n1->c.lock);

		bch2_btree_node_write(c, n1, SIX_LOCK_intent);
		bch2_btree_node_write(c, n2, SIX_LOCK_intent);
		/*
		 * Note that on recursive splits, parent_keys == keys, so we
		 * can't start adding new keys to parent_keys before emptying
		 * it out (which we did with btree_split_insert_keys() above):
		 */
		bch2_keylist_add(&as->parent_keys, &n1->key);
		bch2_keylist_add(&as->parent_keys, &n2->key);

			/* Depth increases, make a new root */
			n3 = __btree_root_alloc(as, b->c.level + 1);

			n3->sib_u64s[0] = U16_MAX;
			n3->sib_u64s[1] = U16_MAX;

			btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);

			bch2_btree_node_write(c, n3, SIX_LOCK_intent);

		trace_btree_compact(c, b);

		bch2_btree_build_aux_trees(n1);
		six_unlock_write(&n1->c.lock);

		bch2_btree_node_write(c, n1, SIX_LOCK_intent);

			bch2_keylist_add(&as->parent_keys, &n1->key);

	/* New nodes all written, now make them visible: */
		/* Split a non-root node */
		bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);

		bch2_btree_set_root(as, trans, path, n3);

		/* Root filled up but didn't need to be split */
		bch2_btree_set_root(as, trans, path, n1);

	bch2_btree_update_get_open_buckets(as, n1);
		bch2_btree_update_get_open_buckets(as, n2);
		bch2_btree_update_get_open_buckets(as, n3);

	/* Successful split, update the path to point to the new nodes: */

	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
		bch2_trans_node_add(trans, n3);
		bch2_trans_node_add(trans, n2);
	bch2_trans_node_add(trans, n1);

	/*
	 * The old node must be freed (in memory) _before_ unlocking the new
	 * nodes - else another thread could re-acquire a read lock on the old
	 * node after another thread has locked and updated the new node, thus
	 * seeing stale data:
	 */
	bch2_btree_node_free_inmem(trans, b);

		six_unlock_intent(&n3->c.lock);
		six_unlock_intent(&n2->c.lock);
	six_unlock_intent(&n1->c.lock);

	bch2_trans_verify_locks(trans);

	bch2_time_stats_update(&c->times[n2
			       ? BCH_TIME_btree_node_split
			       : BCH_TIME_btree_node_compact],

bch2_btree_insert_keys_interior(struct btree_update *as,
				struct btree_trans *trans,
				struct btree_path *path,
				struct keylist *keys)
	struct btree_path *linked;

	__bch2_btree_insert_keys_interior(as, trans, path, b,
					  path->l[b->c.level].iter, keys);

	btree_update_updated_node(as, b);

	trans_for_each_path_with_node(trans, b, linked)
		bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);

	bch2_trans_verify_paths(trans);
/**
 * bch2_btree_insert_node - insert bkeys into a given btree node
 * @as:    btree_update this insert is part of
 * @trans: btree transaction
 * @path:  btree_path pointing to @b
 * @b:     interior node to insert into
 * @keys:  list of keys to insert
 * @flags: transaction commit flags
 *
 * Inserts as many keys as it can into a given btree node, splitting it if full.
 * If a split occurred, this function will return early. This can only happen
 * for leaf nodes -- inserts into interior nodes have to be atomic.
 */
static void bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans,
				   struct btree_path *path, struct btree *b,
				   struct keylist *keys, unsigned flags)
	struct bch_fs *c = as->c;
	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	lockdep_assert_held(&c->gc_lock);
	BUG_ON(!btree_node_intent_locked(path, btree_node_root(c, b)->c.level));
	BUG_ON(!b->c.level);
	BUG_ON(!as || as->b);
	bch2_verify_keylist_sorted(keys);

	bch2_btree_node_lock_for_insert(trans, path, b);

	if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
		bch2_btree_node_unlock_write(trans, path, b);

	btree_node_interior_verify(c, b);

	bch2_btree_insert_keys_interior(as, trans, path, b, keys);

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_trans_node_reinit_iter(trans, b);

	bch2_btree_node_unlock_write(trans, path, b);

	btree_node_interior_verify(c, b);

	btree_split(as, trans, path, b, keys, flags);

int bch2_btree_split_leaf(struct btree_trans *trans,
			  struct btree_path *path,
	struct bch_fs *c = trans->c;
	struct btree *b = path_l(path)->b;
	struct btree_update *as;

	as = bch2_btree_update_start(trans, path, path->level,
				     btree_update_reserve_required(c, b), flags);

	btree_split(as, trans, path, b, NULL, flags);
	bch2_btree_update_done(as);

	for (l = path->level + 1; btree_path_node(path, l) && !ret; l++)
		ret = bch2_foreground_maybe_merge(trans, path, l, flags);

int __bch2_foreground_maybe_merge(struct btree_trans *trans,
				  struct btree_path *path,
				  enum btree_node_sibling sib)
	struct bch_fs *c = trans->c;
	struct btree_path *sib_path = NULL;
	struct btree_update *as;
	struct bkey_format_state new_s;
	struct bkey_format new_f;
	struct bkey_i delete;
	struct btree *b, *m, *n, *prev, *next, *parent;
	struct bpos sib_pos;
	u64 start_time = local_clock();

	BUG_ON(!path->should_be_locked);
	BUG_ON(!btree_node_locked(path, level));

	b = path->l[level].b;

	if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
	    (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
		b->sib_u64s[sib] = U16_MAX;

	sib_pos = sib == btree_prev_sib
		? bpos_predecessor(b->data->min_key)
		: bpos_successor(b->data->max_key);

	sib_path = bch2_path_get(trans, path->btree_id, sib_pos,
				 U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, sib_path, false);

	sib_path->should_be_locked = true;

	m = sib_path->l[level].b;

	if (btree_node_parent(path, b) !=
	    btree_node_parent(sib_path, m)) {
		b->sib_u64s[sib] = U16_MAX;

	if (sib == btree_prev_sib) {

	if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
		char buf1[100], buf2[100];

		bch2_bpos_to_text(&PBUF(buf1), prev->data->max_key);
		bch2_bpos_to_text(&PBUF(buf2), next->data->min_key);
			"btree topology error in btree merge:\n"
			"  prev ends at   %s\n"
			"  next starts at %s",
		bch2_topology_error(c);

	bch2_bkey_format_init(&new_s);
	bch2_bkey_format_add_pos(&new_s, prev->data->min_key);
	__bch2_btree_calc_format(&new_s, prev);
	__bch2_btree_calc_format(&new_s, next);
	bch2_bkey_format_add_pos(&new_s, next->data->max_key);
	new_f = bch2_bkey_format_done(&new_s);

	sib_u64s = btree_node_u64s_with_format(b, &new_f) +
		btree_node_u64s_with_format(m, &new_f);

	if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
		sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
		sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);

	sib_u64s = min(sib_u64s, btree_max_u64s(c));
	sib_u64s = min(sib_u64s, (size_t) U16_MAX - 1);
	b->sib_u64s[sib] = sib_u64s;

	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)

	parent = btree_node_parent(path, b);
	as = bch2_btree_update_start(trans, path, level,
			btree_update_reserve_required(c, parent) + 1,
			BTREE_INSERT_NOFAIL|
			BTREE_INSERT_USE_RESERVE);
	ret = PTR_ERR_OR_ZERO(as);

	trace_btree_merge(c, b);

	bch2_btree_interior_update_will_free_node(as, b);
	bch2_btree_interior_update_will_free_node(as, m);

	n = bch2_btree_node_alloc(as, b->c.level);
	bch2_btree_update_add_new_node(as, n);

	btree_set_min(n, prev->data->min_key);
	btree_set_max(n, next->data->max_key);
	n->data->format = new_f;

	btree_node_set_format(n, new_f);

	bch2_btree_sort_into(c, n, prev);
	bch2_btree_sort_into(c, n, next);

	bch2_btree_build_aux_trees(n);
	six_unlock_write(&n->c.lock);

	bch2_btree_node_write(c, n, SIX_LOCK_intent);

	bkey_init(&delete.k);
	delete.k.p = prev->key.k.p;
	bch2_keylist_add(&as->parent_keys, &delete);
	bch2_keylist_add(&as->parent_keys, &n->key);

	bch2_trans_verify_paths(trans);

	bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);

	bch2_trans_verify_paths(trans);

	bch2_btree_update_get_open_buckets(as, n);

	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
	six_lock_increment(&m->c.lock, SIX_LOCK_intent);

	bch2_trans_node_add(trans, n);

	bch2_trans_verify_paths(trans);

	bch2_btree_node_free_inmem(trans, b);
	bch2_btree_node_free_inmem(trans, m);

	six_unlock_intent(&n->c.lock);

	bch2_btree_update_done(as);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time);

	bch2_path_put(trans, sib_path, true);
	bch2_trans_verify_locks(trans);
/*
 * bch2_btree_node_rewrite - Rewrite/move a btree node
 */
int bch2_btree_node_rewrite(struct btree_trans *trans,
			    struct btree_iter *iter,
	struct bch_fs *c = trans->c;
	struct btree *n, *parent;
	struct btree_update *as;

	flags |= BTREE_INSERT_NOFAIL;

	parent = btree_node_parent(iter->path, b);
	as = bch2_btree_update_start(trans, iter->path, b->c.level,
				     ? btree_update_reserve_required(c, parent)

	ret = PTR_ERR_OR_ZERO(as);
		trace_btree_gc_rewrite_node_fail(c, b);

	bch2_btree_interior_update_will_free_node(as, b);

	n = bch2_btree_node_alloc_replacement(as, b);
	bch2_btree_update_add_new_node(as, n);

	bch2_btree_build_aux_trees(n);
	six_unlock_write(&n->c.lock);

	trace_btree_gc_rewrite_node(c, b);

	bch2_btree_node_write(c, n, SIX_LOCK_intent);

		bch2_keylist_add(&as->parent_keys, &n->key);
		bch2_btree_insert_node(as, trans, iter->path, parent,
				       &as->parent_keys, flags);

		bch2_btree_set_root(as, trans, iter->path, n);

	bch2_btree_update_get_open_buckets(as, n);

	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
	bch2_trans_node_add(trans, n);
	bch2_btree_node_free_inmem(trans, b);
	six_unlock_intent(&n->c.lock);

	bch2_btree_update_done(as);

	bch2_btree_path_downgrade(iter->path);

struct async_btree_rewrite {
	struct work_struct work;
	enum btree_id btree_id;

static int async_btree_node_rewrite_trans(struct btree_trans *trans,
					  struct async_btree_rewrite *a)
	struct btree_iter iter;

	bch2_trans_node_iter_init(trans, &iter, a->btree_id, a->pos,
				  BTREE_MAX_DEPTH, a->level, 0);
	b = bch2_btree_iter_peek_node(&iter);
	ret = PTR_ERR_OR_ZERO(b);

	if (!b || b->data->keys.seq != a->seq)

	ret = bch2_btree_node_rewrite(trans, &iter, b, 0);

	bch2_trans_iter_exit(trans, &iter);

void async_btree_node_rewrite_work(struct work_struct *work)
	struct async_btree_rewrite *a =
		container_of(work, struct async_btree_rewrite, work);
	struct bch_fs *c = a->c;

	bch2_trans_do(c, NULL, NULL, 0,
		      async_btree_node_rewrite_trans(&trans, a));
	percpu_ref_put(&c->writes);

void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
	struct async_btree_rewrite *a;

	if (!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags))

	if (!percpu_ref_tryget(&c->writes))

	a = kmalloc(sizeof(*a), GFP_NOFS);
		percpu_ref_put(&c->writes);

	a->btree_id = b->c.btree_id;
	a->level = b->c.level;
	a->pos = b->key.k.p;
	a->seq = b->data->keys.seq;

	INIT_WORK(&a->work, async_btree_node_rewrite_work);
	queue_work(c->btree_interior_update_worker, &a->work);

static int __bch2_btree_node_update_key(struct btree_trans *trans,
					struct btree_iter *iter,
					struct btree *b, struct btree *new_hash,
					struct bkey_i *new_key,
	struct bch_fs *c = trans->c;
	struct btree_iter iter2 = { NULL };
	struct btree *parent;
	u64 journal_entries[BKEY_BTREE_PTR_U64s_MAX];

	if (!skip_triggers) {
		ret = bch2_trans_mark_key(trans,
					  bkey_i_to_s_c(new_key),
					  BTREE_TRIGGER_INSERT);

		ret = bch2_trans_mark_key(trans,
					  bkey_i_to_s_c(&b->key),
					  BTREE_TRIGGER_OVERWRITE);

		bkey_copy(&new_hash->key, new_key);
		ret = bch2_btree_node_hash_insert(&c->btree_cache,
				new_hash, b->c.level, b->c.btree_id);

	parent = btree_node_parent(iter->path, b);
		bch2_trans_copy_iter(&iter2, iter);

		iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
				iter2.flags & BTREE_ITER_INTENT,

		BUG_ON(iter2.path->level != b->c.level);
		BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));

		btree_node_unlock(iter2.path, iter2.path->level);
		path_l(iter2.path)->b = BTREE_ITER_NO_NODE_UP;
		iter2.path->level++;

		ret = bch2_btree_iter_traverse(&iter2) ?:
			bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN);

		BUG_ON(btree_node_root(c, b) != b);

		trans->extra_journal_entries = (void *) &journal_entries[0];
		trans->extra_journal_entry_u64s =
			journal_entry_set((void *) &journal_entries[0],
					  BCH_JSET_ENTRY_btree_root,
					  b->c.btree_id, b->c.level,
					  new_key, new_key->k.u64s);

	ret = bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_JOURNAL_RECLAIM|
				BTREE_INSERT_JOURNAL_RESERVED);

	bch2_btree_node_lock_write(trans, iter->path, b);

		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		bkey_copy(&b->key, new_key);
		ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);

		mutex_unlock(&c->btree_cache.lock);

		bkey_copy(&b->key, new_key);

	bch2_btree_node_unlock_write(trans, iter->path, b);

	bch2_trans_iter_exit(trans, &iter2);

		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);

int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree *b, struct bkey_i *new_key,
	struct bch_fs *c = trans->c;
	struct btree *new_hash = NULL;
	struct btree_path *path = iter->path;

	if (!btree_node_intent_locked(path, b->c.level) &&
	    !bch2_btree_path_upgrade(trans, path, b->c.level + 1)) {
		btree_trans_restart(trans);

	closure_init_stack(&cl);

	/*
	 * check btree_ptr_hash_val() after @b is locked by
	 * btree_iter_traverse():
	 */
	if (btree_ptr_hash_val(new_key) != b->hash_val) {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			bch2_trans_unlock(trans);

			if (!bch2_trans_relock(trans))

		new_hash = bch2_btree_node_mem_alloc(c);

	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
					   new_key, skip_triggers);

		mutex_lock(&c->btree_cache.lock);
		list_move(&new_hash->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		six_unlock_write(&new_hash->c.lock);
		six_unlock_intent(&new_hash->c.lock);

	bch2_btree_cache_cannibalize_unlock(c);

int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
					struct btree *b, struct bkey_i *new_key,
	struct btree_iter iter;

	bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
				  BTREE_MAX_DEPTH, b->c.level,
	ret = bch2_btree_iter_traverse(&iter);
	/* has node been freed? */
	if (iter.path->l[b->c.level].b != b) {
		BUG_ON(!btree_node_dying(b));
	BUG_ON(!btree_node_hashed(b));

	ret = bch2_btree_node_update_key(trans, &iter, b, new_key, skip_triggers);

	bch2_trans_iter_exit(trans, &iter);

/*
 * Only for filesystem bringup, when first reading the btree roots or allocating
 * btree roots when initializing a new filesystem:
 */
void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
	BUG_ON(btree_node_root(c, b));

	bch2_btree_set_root_inmem(c, b);

void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
	closure_init_stack(&cl);

		ret = bch2_btree_cache_cannibalize_lock(c, &cl);

	b = bch2_btree_node_mem_alloc(c);
	bch2_btree_cache_cannibalize_unlock(c);

	set_btree_node_fake(b);
	set_btree_node_need_rewrite(b);

	bkey_btree_ptr_init(&b->key);
	b->key.k.p = SPOS_MAX;
	*((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;

	bch2_bset_init_first(b, &b->data->keys);
	bch2_btree_build_aux_trees(b);

	btree_set_min(b, POS_MIN);
	btree_set_max(b, SPOS_MAX);
	b->data->format = bch2_btree_calc_format(b);
	btree_node_set_format(b, b->data->format);

	ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
					  b->c.level, b->c.btree_id);

	bch2_btree_set_root_inmem(c, b);

	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
	struct btree_update *as;

	mutex_lock(&c->btree_interior_update_lock);
	list_for_each_entry(as, &c->btree_interior_update_list, list)
		pr_buf(out, "%p m %u w %u r %u j %llu\n",
		       atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK,
	mutex_unlock(&c->btree_interior_update_lock);

size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *c)
	struct list_head *i;

	mutex_lock(&c->btree_interior_update_lock);
	list_for_each(i, &c->btree_interior_update_list)
	mutex_unlock(&c->btree_interior_update_lock);

void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
	struct btree_root *r;
	struct jset_entry *entry;

	mutex_lock(&c->btree_root_lock);

	vstruct_for_each(jset, entry)
		if (entry->type == BCH_JSET_ENTRY_btree_root) {
			r = &c->btree_roots[entry->btree_id];
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);

	mutex_unlock(&c->btree_root_lock);

bch2_btree_roots_to_journal_entries(struct bch_fs *c,
				    struct jset_entry *start,
				    struct jset_entry *end)
	struct jset_entry *entry;
	unsigned long have = 0;
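	/*
	 * Collect the btree ids that already have a root entry in
	 * [start, end), then append entries for any live roots missing from
	 * that range:
	 */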
	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root)
			__set_bit(entry->btree_id, &have);

	mutex_lock(&c->btree_root_lock);

	for (i = 0; i < BTREE_ID_NR; i++)
		if (c->btree_roots[i].alive && !test_bit(i, &have)) {
			journal_entry_set(end,
					  BCH_JSET_ENTRY_btree_root,
					  i, c->btree_roots[i].level,
					  &c->btree_roots[i].key,
					  c->btree_roots[i].key.u64s);
			end = vstruct_next(end);

	mutex_unlock(&c->btree_root_lock);

void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
	if (c->btree_interior_update_worker)
		destroy_workqueue(c->btree_interior_update_worker);
	mempool_exit(&c->btree_interior_update_pool);

int bch2_fs_btree_interior_update_init(struct bch_fs *c)
	mutex_init(&c->btree_reserve_cache_lock);
	INIT_LIST_HEAD(&c->btree_interior_update_list);
	INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
	mutex_init(&c->btree_interior_update_lock);
	INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);

	c->btree_interior_update_worker =
		alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!c->btree_interior_update_worker)

	return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
					 sizeof(struct btree_update));