X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_update_interior.c;h=17a5938aa71a6b43b45c12383e4690df146ee2a3;hb=38b8d01c4cf823d9804451eff5ec811c17b03408;hp=d895d4eff0a924bc5c989234d172cb738ce95bc6;hpb=39a6bf885d4d8e9a59879931216b4b0f7a2571eb;p=bcachefs-tools-debian diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c index d895d4e..17a5938 100644 --- a/libbcachefs/btree_update_interior.c +++ b/libbcachefs/btree_update_interior.c @@ -5,12 +5,14 @@ #include "bkey_methods.h" #include "btree_cache.h" #include "btree_gc.h" +#include "btree_journal_iter.h" #include "btree_update.h" #include "btree_update_interior.h" #include "btree_io.h" #include "btree_iter.h" #include "btree_locking.h" #include "buckets.h" +#include "clock.h" #include "error.h" #include "extents.h" #include "journal.h" @@ -18,15 +20,31 @@ #include "keylist.h" #include "replicas.h" #include "super-io.h" +#include "trace.h" #include -#include -static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *, - struct btree_path *, struct btree *, - struct keylist *, unsigned); +static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *, + btree_path_idx_t, struct btree *, + struct keylist *, unsigned); static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *); +static btree_path_idx_t get_unlocked_mut_path(struct btree_trans *trans, + enum btree_id btree_id, + unsigned level, + struct bpos pos) +{ + btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level, + BTREE_ITER_NOPRESERVE| + BTREE_ITER_INTENT, _RET_IP_); + path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_); + + struct btree_path *path = trans->paths + path_idx; + bch2_btree_path_downgrade(trans, path); + __bch2_btree_path_unlock(trans, path); + return path_idx; +} + /* Debug code: */ /* @@ -40,11 +58,11 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) struct bkey_s_c k; struct bkey_s_c_btree_ptr_v2 bp; struct bkey unpacked; - char buf1[100], buf2[100]; + struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; BUG_ON(!b->c.level); - if (!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags)) + if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) return; bch2_btree_node_iter_init_from_start(&iter, b); @@ -55,21 +73,21 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) break; bp = bkey_s_c_to_btree_ptr_v2(k); - if (bpos_cmp(next_node, bp.v->min_key)) { + if (!bpos_eq(next_node, bp.v->min_key)) { bch2_dump_btree_node(c, b); - panic("expected next min_key %s got %s\n", - (bch2_bpos_to_text(&PBUF(buf1), next_node), buf1), - (bch2_bpos_to_text(&PBUF(buf2), bp.v->min_key), buf2)); + bch2_bpos_to_text(&buf1, next_node); + bch2_bpos_to_text(&buf2, bp.v->min_key); + panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf); } bch2_btree_node_iter_advance(&iter, b); if (bch2_btree_node_iter_end(&iter)) { - if (bpos_cmp(k.k->p, b->key.k.p)) { + if (!bpos_eq(k.k->p, b->key.k.p)) { bch2_dump_btree_node(c, b); - panic("expected end %s got %s\n", - (bch2_bpos_to_text(&PBUF(buf1), b->key.k.p), buf1), - (bch2_bpos_to_text(&PBUF(buf2), k.k->p), buf2)); + bch2_bpos_to_text(&buf1, b->key.k.p); + bch2_bpos_to_text(&buf2, k.k->p); + panic("expected end %s got %s\n", buf1.buf, buf2.buf); } break; } @@ -81,7 +99,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) /* Calculate ideal packed bkey format for new btree nodes: */ -void 
__bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) +static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) { struct bkey_packed *k; struct bset_tree *t; @@ -107,43 +125,52 @@ static struct bkey_format bch2_btree_calc_format(struct btree *b) return bch2_bkey_format_done(&s); } -static size_t btree_node_u64s_with_format(struct btree *b, +static size_t btree_node_u64s_with_format(struct btree_nr_keys nr, + struct bkey_format *old_f, struct bkey_format *new_f) { - struct bkey_format *old_f = &b->format; - /* stupid integer promotion rules */ ssize_t delta = (((int) new_f->key_u64s - old_f->key_u64s) * - (int) b->nr.packed_keys) + + (int) nr.packed_keys) + (((int) new_f->key_u64s - BKEY_U64s) * - (int) b->nr.unpacked_keys); + (int) nr.unpacked_keys); - BUG_ON(delta + b->nr.live_u64s < 0); + BUG_ON(delta + nr.live_u64s < 0); - return b->nr.live_u64s + delta; + return nr.live_u64s + delta; } /** - * btree_node_format_fits - check if we could rewrite node with a new format + * bch2_btree_node_format_fits - check if we could rewrite node with a new format + * + * @c: filesystem handle + * @b: btree node to rewrite + * @nr: number of keys for new node (i.e. b->nr) + * @new_f: bkey format to translate keys to + * + * Returns: true if all re-packed keys will be able to fit in a new node. * - * This assumes all keys can pack with the new format -- it just checks if - * the re-packed keys would fit inside the node itself. + * Assumes all keys will successfully pack with the new format. */ -bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, +static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, + struct btree_nr_keys nr, struct bkey_format *new_f) { - size_t u64s = btree_node_u64s_with_format(b, new_f); + size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f); - return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c); + return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b); } /* Btree node freeing/allocation: */ -static void __btree_node_free(struct bch_fs *c, struct btree *b) +static void __btree_node_free(struct btree_trans *trans, struct btree *b) { - trace_btree_node_free(c, b); + struct bch_fs *c = trans->c; + + trace_and_count(c, btree_node_free, trans, b); + BUG_ON(btree_node_write_blocked(b)); BUG_ON(btree_node_dirty(b)); BUG_ON(btree_node_need_write(b)); BUG_ON(b == btree_node_root(c, b)); @@ -159,51 +186,86 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b) } static void bch2_btree_node_free_inmem(struct btree_trans *trans, + struct btree_path *path, struct btree *b) { struct bch_fs *c = trans->c; + unsigned i, level = b->c.level; + + bch2_btree_node_lock_write_nofail(trans, path, &b->c); + bch2_btree_node_hash_remove(&c->btree_cache, b); + __btree_node_free(trans, b); + six_unlock_write(&b->c.lock); + mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); + + trans_for_each_path(trans, path, i) + if (path->l[level].b == b) { + btree_node_unlock(trans, path, level); + path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init); + } +} + +static void bch2_btree_node_free_never_used(struct btree_update *as, + struct btree_trans *trans, + struct btree *b) +{ + struct bch_fs *c = as->c; + struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL]; struct btree_path *path; + unsigned i, level = b->c.level; - trans_for_each_path(trans, path) - BUG_ON(path->l[b->c.level].b == b && - path->l[b->c.level].lock_seq == b->c.lock.state.seq); + 
BUG_ON(!list_empty(&b->write_blocked)); + BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as)); - six_lock_write(&b->c.lock, NULL, NULL); + b->will_make_reachable = 0; + closure_put(&as->cl); + clear_btree_node_will_make_reachable(b); + clear_btree_node_accessed(b); + clear_btree_node_dirty_acct(c, b); + clear_btree_node_need_write(b); + + mutex_lock(&c->btree_cache.lock); + list_del_init(&b->list); bch2_btree_node_hash_remove(&c->btree_cache, b); - __btree_node_free(c, b); + mutex_unlock(&c->btree_cache.lock); + + BUG_ON(p->nr >= ARRAY_SIZE(p->b)); + p->b[p->nr++] = b; - six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); + + trans_for_each_path(trans, path, i) + if (path->l[level].b == b) { + btree_node_unlock(trans, path, level); + path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init); + } } -static struct btree *__bch2_btree_node_alloc(struct bch_fs *c, +static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, struct disk_reservation *res, struct closure *cl, + bool interior_node, unsigned flags) { + struct bch_fs *c = trans->c; struct write_point *wp; struct btree *b; - __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; - struct open_buckets ob = { .nr = 0 }; + BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; + struct open_buckets obs = { .nr = 0 }; struct bch_devs_list devs_have = (struct bch_devs_list) { 0 }; - unsigned nr_reserve; - enum alloc_reserve alloc_reserve; - - if (flags & BTREE_INSERT_USE_RESERVE) { - nr_reserve = 0; - alloc_reserve = RESERVE_BTREE_MOVINGGC; - } else { - nr_reserve = BTREE_NODE_RESERVE; - alloc_reserve = RESERVE_BTREE; - } + enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; + unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim + ? BTREE_NODE_RESERVE + : 0; + int ret; mutex_lock(&c->btree_reserve_cache_lock); if (c->btree_reserve_cache_nr > nr_reserve) { struct btree_alloc *a = &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; - ob = a->ob; + obs = a->ob; bkey_copy(&tmp.k, &a->k); mutex_unlock(&c->btree_reserve_cache_lock); goto mem_alloc; @@ -211,7 +273,7 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c, mutex_unlock(&c->btree_reserve_cache_lock); retry: - wp = bch2_alloc_sectors_start(c, + ret = bch2_alloc_sectors_start_trans(trans, c->opts.metadata_target ?: c->opts.foreground_target, 0, @@ -219,16 +281,16 @@ retry: &devs_have, res->nr_replicas, c->opts.metadata_replicas_required, - alloc_reserve, 0, cl); - if (IS_ERR(wp)) - return ERR_CAST(wp); + watermark, 0, cl, &wp); + if (unlikely(ret)) + return ERR_PTR(ret); - if (wp->sectors_free < c->opts.btree_node_size) { + if (wp->sectors_free < btree_sectors(c)) { struct open_bucket *ob; unsigned i; open_bucket_for_each(c, &wp->ptrs, ob, i) - if (ob->sectors_free < c->opts.btree_node_size) + if (ob->sectors_free < btree_sectors(c)) ob->sectors_free = 0; bch2_alloc_sectors_done(c, wp); @@ -236,36 +298,44 @@ retry: } bkey_btree_ptr_v2_init(&tmp.k); - bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, c->opts.btree_node_size); + bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false); - bch2_open_bucket_get(c, wp, &ob); + bch2_open_bucket_get(c, wp, &obs); bch2_alloc_sectors_done(c, wp); mem_alloc: - b = bch2_btree_node_mem_alloc(c); + b = bch2_btree_node_mem_alloc(trans, interior_node); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); /* we hold cannibalize_lock: */ BUG_ON(IS_ERR(b)); BUG_ON(b->ob.nr); bkey_copy(&b->key, &tmp.k); - b->ob = ob; + b->ob = obs; return b; } -static struct btree 
*bch2_btree_node_alloc(struct btree_update *as, unsigned level) +static struct btree *bch2_btree_node_alloc(struct btree_update *as, + struct btree_trans *trans, + unsigned level) { struct bch_fs *c = as->c; struct btree *b; + struct prealloc_nodes *p = &as->prealloc_nodes[!!level]; int ret; BUG_ON(level >= BTREE_MAX_DEPTH); - BUG_ON(!as->nr_prealloc_nodes); + BUG_ON(!p->nr); - b = as->prealloc_nodes[--as->nr_prealloc_nodes]; + b = p->b[--p->nr]; + + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); set_btree_node_accessed(b); - set_btree_node_dirty(c, b); + set_btree_node_dirty_acct(c, b); set_btree_node_need_write(b); bch2_bset_init_first(b, &b->data->keys); @@ -295,7 +365,8 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id); BUG_ON(ret); - trace_btree_node_alloc(c, b); + trace_and_count(c, btree_node_alloc, trans, b); + bch2_increment_clock(c, btree_sectors(c), WRITE); return b; } @@ -312,13 +383,19 @@ static void btree_set_max(struct btree *b, struct bpos pos) b->data->max_key = pos; } -struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as, - struct btree *b, - struct bkey_format format) +static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as, + struct btree_trans *trans, + struct btree *b) { - struct btree *n; + struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level); + struct bkey_format format = bch2_btree_calc_format(b); - n = bch2_btree_node_alloc(as, b->c.level); + /* + * The keys might expand with the new format - if they wouldn't fit in + * the btree node anymore, use the old format for now: + */ + if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format)) + format = b->format; SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1); @@ -331,29 +408,13 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as, bch2_btree_sort_into(as->c, n, b); btree_node_reset_sib_u64s(n); - - n->key.k.p = b->key.k.p; return n; } -static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as, - struct btree *b) -{ - struct bkey_format new_f = bch2_btree_calc_format(b); - - /* - * The keys might expand with the new format - if they wouldn't fit in - * the btree node anymore, use the old format for now: - */ - if (!bch2_btree_node_format_fits(as->c, b, &new_f)) - new_f = b->format; - - return __bch2_btree_node_alloc_replacement(as, b, new_f); -} - -static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level) +static struct btree *__btree_root_alloc(struct btree_update *as, + struct btree_trans *trans, unsigned level) { - struct btree *b = bch2_btree_node_alloc(as, level); + struct btree *b = bch2_btree_node_alloc(as, trans, level); btree_set_min(b, POS_MIN); btree_set_max(b, SPOS_MAX); @@ -362,85 +423,87 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level) btree_node_set_format(b, b->data->format); bch2_btree_build_aux_trees(b); - bch2_btree_update_add_new_node(as, b); - six_unlock_write(&b->c.lock); - return b; } -static void bch2_btree_reserve_put(struct btree_update *as) +static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans) { struct bch_fs *c = as->c; + struct prealloc_nodes *p; - mutex_lock(&c->btree_reserve_cache_lock); + for (p = as->prealloc_nodes; + p < as->prealloc_nodes + ARRAY_SIZE(as->prealloc_nodes); + p++) { + while (p->nr) { + 
struct btree *b = p->b[--p->nr]; - while (as->nr_prealloc_nodes) { - struct btree *b = as->prealloc_nodes[--as->nr_prealloc_nodes]; + mutex_lock(&c->btree_reserve_cache_lock); - six_unlock_write(&b->c.lock); + if (c->btree_reserve_cache_nr < + ARRAY_SIZE(c->btree_reserve_cache)) { + struct btree_alloc *a = + &c->btree_reserve_cache[c->btree_reserve_cache_nr++]; - if (c->btree_reserve_cache_nr < - ARRAY_SIZE(c->btree_reserve_cache)) { - struct btree_alloc *a = - &c->btree_reserve_cache[c->btree_reserve_cache_nr++]; - - a->ob = b->ob; - b->ob.nr = 0; - bkey_copy(&a->k, &b->key); - } else { - bch2_open_buckets_put(c, &b->ob); - } + a->ob = b->ob; + b->ob.nr = 0; + bkey_copy(&a->k, &b->key); + } else { + bch2_open_buckets_put(c, &b->ob); + } - btree_node_lock_type(c, b, SIX_LOCK_write); - __btree_node_free(c, b); - six_unlock_write(&b->c.lock); + mutex_unlock(&c->btree_reserve_cache_lock); - six_unlock_intent(&b->c.lock); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); + __btree_node_free(trans, b); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + } } - - mutex_unlock(&c->btree_reserve_cache_lock); } -static int bch2_btree_reserve_get(struct btree_update *as, unsigned nr_nodes, - unsigned flags, struct closure *cl) +static int bch2_btree_reserve_get(struct btree_trans *trans, + struct btree_update *as, + unsigned nr_nodes[2], + unsigned flags, + struct closure *cl) { - struct bch_fs *c = as->c; struct btree *b; - int ret; + unsigned interior; + int ret = 0; - BUG_ON(nr_nodes > BTREE_RESERVE_MAX); + BUG_ON(nr_nodes[0] + nr_nodes[1] > BTREE_RESERVE_MAX); /* * Protects reaping from the btree node cache and using the btree node * open bucket reserve: */ - ret = bch2_btree_cache_cannibalize_lock(c, cl); + ret = bch2_btree_cache_cannibalize_lock(trans, cl); if (ret) return ret; - while (as->nr_prealloc_nodes < nr_nodes) { - b = __bch2_btree_node_alloc(c, &as->disk_res, - flags & BTREE_INSERT_NOWAIT - ? 
NULL : cl, flags); - if (IS_ERR(b)) { - ret = PTR_ERR(b); - goto err_free; - } + for (interior = 0; interior < 2; interior++) { + struct prealloc_nodes *p = as->prealloc_nodes + interior; - as->prealloc_nodes[as->nr_prealloc_nodes++] = b; - } + while (p->nr < nr_nodes[interior]) { + b = __bch2_btree_node_alloc(trans, &as->disk_res, cl, + interior, flags); + if (IS_ERR(b)) { + ret = PTR_ERR(b); + goto err; + } - bch2_btree_cache_cannibalize_unlock(c); - return 0; -err_free: - bch2_btree_cache_cannibalize_unlock(c); - trace_btree_reserve_get_fail(c, nr_nodes, cl); + p->b[p->nr++] = b; + } + } +err: + bch2_btree_cache_cannibalize_unlock(trans); return ret; } /* Asynchronous interior node update machinery */ -static void bch2_btree_update_free(struct btree_update *as) +static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans) { struct bch_fs *c = as->c; @@ -448,12 +511,10 @@ static void bch2_btree_update_free(struct btree_update *as) up_read(&c->gc_lock); as->took_gc_lock = false; - bch2_journal_preres_put(&c->journal, &as->journal_preres); - bch2_journal_pin_drop(&c->journal, &as->journal); bch2_journal_pin_flush(&c->journal, &as->journal); bch2_disk_reservation_put(c, &as->disk_res); - bch2_btree_reserve_put(as); + bch2_btree_reserve_put(as, trans); bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total], as->start_time); @@ -474,20 +535,18 @@ static void bch2_btree_update_free(struct btree_update *as) mutex_unlock(&c->btree_interior_update_lock); } -static void btree_update_will_delete_key(struct btree_update *as, - struct bkey_i *k) +static void btree_update_add_key(struct btree_update *as, + struct keylist *keys, struct btree *b) { - BUG_ON(bch2_keylist_u64s(&as->old_keys) + k->k.u64s > + struct bkey_i *k = &b->key; + + BUG_ON(bch2_keylist_u64s(keys) + k->k.u64s > ARRAY_SIZE(as->_old_keys)); - bch2_keylist_add(&as->old_keys, k); -} -static void btree_update_will_add_key(struct btree_update *as, - struct bkey_i *k) -{ - BUG_ON(bch2_keylist_u64s(&as->new_keys) + k->k.u64s > - ARRAY_SIZE(as->_new_keys)); - bch2_keylist_add(&as->new_keys, k); + bkey_copy(keys->top, k); + bkey_i_to_btree_ptr_v2(keys->top)->v.mem_ptr = b->c.level + 1; + + bch2_keylist_push(keys); } /* @@ -497,27 +556,29 @@ static void btree_update_will_add_key(struct btree_update *as, static int btree_update_nodes_written_trans(struct btree_trans *trans, struct btree_update *as) { - struct bkey_i *k; - int ret; + struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, as->journal_u64s); + int ret = PTR_ERR_OR_ZERO(e); + if (ret) + return ret; + + memcpy(e, as->journal_entries, as->journal_u64s * sizeof(u64)); - trans->extra_journal_entries = (void *) &as->journal_entries[0]; - trans->extra_journal_entry_u64s = as->journal_u64s; trans->journal_pin = &as->journal; - for_each_keylist_key(&as->new_keys, k) { - ret = bch2_trans_mark_key(trans, - bkey_s_c_null, - bkey_i_to_s_c(k), - BTREE_TRIGGER_INSERT); + for_each_keylist_key(&as->old_keys, k) { + unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr; + + ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k), + BTREE_TRIGGER_TRANSACTIONAL); if (ret) return ret; } - for_each_keylist_key(&as->old_keys, k) { - ret = bch2_trans_mark_key(trans, - bkey_i_to_s_c(k), - bkey_s_c_null, - BTREE_TRIGGER_OVERWRITE); + for_each_keylist_key(&as->new_keys, k) { + unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr; + + ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k), + BTREE_TRIGGER_TRANSACTIONAL); if (ret) 
return ret; } @@ -528,8 +589,8 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans, static void btree_update_nodes_written(struct btree_update *as) { struct bch_fs *c = as->c; - struct btree *b = as->b; - struct btree_trans trans; + struct btree *b; + struct btree_trans *trans = bch2_trans_get(c); u64 journal_seq = 0; unsigned i; int ret; @@ -545,22 +606,21 @@ static void btree_update_nodes_written(struct btree_update *as) if (ret) goto err; - BUG_ON(!journal_pin_active(&as->journal)); - /* * Wait for any in flight writes to finish before we free the old nodes * on disk: */ for (i = 0; i < as->nr_old_nodes; i++) { - struct btree *old = as->old_nodes[i]; __le64 seq; - six_lock_read(&old->c.lock, NULL, NULL); - seq = old->data ? old->data->keys.seq : 0; - six_unlock_read(&old->c.lock); + b = as->old_nodes[i]; + + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); + seq = b->data ? b->data->keys.seq : 0; + six_unlock_read(&b->c.lock); if (seq == as->old_nodes_seq[i]) - wait_on_bit_io(&old->flags, BTREE_NODE_write_in_flight_inner, + wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner, TASK_UNINTERRUPTIBLE); } @@ -577,19 +637,23 @@ static void btree_update_nodes_written(struct btree_update *as) * journal reclaim does btree updates when flushing bkey_cached entries, * which may require allocations as well. */ - bch2_trans_init(&trans, c, 0, 512); - ret = __bch2_trans_do(&trans, &as->disk_res, &journal_seq, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_NOCHECK_RW| - BTREE_INSERT_JOURNAL_RECLAIM| - BTREE_INSERT_JOURNAL_RESERVED, - btree_update_nodes_written_trans(&trans, as)); - bch2_trans_exit(&trans); + ret = commit_do(trans, &as->disk_res, &journal_seq, + BCH_WATERMARK_reclaim| + BCH_TRANS_COMMIT_no_enospc| + BCH_TRANS_COMMIT_no_check_rw| + BCH_TRANS_COMMIT_journal_reclaim, + btree_update_nodes_written_trans(trans, as)); + bch2_trans_unlock(trans); bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c, - "error %i in btree_update_nodes_written()", ret); + "%s(): error %s", __func__, bch2_err_str(ret)); err: - if (b) { + if (as->b) { + + b = as->b; + btree_path_idx_t path_idx = get_unlocked_mut_path(trans, + as->btree_id, b->c.level, b->key.k.p); + struct btree_path *path = trans->paths + path_idx; /* * @b is the node we did the final insert into: * @@ -602,26 +666,44 @@ err: * we're in journal error state: */ - btree_node_lock_type(c, b, SIX_LOCK_intent); - btree_node_lock_type(c, b, SIX_LOCK_write); + /* + * Ensure transaction is unlocked before using + * btree_node_lock_nopath() (the use of which is always suspect, + * we need to work on removing this in the future) + * + * It should be, but get_unlocked_mut_path() -> bch2_path_get() + * calls bch2_path_upgrade(), before we call path_make_mut(), so + * we may rarely end up with a locked path besides the one we + * have here: + */ + bch2_trans_unlock(trans); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); + mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED); + path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); + path->l[b->c.level].b = b; + + bch2_btree_node_lock_write_nofail(trans, path, &b->c); + mutex_lock(&c->btree_interior_update_lock); list_del(&as->write_blocked_list); + if (list_empty(&b->write_blocked)) + clear_btree_node_write_blocked(b); /* * Node might have been freed, recheck under * btree_interior_update_lock: */ if (as->b == b) { - struct bset *i = btree_bset_last(b); - BUG_ON(!b->c.level); BUG_ON(!btree_node_dirty(b)); if (!ret) { - 
i->journal_seq = cpu_to_le64( - max(journal_seq, - le64_to_cpu(i->journal_seq))); + struct bset *last = btree_bset_last(b); + + last->journal_seq = cpu_to_le64( + max(journal_seq, + le64_to_cpu(last->journal_seq))); bch2_btree_add_journal_pin(c, b, journal_seq); } else { @@ -635,29 +717,31 @@ err: } mutex_unlock(&c->btree_interior_update_lock); + + mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); six_unlock_write(&b->c.lock); btree_node_write_if_need(c, b, SIX_LOCK_intent); - six_unlock_intent(&b->c.lock); + btree_node_unlock(trans, path, b->c.level); + bch2_path_put(trans, path_idx, true); } bch2_journal_pin_drop(&c->journal, &as->journal); - bch2_journal_preres_put(&c->journal, &as->journal_preres); - mutex_lock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; BUG_ON(b->will_make_reachable != (unsigned long) as); b->will_make_reachable = 0; + clear_btree_node_will_make_reachable(b); } mutex_unlock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; - btree_node_lock_type(c, b, SIX_LOCK_read); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); btree_node_write_if_need(c, b, SIX_LOCK_read); six_unlock_read(&b->c.lock); } @@ -665,7 +749,8 @@ err: for (i = 0; i < as->nr_open_buckets; i++) bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]); - bch2_btree_update_free(as); + bch2_btree_update_free(as, trans); + bch2_trans_put(trans); } static void btree_interior_update_work(struct work_struct *work) @@ -689,9 +774,9 @@ static void btree_interior_update_work(struct work_struct *work) } } -static void btree_update_set_nodes_written(struct closure *cl) +static CLOSURE_CALLBACK(btree_update_set_nodes_written) { - struct btree_update *as = container_of(cl, struct btree_update, cl); + closure_type(as, struct btree_update, cl); struct bch_fs *c = as->c; mutex_lock(&c->btree_interior_update_lock); @@ -714,14 +799,23 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b) BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE); BUG_ON(!btree_node_dirty(b)); + BUG_ON(!b->c.level); as->mode = BTREE_INTERIOR_UPDATING_NODE; as->b = b; + + set_btree_node_write_blocked(b); list_add(&as->write_blocked_list, &b->write_blocked); mutex_unlock(&c->btree_interior_update_lock); } +static int bch2_update_reparent_journal_pin_flush(struct journal *j, + struct journal_entry_pin *_pin, u64 seq) +{ + return 0; +} + static void btree_update_reparent(struct btree_update *as, struct btree_update *child) { @@ -732,7 +826,8 @@ static void btree_update_reparent(struct btree_update *as, child->b = NULL; child->mode = BTREE_INTERIOR_UPDATING_AS; - bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL); + bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, + bch2_update_reparent_journal_pin_flush); } static void btree_update_updated_root(struct btree_update *as, struct btree *b) @@ -782,10 +877,19 @@ static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree as->new_nodes[as->nr_new_nodes++] = b; b->will_make_reachable = 1UL|(unsigned long) as; + set_btree_node_will_make_reachable(b); mutex_unlock(&c->btree_interior_update_lock); - btree_update_will_add_key(as, &b->key); + btree_update_add_key(as, &as->new_keys, b); + + if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { + unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data; + unsigned sectors = round_up(bytes, block_bytes(c)) >> 9; + + 
bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written = + cpu_to_le16(sectors); + } } /* @@ -804,6 +908,7 @@ static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b) * xchg() is for synchronization with bch2_btree_complete_write: */ v = xchg(&b->will_make_reachable, 0); + clear_btree_node_will_make_reachable(b); as = (struct btree_update *) (v & ~1UL); if (!as) { @@ -831,13 +936,19 @@ static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct b b->ob.v[--b->ob.nr]; } +static int bch2_btree_update_will_free_node_journal_pin_flush(struct journal *j, + struct journal_entry_pin *_pin, u64 seq) +{ + return 0; +} + /* * @b is being split/rewritten: it may have pointers to not-yet-written btree * nodes and thus outstanding btree_updates - redirect @b's * btree_updates to point to this btree_update: */ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, - struct btree *b) + struct btree *b) { struct bch_fs *c = as->c; struct btree_update *p, *n; @@ -869,8 +980,9 @@ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, closure_wake_up(&c->btree_interior_update_wait); } - clear_btree_node_dirty(c, b); + clear_btree_node_dirty_acct(c, b); clear_btree_node_need_write(b); + clear_btree_node_write_blocked(b); /* * Does this node have unwritten data that has a pin on the journal? @@ -881,11 +993,13 @@ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, * when the new nodes are persistent and reachable on disk: */ w = btree_current_write(b); - bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL); + bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, + bch2_btree_update_will_free_node_journal_pin_flush); bch2_journal_pin_drop(&c->journal, &w->journal); w = btree_prev_write(b); - bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL); + bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, + bch2_btree_update_will_free_node_journal_pin_flush); bch2_journal_pin_drop(&c->journal, &w->journal); mutex_unlock(&c->btree_interior_update_lock); @@ -901,14 +1015,14 @@ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, */ btree_update_drop_new_node(c, b); - btree_update_will_delete_key(as, &b->key); + btree_update_add_key(as, &as->old_keys, b); as->old_nodes[as->nr_old_nodes] = b; as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq; as->nr_old_nodes++; } -static void bch2_btree_update_done(struct btree_update *as) +static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans) { struct bch_fs *c = as->c; u64 start_time = as->start_time; @@ -919,7 +1033,7 @@ static void bch2_btree_update_done(struct btree_update *as) up_read(&as->c->gc_lock); as->took_gc_lock = false; - bch2_btree_reserve_put(as); + bch2_btree_reserve_put(as, trans); continue_at(&as->cl, btree_update_set_nodes_written, as->c->btree_interior_update_worker); @@ -930,55 +1044,83 @@ static void bch2_btree_update_done(struct btree_update *as) static struct btree_update * bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, - unsigned level, unsigned nr_nodes, unsigned flags) + unsigned level, bool split, unsigned flags) { struct bch_fs *c = trans->c; struct btree_update *as; - struct closure cl; u64 start_time = local_clock(); - int disk_res_flags = (flags & BTREE_INSERT_NOFAIL) + int disk_res_flags = (flags & BCH_TRANS_COMMIT_no_enospc) ? 
BCH_DISK_RESERVATION_NOFAIL : 0; - int journal_flags = 0; + unsigned nr_nodes[2] = { 0, 0 }; + unsigned update_level = level; + enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; int ret = 0; + u32 restart_count = trans->restart_count; BUG_ON(!path->should_be_locked); - if (flags & BTREE_INSERT_JOURNAL_RESERVED) - journal_flags |= JOURNAL_RES_GET_RESERVED; + if (watermark == BCH_WATERMARK_copygc) + watermark = BCH_WATERMARK_btree_copygc; + if (watermark < BCH_WATERMARK_btree) + watermark = BCH_WATERMARK_btree; - closure_init_stack(&cl); -retry: + flags &= ~BCH_WATERMARK_MASK; + flags |= watermark; - /* - * XXX: figure out how far we might need to split, - * instead of locking/reserving all the way to the root: - */ - if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) { - trace_trans_restart_iter_upgrade(trans->ip, _RET_IP_, - path->btree_id, &path->pos); - ret = btree_trans_restart(trans); - return ERR_PTR(ret); + if (!(flags & BCH_TRANS_COMMIT_journal_reclaim) && + watermark < c->journal.watermark) { + struct journal_res res = { 0 }; + + ret = drop_locks_do(trans, + bch2_journal_res_get(&c->journal, &res, 1, + watermark|JOURNAL_RES_GET_CHECK)); + if (ret) + return ERR_PTR(ret); } - if (flags & BTREE_INSERT_GC_LOCK_HELD) - lockdep_assert_held(&c->gc_lock); - else if (!down_read_trylock(&c->gc_lock)) { - bch2_trans_unlock(trans); - down_read(&c->gc_lock); - if (!bch2_trans_relock(trans)) { + while (1) { + nr_nodes[!!update_level] += 1 + split; + update_level++; + + ret = bch2_btree_path_upgrade(trans, path, update_level + 1); + if (ret) + return ERR_PTR(ret); + + if (!btree_path_node(path, update_level)) { + /* Allocating new root? */ + nr_nodes[1] += split; + update_level = BTREE_MAX_DEPTH; + break; + } + + /* + * Always check for space for two keys, even if we won't have to + * split at prior level - it might have been a merge instead: + */ + if (bch2_btree_node_insert_fits(path->l[update_level].b, + BKEY_BTREE_PTR_U64s_MAX * 2)) + break; + + split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c); + } + + if (!down_read_trylock(&c->gc_lock)) { + ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0)); + if (ret) { up_read(&c->gc_lock); - return ERR_PTR(-EINTR); + return ERR_PTR(ret); } } - as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO); + as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS); memset(as, 0, sizeof(*as)); closure_init(&as->cl, NULL); as->c = c; as->start_time = start_time; as->mode = BTREE_INTERIOR_NO_UPDATE; - as->took_gc_lock = !(flags & BTREE_INSERT_GC_LOCK_HELD); + as->took_gc_lock = true; as->btree_id = path->btree_id; + as->update_level = update_level; INIT_LIST_HEAD(&as->list); INIT_LIST_HEAD(&as->unwritten_list); INIT_LIST_HEAD(&as->write_blocked_list); @@ -1002,60 +1144,56 @@ retry: if (ret) goto err; - ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, - BTREE_UPDATE_JOURNAL_RES, - journal_flags|JOURNAL_RES_GET_NONBLOCK); - if (ret == -EAGAIN) { - bch2_trans_unlock(trans); + ret = bch2_disk_reservation_get(c, &as->disk_res, + (nr_nodes[0] + nr_nodes[1]) * btree_sectors(c), + c->opts.metadata_replicas, + disk_res_flags); + if (ret) + goto err; - if (flags & BTREE_INSERT_JOURNAL_RECLAIM) { - bch2_btree_update_free(as); - btree_trans_restart(trans); - return ERR_PTR(ret); - } + ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL); + if (bch2_err_matches(ret, ENOSPC) || + bch2_err_matches(ret, ENOMEM)) { + struct closure cl; - ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, - 
BTREE_UPDATE_JOURNAL_RES, - journal_flags); - if (ret) { - trace_trans_restart_journal_preres_get(trans->ip, _RET_IP_); + /* + * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK + * flag + */ + if (bch2_err_matches(ret, ENOSPC) && + (flags & BCH_TRANS_COMMIT_journal_reclaim) && + watermark != BCH_WATERMARK_reclaim) { + ret = -BCH_ERR_journal_reclaim_would_deadlock; goto err; } - if (!bch2_trans_relock(trans)) { - ret = -EINTR; - goto err; - } + closure_init_stack(&cl); + + do { + ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl); + + bch2_trans_unlock(trans); + closure_sync(&cl); + } while (bch2_err_matches(ret, BCH_ERR_operation_blocked)); } - ret = bch2_disk_reservation_get(c, &as->disk_res, - nr_nodes * c->opts.btree_node_size, - c->opts.metadata_replicas, - disk_res_flags); - if (ret) + if (ret) { + trace_and_count(c, btree_reserve_get_fail, trans->fn, + _RET_IP_, nr_nodes[0] + nr_nodes[1], ret); goto err; + } - ret = bch2_btree_reserve_get(as, nr_nodes, flags, &cl); + ret = bch2_trans_relock(trans); if (ret) goto err; - bch2_journal_pin_add(&c->journal, - atomic64_read(&c->journal.seq), - &as->journal, NULL); - + bch2_trans_verify_not_restarted(trans, restart_count); return as; err: - bch2_btree_update_free(as); - - if (ret == -EAGAIN) { - bch2_trans_unlock(trans); - closure_sync(&cl); - ret = -EINTR; - } - - if (ret == -EINTR && bch2_trans_relock(trans)) - goto retry; - + bch2_btree_update_free(as, trans); + if (!bch2_err_matches(ret, ENOSPC) && + !bch2_err_matches(ret, EROFS)) + bch_err_fn_ratelimited(c, ret); return ERR_PTR(ret); } @@ -1068,34 +1206,17 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b) list_del_init(&b->list); mutex_unlock(&c->btree_cache.lock); - if (b->c.level) - six_lock_pcpu_alloc(&b->c.lock); - else - six_lock_pcpu_free(&b->c.lock); - mutex_lock(&c->btree_root_lock); BUG_ON(btree_node_root(c, b) && (b->c.level < btree_node_root(c, b)->c.level || !btree_node_dying(btree_node_root(c, b)))); - btree_node_root(c, b) = b; + bch2_btree_id_root(c, b->c.btree_id)->b = b; mutex_unlock(&c->btree_root_lock); bch2_recalc_btree_reserve(c); } -/** - * bch_btree_set_root - update the root in memory and on disk - * - * To ensure forward progress, the current task must not be holding any - * btree node write locks. However, you must hold an intent lock on the - * old root. - * - * Note: This allocates a journal entry but doesn't add any keys to - * it. All the btree roots are part of every journal write, so there - * is nothing new to be done. This just guarantees that there is a - * journal write. 
- */ static void bch2_btree_set_root(struct btree_update *as, struct btree_trans *trans, struct btree_path *path, @@ -1104,9 +1225,7 @@ static void bch2_btree_set_root(struct btree_update *as, struct bch_fs *c = as->c; struct btree *old; - trace_btree_set_root(c, b); - BUG_ON(!b->written && - !test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags)); + trace_and_count(c, btree_node_set_root, trans, b); old = btree_node_root(c, b); @@ -1114,7 +1233,7 @@ static void bch2_btree_set_root(struct btree_update *as, * Ensure no one is using the old root while we switch to the * new root: */ - bch2_btree_node_lock_write(trans, path, old); + bch2_btree_node_lock_write_nofail(trans, path, &old->c); bch2_btree_set_root_inmem(c, b); @@ -1141,18 +1260,27 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, { struct bch_fs *c = as->c; struct bkey_packed *k; - const char *invalid; + struct printbuf buf = PRINTBUF; + unsigned long old, new, v; BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 && !btree_ptr_sectors_written(insert)); - invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?: - bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert)); - if (invalid) { - char buf[160]; - - bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert)); - bch2_fs_inconsistent(c, "inserting invalid bkey %s: %s", buf, invalid); + if (unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))) + bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p); + + if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert), + btree_node_type(b), WRITE, &buf) ?: + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf)) { + printbuf_reset(&buf); + prt_printf(&buf, "inserting invalid bkey\n "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); + prt_printf(&buf, "\n "); + bch2_bkey_invalid(c, bkey_i_to_s_c(insert), + btree_node_type(b), WRITE, &buf); + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf); + + bch2_fs_inconsistent(c, "%s", buf.buf); dump_stack(); } @@ -1170,8 +1298,18 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, bch2_btree_node_iter_advance(node_iter, b); bch2_btree_bset_insert_key(trans, path, b, node_iter, insert); - set_btree_node_dirty(c, b); - set_btree_node_need_write(b); + set_btree_node_dirty_acct(c, b); + + v = READ_ONCE(b->flags); + do { + old = new = v; + + new &= ~BTREE_WRITE_TYPE_MASK; + new |= BTREE_WRITE_interior; + new |= 1 << BTREE_NODE_need_write; + } while ((v = cmpxchg(&b->flags, old, new)) != old); + + printbuf_exit(&buf); } static void @@ -1192,8 +1330,12 @@ __bch2_btree_insert_keys_interior(struct btree_update *as, ; while (!bch2_keylist_empty(keys)) { - bch2_insert_fixup_btree_ptr(as, trans, path, b, - &node_iter, bch2_keylist_front(keys)); + insert = bch2_keylist_front(keys); + + if (bpos_gt(insert->k.p, b->key.k.p)) + break; + + bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); bch2_keylist_pop_front(keys); } } @@ -1202,109 +1344,103 @@ __bch2_btree_insert_keys_interior(struct btree_update *as, * Move keys from n1 (original replacement node, now lower node) to n2 (higher * node) */ -static struct btree *__btree_split_node(struct btree_update *as, - struct btree *n1) +static void __btree_split_node(struct btree_update *as, + struct btree_trans *trans, + struct btree *b, + struct btree *n[2]) { - struct bkey_format_state s; - size_t nr_packed = 0, nr_unpacked = 0; - struct btree *n2; - struct bset *set1, *set2; - struct bkey_packed *k, *set2_start, *set2_end, *out, *prev = NULL; - struct bpos n1_pos; + struct 
bkey_packed *k; + struct bpos n1_pos = POS_MIN; + struct btree_node_iter iter; + struct bset *bsets[2]; + struct bkey_format_state format[2]; + struct bkey_packed *out[2]; + struct bkey uk; + unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5; + struct { unsigned nr_keys, val_u64s; } nr_keys[2]; + int i; - n2 = bch2_btree_node_alloc(as, n1->c.level); - bch2_btree_update_add_new_node(as, n2); + memset(&nr_keys, 0, sizeof(nr_keys)); - n2->data->max_key = n1->data->max_key; - n2->data->format = n1->format; - SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data)); - n2->key.k.p = n1->key.k.p; + for (i = 0; i < 2; i++) { + BUG_ON(n[i]->nsets != 1); - set1 = btree_bset_first(n1); - set2 = btree_bset_first(n2); + bsets[i] = btree_bset_first(n[i]); + out[i] = bsets[i]->start; - /* - * Has to be a linear search because we don't have an auxiliary - * search tree yet - */ - k = set1->start; - while (1) { - struct bkey_packed *n = bkey_next(k); + SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1); + bch2_bkey_format_init(&format[i]); + } - if (n == vstruct_last(set1)) - break; - if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5) - break; + u64s = 0; + for_each_btree_node_key(b, k, &iter) { + if (bkey_deleted(k)) + continue; - if (bkey_packed(k)) - nr_packed++; - else - nr_unpacked++; + i = u64s >= n1_u64s; + u64s += k->u64s; + uk = bkey_unpack_key(b, k); + if (!i) + n1_pos = uk.p; + bch2_bkey_format_add_key(&format[i], &uk); - prev = k; - k = n; + nr_keys[i].nr_keys++; + nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k); } - BUG_ON(!prev); - set2_start = k; - set2_end = vstruct_last(set1); + btree_set_min(n[0], b->data->min_key); + btree_set_max(n[0], n1_pos); + btree_set_min(n[1], bpos_successor(n1_pos)); + btree_set_max(n[1], b->data->max_key); - set1->u64s = cpu_to_le16((u64 *) set2_start - set1->_data); - set_btree_bset_end(n1, n1->set); + for (i = 0; i < 2; i++) { + bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key); + bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key); - n1->nr.live_u64s = le16_to_cpu(set1->u64s); - n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s); - n1->nr.packed_keys = nr_packed; - n1->nr.unpacked_keys = nr_unpacked; + n[i]->data->format = bch2_bkey_format_done(&format[i]); - n1_pos = bkey_unpack_pos(n1, prev); - if (as->c->sb.version < bcachefs_metadata_version_snapshot) - n1_pos.snapshot = U32_MAX; + unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s + + nr_keys[i].val_u64s; + if (__vstruct_bytes(struct btree_node, u64s) > btree_buf_bytes(b)) + n[i]->data->format = b->format; - btree_set_max(n1, n1_pos); - btree_set_min(n2, bpos_successor(n1->key.k.p)); + btree_node_set_format(n[i], n[i]->data->format); + } - bch2_bkey_format_init(&s); - bch2_bkey_format_add_pos(&s, n2->data->min_key); - bch2_bkey_format_add_pos(&s, n2->data->max_key); + u64s = 0; + for_each_btree_node_key(b, k, &iter) { + if (bkey_deleted(k)) + continue; - for (k = set2_start; k != set2_end; k = bkey_next(k)) { - struct bkey uk = bkey_unpack_key(n1, k); - bch2_bkey_format_add_key(&s, &uk); - } + i = u64s >= n1_u64s; + u64s += k->u64s; - n2->data->format = bch2_bkey_format_done(&s); - btree_node_set_format(n2, n2->data->format); + if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k) + ? 
&b->format: &bch2_bkey_format_current, k)) + out[i]->format = KEY_FORMAT_LOCAL_BTREE; + else + bch2_bkey_unpack(b, (void *) out[i], k); - out = set2->start; - memset(&n2->nr, 0, sizeof(n2->nr)); + out[i]->needs_whiteout = false; - for (k = set2_start; k != set2_end; k = bkey_next(k)) { - BUG_ON(!bch2_bkey_transform(&n2->format, out, bkey_packed(k) - ? &n1->format : &bch2_bkey_format_current, k)); - out->format = KEY_FORMAT_LOCAL_BTREE; - btree_keys_account_key_add(&n2->nr, 0, out); - out = bkey_next(out); + btree_keys_account_key_add(&n[i]->nr, 0, out[i]); + out[i] = bkey_p_next(out[i]); } - set2->u64s = cpu_to_le16((u64 *) out - set2->_data); - set_btree_bset_end(n2, n2->set); + for (i = 0; i < 2; i++) { + bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data); - BUG_ON(!set1->u64s); - BUG_ON(!set2->u64s); + BUG_ON(!bsets[i]->u64s); - btree_node_reset_sib_u64s(n1); - btree_node_reset_sib_u64s(n2); + set_btree_bset_end(n[i], n[i]->set); - bch2_verify_btree_nr_keys(n1); - bch2_verify_btree_nr_keys(n2); + btree_node_reset_sib_u64s(n[i]); - if (n1->c.level) { - btree_node_interior_verify(as->c, n1); - btree_node_interior_verify(as->c, n2); - } + bch2_verify_btree_nr_keys(n[i]); - return n2; + if (b->c.level) + btree_node_interior_verify(as->c, n[i]); + } } /* @@ -1320,79 +1456,73 @@ static struct btree *__btree_split_node(struct btree_update *as, */ static void btree_split_insert_keys(struct btree_update *as, struct btree_trans *trans, - struct btree_path *path, + btree_path_idx_t path_idx, struct btree *b, struct keylist *keys) { - struct btree_node_iter node_iter; - struct bkey_i *k = bch2_keylist_front(keys); - struct bkey_packed *src, *dst, *n; - struct bset *i; - - bch2_btree_node_iter_init(&node_iter, b, &k->k.p); - - __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); - - /* - * We can't tolerate whiteouts here - with whiteouts there can be - * duplicate keys, and it would be rather bad if we picked a duplicate - * for the pivot: - */ - i = btree_bset_first(b); - src = dst = i->start; - while (src != vstruct_last(i)) { - n = bkey_next(src); - if (!bkey_deleted(src)) { - memmove_u64s_down(dst, src, src->u64s); - dst = bkey_next(dst); - } - src = n; - } + struct btree_path *path = trans->paths + path_idx; - /* Also clear out the unwritten whiteouts area: */ - b->whiteout_u64s = 0; + if (!bch2_keylist_empty(keys) && + bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) { + struct btree_node_iter node_iter; - i->u64s = cpu_to_le16((u64 *) dst - i->_data); - set_btree_bset_end(b, b->set); + bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p); - BUG_ON(b->nsets != 1 || - b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s)); + __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); - btree_node_interior_verify(as->c, b); + btree_node_interior_verify(as->c, b); + } } -static void btree_split(struct btree_update *as, struct btree_trans *trans, - struct btree_path *path, struct btree *b, - struct keylist *keys, unsigned flags) +static int btree_split(struct btree_update *as, struct btree_trans *trans, + btree_path_idx_t path, struct btree *b, + struct keylist *keys, unsigned flags) { struct bch_fs *c = as->c; - struct btree *parent = btree_node_parent(path, b); + struct btree *parent = btree_node_parent(trans->paths + path, b); struct btree *n1, *n2 = NULL, *n3 = NULL; + btree_path_idx_t path1 = 0, path2 = 0; u64 start_time = local_clock(); + int ret = 0; BUG_ON(!parent && (b != btree_node_root(c, b))); - 
BUG_ON(!btree_node_intent_locked(path, btree_node_root(c, b)->c.level)); + BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1)); bch2_btree_interior_update_will_free_node(as, b); - n1 = bch2_btree_node_alloc_replacement(as, b); - bch2_btree_update_add_new_node(as, n1); + if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) { + struct btree *n[2]; - if (keys) - btree_split_insert_keys(as, trans, path, n1, keys); + trace_and_count(c, btree_node_split, trans, b); - if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) { - trace_btree_split(c, b); + n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level); + n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level); - n2 = __btree_split_node(as, n1); + __btree_split_node(as, trans, b, n); + + if (keys) { + btree_split_insert_keys(as, trans, path, n1, keys); + btree_split_insert_keys(as, trans, path, n2, keys); + BUG_ON(!bch2_keylist_empty(keys)); + } bch2_btree_build_aux_trees(n2); bch2_btree_build_aux_trees(n1); + + bch2_btree_update_add_new_node(as, n1); + bch2_btree_update_add_new_node(as, n2); six_unlock_write(&n2->c.lock); six_unlock_write(&n1->c.lock); - bch2_btree_node_write(c, n1, SIX_LOCK_intent); - bch2_btree_node_write(c, n2, SIX_LOCK_intent); + path1 = get_unlocked_mut_path(trans, as->btree_id, n1->c.level, n1->key.k.p); + six_lock_increment(&n1->c.lock, SIX_LOCK_intent); + mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); + bch2_btree_path_level_init(trans, trans->paths + path1, n1); + + path2 = get_unlocked_mut_path(trans, as->btree_id, n2->c.level, n2->key.k.p); + six_lock_increment(&n2->c.lock, SIX_LOCK_intent); + mark_btree_node_locked(trans, trans->paths + path2, n2->c.level, BTREE_NODE_INTENT_LOCKED); + bch2_btree_path_level_init(trans, trans->paths + path2, n2); /* * Note that on recursive parent_keys == keys, so we @@ -1404,22 +1534,40 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans, if (!parent) { /* Depth increases, make a new root */ - n3 = __btree_root_alloc(as, b->c.level + 1); + n3 = __btree_root_alloc(as, trans, b->c.level + 1); + + bch2_btree_update_add_new_node(as, n3); + six_unlock_write(&n3->c.lock); + + trans->paths[path2].locks_want++; + BUG_ON(btree_node_locked(trans->paths + path2, n3->c.level)); + six_lock_increment(&n3->c.lock, SIX_LOCK_intent); + mark_btree_node_locked(trans, trans->paths + path2, n3->c.level, BTREE_NODE_INTENT_LOCKED); + bch2_btree_path_level_init(trans, trans->paths + path2, n3); n3->sib_u64s[0] = U16_MAX; n3->sib_u64s[1] = U16_MAX; btree_split_insert_keys(as, trans, path, n3, &as->parent_keys); - - bch2_btree_node_write(c, n3, SIX_LOCK_intent); } } else { - trace_btree_compact(c, b); + trace_and_count(c, btree_node_compact, trans, b); + + n1 = bch2_btree_node_alloc_replacement(as, trans, b); + + if (keys) { + btree_split_insert_keys(as, trans, path, n1, keys); + BUG_ON(!bch2_keylist_empty(keys)); + } bch2_btree_build_aux_trees(n1); + bch2_btree_update_add_new_node(as, n1); six_unlock_write(&n1->c.lock); - bch2_btree_node_write(c, n1, SIX_LOCK_intent); + path1 = get_unlocked_mut_path(trans, as->btree_id, n1->c.level, n1->key.k.p); + six_lock_increment(&n1->c.lock, SIX_LOCK_intent); + mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); + bch2_btree_path_level_init(trans, trans->paths + path1, n1); if (parent) bch2_keylist_add(&as->parent_keys, &n1->key); @@ -1429,28 +1577,26 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans, if 
(parent) { /* Split a non root node */ - bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags); + ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags); + if (ret) + goto err; } else if (n3) { - bch2_btree_set_root(as, trans, path, n3); + bch2_btree_set_root(as, trans, trans->paths + path, n3); } else { /* Root filled up but didn't need to be split */ - bch2_btree_set_root(as, trans, path, n1); + bch2_btree_set_root(as, trans, trans->paths + path, n1); } - bch2_btree_update_get_open_buckets(as, n1); - if (n2) - bch2_btree_update_get_open_buckets(as, n2); - if (n3) + if (n3) { bch2_btree_update_get_open_buckets(as, n3); - - /* Successful split, update the path to point to the new nodes: */ - - six_lock_increment(&b->c.lock, SIX_LOCK_intent); - if (n3) - bch2_trans_node_add(trans, n3); - if (n2) - bch2_trans_node_add(trans, n2); - bch2_trans_node_add(trans, n1); + bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0); + } + if (n2) { + bch2_btree_update_get_open_buckets(as, n2); + bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0); + } + bch2_btree_update_get_open_buckets(as, n1); + bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0); /* * The old node must be freed (in memory) _before_ unlocking the new @@ -1458,13 +1604,28 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans, * node after another thread has locked and updated the new node, thus * seeing stale data: */ - bch2_btree_node_free_inmem(trans, b); + bch2_btree_node_free_inmem(trans, trans->paths + path, b); + + if (n3) + bch2_trans_node_add(trans, trans->paths + path, n3); + if (n2) + bch2_trans_node_add(trans, trans->paths + path2, n2); + bch2_trans_node_add(trans, trans->paths + path1, n1); if (n3) six_unlock_intent(&n3->c.lock); if (n2) six_unlock_intent(&n2->c.lock); six_unlock_intent(&n1->c.lock); +out: + if (path2) { + __bch2_btree_path_unlock(trans, trans->paths + path2); + bch2_path_put(trans, path2, true); + } + if (path1) { + __bch2_btree_path_unlock(trans, trans->paths + path1); + bch2_path_put(trans, path1, true); + } bch2_trans_verify_locks(trans); @@ -1472,6 +1633,14 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans, ? 
BCH_TIME_btree_node_split : BCH_TIME_btree_node_compact], start_time); + return ret; +err: + if (n3) + bch2_btree_node_free_never_used(as, trans, n3); + if (n2) + bch2_btree_node_free_never_used(as, trans, n2); + bch2_btree_node_free_never_used(as, trans, n1); + goto out; } static void @@ -1482,48 +1651,59 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct keylist *keys) { struct btree_path *linked; + unsigned i; __bch2_btree_insert_keys_interior(as, trans, path, b, path->l[b->c.level].iter, keys); btree_update_updated_node(as, b); - trans_for_each_path_with_node(trans, b, linked) + trans_for_each_path_with_node(trans, b, linked, i) bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b); bch2_trans_verify_paths(trans); } /** - * bch_btree_insert_node - insert bkeys into a given btree node + * bch2_btree_insert_node - insert bkeys into a given btree node * - * @iter: btree iterator + * @as: btree_update object + * @trans: btree_trans object + * @path_idx: path that points to current node + * @b: node to insert keys into * @keys: list of keys to insert - * @hook: insert callback - * @persistent: if not null, @persistent will wait on journal write + * @flags: transaction commit flags + * + * Returns: 0 on success, typically transaction restart error on failure * * Inserts as many keys as it can into a given btree node, splitting it if full. * If a split occurred, this function will return early. This can only happen * for leaf nodes -- inserts into interior nodes have to be atomic. */ -static void bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans, - struct btree_path *path, struct btree *b, - struct keylist *keys, unsigned flags) +static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans, + btree_path_idx_t path_idx, struct btree *b, + struct keylist *keys, unsigned flags) { struct bch_fs *c = as->c; + struct btree_path *path = trans->paths + path_idx; int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s); int old_live_u64s = b->nr.live_u64s; int live_u64s_added, u64s_added; + int ret; lockdep_assert_held(&c->gc_lock); - BUG_ON(!btree_node_intent_locked(path, btree_node_root(c, b)->c.level)); + BUG_ON(!btree_node_intent_locked(path, b->c.level)); BUG_ON(!b->c.level); BUG_ON(!as || as->b); bch2_verify_keylist_sorted(keys); - bch2_btree_node_lock_for_insert(trans, path, b); + ret = bch2_btree_node_lock_write(trans, path, &b->c); + if (ret) + return ret; + + bch2_btree_node_prep_for_write(trans, path, b); - if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) { + if (!bch2_btree_node_insert_fits(b, bch2_keylist_u64s(keys))) { bch2_btree_node_unlock_write(trans, path, b); goto split; } @@ -1547,43 +1727,60 @@ static void bch2_btree_insert_node(struct btree_update *as, struct btree_trans * bch2_btree_node_unlock_write(trans, path, b); btree_node_interior_verify(c, b); - return; + return 0; split: - btree_split(as, trans, path, b, keys, flags); + /* + * We could attempt to avoid the transaction restart, by calling + * bch2_btree_path_upgrade() and allocating more nodes: + */ + if (b->c.level >= as->update_level) { + trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b); + return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race); + } + + return btree_split(as, trans, path_idx, b, keys, flags); } int bch2_btree_split_leaf(struct btree_trans *trans, - struct btree_path *path, + btree_path_idx_t path, unsigned flags) { - struct bch_fs *c = trans->c; - struct btree *b = path_l(path)->b; + /* 
btree_split & merge may both cause paths array to be reallocated */ + + struct btree *b = path_l(trans->paths + path)->b; struct btree_update *as; unsigned l; int ret = 0; - as = bch2_btree_update_start(trans, path, path->level, - btree_update_reserve_required(c, b), flags); + as = bch2_btree_update_start(trans, trans->paths + path, + trans->paths[path].level, + true, flags); if (IS_ERR(as)) return PTR_ERR(as); - btree_split(as, trans, path, b, NULL, flags); - bch2_btree_update_done(as); + ret = btree_split(as, trans, path, b, NULL, flags); + if (ret) { + bch2_btree_update_free(as, trans); + return ret; + } + + bch2_btree_update_done(as, trans); - for (l = path->level + 1; btree_path_node(path, l) && !ret; l++) + for (l = trans->paths[path].level + 1; + btree_node_intent_locked(&trans->paths[path], l) && !ret; + l++) ret = bch2_foreground_maybe_merge(trans, path, l, flags); return ret; } int __bch2_foreground_maybe_merge(struct btree_trans *trans, - struct btree_path *path, + btree_path_idx_t path, unsigned level, unsigned flags, enum btree_node_sibling sib) { struct bch_fs *c = trans->c; - struct btree_path *sib_path = NULL; struct btree_update *as; struct bkey_format_state new_s; struct bkey_format new_f; @@ -1591,16 +1788,18 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, struct btree *b, *m, *n, *prev, *next, *parent; struct bpos sib_pos; size_t sib_u64s; + enum btree_id btree = trans->paths[path].btree_id; + btree_path_idx_t sib_path = 0, new_path = 0; u64 start_time = local_clock(); int ret = 0; - BUG_ON(!path->should_be_locked); - BUG_ON(!btree_node_locked(path, level)); + BUG_ON(!trans->paths[path].should_be_locked); + BUG_ON(!btree_node_locked(&trans->paths[path], level)); - b = path->l[level].b; + b = trans->paths[path].l[level].b; - if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) || - (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) { + if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) || + (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) { b->sib_u64s[sib] = U16_MAX; return 0; } @@ -1609,18 +1808,18 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, ? 
@@ -1609,18 +1808,18 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
		? bpos_predecessor(b->data->min_key)
		: bpos_successor(b->data->max_key);
 
-	sib_path = bch2_path_get(trans, false, path->btree_id, sib_pos,
-				 U8_MAX, level, true, _THIS_IP_);
+	sib_path = bch2_path_get(trans, btree, sib_pos,
+				 U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
 	ret = bch2_btree_path_traverse(trans, sib_path, false);
 	if (ret)
 		goto err;
 
-	sib_path->should_be_locked = true;
+	btree_path_set_should_be_locked(trans->paths + sib_path);
 
-	m = sib_path->l[level].b;
+	m = trans->paths[sib_path].l[level].b;
 
-	if (btree_node_parent(path, b) !=
-	    btree_node_parent(sib_path, m)) {
+	if (btree_node_parent(trans->paths + path, b) !=
+	    btree_node_parent(trans->paths + sib_path, m)) {
 		b->sib_u64s[sib] = U16_MAX;
 		goto out;
 	}
@@ -1633,16 +1832,18 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 		next = m;
 	}
 
-	if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
-		char buf1[100], buf2[100];
+	if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
+		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
 
-		bch2_bpos_to_text(&PBUF(buf1), prev->data->max_key);
-		bch2_bpos_to_text(&PBUF(buf2), next->data->min_key);
+		bch2_bpos_to_text(&buf1, prev->data->max_key);
+		bch2_bpos_to_text(&buf2, next->data->min_key);
 		bch_err(c,
-			"btree topology error in btree merge:\n"
+			"%s(): btree topology error:\n"
			"  prev ends at   %s\n"
			"  next starts at %s",
-			buf1, buf2);
+			__func__, buf1.buf, buf2.buf);
+		printbuf_exit(&buf1);
+		printbuf_exit(&buf2);
 		bch2_topology_error(c);
 		ret = -EIO;
 		goto err;
@@ -1655,8 +1856,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	bch2_bkey_format_add_pos(&new_s, next->data->max_key);
 	new_f = bch2_bkey_format_done(&new_s);
 
-	sib_u64s = btree_node_u64s_with_format(b, &new_f) +
-		btree_node_u64s_with_format(m, &new_f);
+	sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) +
+		btree_node_u64s_with_format(m->nr, &m->format, &new_f);
 
 	if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
 		sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
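/*
 * [Editorial illustration -- not part of the patch.]  The hysteresis applied
 * above keeps nodes from ping-ponging between merge and split.  With made-up
 * numbers: if the two siblings would repack into 400 u64s under the combined
 * format and the hysteresis constant is 100, the value that gets remembered
 * and later compared against c->btree_foreground_merge_threshold is
 *
 *	sib_u64s = 400 - 100 = 300
 *
 * so a merge is only attempted once the siblings are comfortably below the
 * threshold, not merely touching it.
 */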
@@ -1671,37 +1872,41 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
 		goto out;
 
-	parent = btree_node_parent(path, b);
-	as = bch2_btree_update_start(trans, path, level,
-			 btree_update_reserve_required(c, parent) + 1,
-			 flags|
-			 BTREE_INSERT_NOFAIL|
-			 BTREE_INSERT_USE_RESERVE);
+	parent = btree_node_parent(trans->paths + path, b);
+	as = bch2_btree_update_start(trans, trans->paths + path, level, false,
+				     BCH_TRANS_COMMIT_no_enospc|flags);
 	ret = PTR_ERR_OR_ZERO(as);
 	if (ret)
 		goto err;
 
-	trace_btree_merge(c, b);
+	trace_and_count(c, btree_node_merge, trans, b);
 
 	bch2_btree_interior_update_will_free_node(as, b);
 	bch2_btree_interior_update_will_free_node(as, m);
 
-	n = bch2_btree_node_alloc(as, b->c.level);
-	bch2_btree_update_add_new_node(as, n);
+	n = bch2_btree_node_alloc(as, trans, b->c.level);
+
+	SET_BTREE_NODE_SEQ(n->data,
+			   max(BTREE_NODE_SEQ(b->data),
+			       BTREE_NODE_SEQ(m->data)) + 1);
 
 	btree_set_min(n, prev->data->min_key);
 	btree_set_max(n, next->data->max_key);
 
-	n->data->format		= new_f;
+	n->data->format = new_f;
 	btree_node_set_format(n, new_f);
 
 	bch2_btree_sort_into(c, n, prev);
 	bch2_btree_sort_into(c, n, next);
 
 	bch2_btree_build_aux_trees(n);
+	bch2_btree_update_add_new_node(as, n);
 	six_unlock_write(&n->c.lock);
 
-	bch2_btree_node_write(c, n, SIX_LOCK_intent);
+	new_path = get_unlocked_mut_path(trans, btree, n->c.level, n->key.k.p);
+	six_lock_increment(&n->c.lock, SIX_LOCK_intent);
+	mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
+	bch2_btree_path_level_init(trans, trans->paths + new_path, n);
 
 	bkey_init(&delete.k);
 	delete.k.p = prev->key.k.p;
@@ -1710,37 +1915,40 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 
 	bch2_trans_verify_paths(trans);
 
-	bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+	ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+	if (ret)
+		goto err_free_update;
 
 	bch2_trans_verify_paths(trans);
 
 	bch2_btree_update_get_open_buckets(as, n);
+	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
 
-	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
-	six_lock_increment(&m->c.lock, SIX_LOCK_intent);
+	bch2_btree_node_free_inmem(trans, trans->paths + path, b);
+	bch2_btree_node_free_inmem(trans, trans->paths + sib_path, m);
 
-	bch2_trans_node_add(trans, n);
+	bch2_trans_node_add(trans, trans->paths + path, n);
 
 	bch2_trans_verify_paths(trans);
 
-	bch2_btree_node_free_inmem(trans, b);
-	bch2_btree_node_free_inmem(trans, m);
-
 	six_unlock_intent(&n->c.lock);
 
-	bch2_btree_update_done(as);
+	bch2_btree_update_done(as, trans);
 
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time);
out:
err:
+	if (new_path)
+		bch2_path_put(trans, new_path, true);
 	bch2_path_put(trans, sib_path, true);
 	bch2_trans_verify_locks(trans);
 	return ret;
+err_free_update:
+	bch2_btree_node_free_never_used(as, trans, n);
+	bch2_btree_update_free(as, trans);
+	goto out;
 }
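/*
 * [Editorial note -- not part of the patch.]  The four-step sequence used
 * above (and again in bch2_btree_node_rewrite() below) is how a freshly
 * allocated node, still intent-locked from allocation, is adopted into a
 * new btree_path:
 *
 *	new_path = get_unlocked_mut_path(trans, btree, n->c.level, n->key.k.p);
 *	six_lock_increment(&n->c.lock, SIX_LOCK_intent);
 *	mark_btree_node_locked(trans, trans->paths + new_path,
 *			       n->c.level, BTREE_NODE_INTENT_LOCKED);
 *	bch2_btree_path_level_init(trans, trans->paths + new_path, n);
 *
 * six_lock_increment() takes an extra reference on the already-held intent
 * lock, so the new path owns a lock reference of its own.
 */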
 
-/**
- * bch_btree_node_rewrite - Rewrite/move a btree node
- */
 int bch2_btree_node_rewrite(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct btree *b,
@@ -1749,58 +1957,67 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct btree *n, *parent;
 	struct btree_update *as;
+	btree_path_idx_t new_path = 0;
 	int ret;
 
-	flags |= BTREE_INSERT_NOFAIL;
+	flags |= BCH_TRANS_COMMIT_no_enospc;
 
-	parent = btree_node_parent(iter->path, b);
-	as = bch2_btree_update_start(trans, iter->path, b->c.level,
-				     (parent
-				      ? btree_update_reserve_required(c, parent)
-				      : 0) + 1,
-				     flags);
+	struct btree_path *path = btree_iter_path(trans, iter);
+	parent = btree_node_parent(path, b);
+	as = bch2_btree_update_start(trans, path, b->c.level, false, flags);
 	ret = PTR_ERR_OR_ZERO(as);
-	if (ret) {
-		trace_btree_gc_rewrite_node_fail(c, b);
+	if (ret)
 		goto out;
-	}
 
 	bch2_btree_interior_update_will_free_node(as, b);
 
-	n = bch2_btree_node_alloc_replacement(as, b);
-	bch2_btree_update_add_new_node(as, n);
+	n = bch2_btree_node_alloc_replacement(as, trans, b);
 
 	bch2_btree_build_aux_trees(n);
+	bch2_btree_update_add_new_node(as, n);
 	six_unlock_write(&n->c.lock);
 
-	trace_btree_gc_rewrite_node(c, b);
+	new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);
+	six_lock_increment(&n->c.lock, SIX_LOCK_intent);
+	mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
+	bch2_btree_path_level_init(trans, trans->paths + new_path, n);
 
-	bch2_btree_node_write(c, n, SIX_LOCK_intent);
+	trace_and_count(c, btree_node_rewrite, trans, b);
 
 	if (parent) {
 		bch2_keylist_add(&as->parent_keys, &n->key);
-		bch2_btree_insert_node(as, trans, iter->path, parent,
-				       &as->parent_keys, flags);
+		ret = bch2_btree_insert_node(as, trans, iter->path,
					     parent, &as->parent_keys, flags);
+		if (ret)
+			goto err;
 	} else {
-		bch2_btree_set_root(as, trans, iter->path, n);
+		bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n);
 	}
 
 	bch2_btree_update_get_open_buckets(as, n);
+	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
+
+	bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b);
 
-	six_lock_increment(&b->c.lock, SIX_LOCK_intent);
-	bch2_trans_node_add(trans, n);
-	bch2_btree_node_free_inmem(trans, b);
+	bch2_trans_node_add(trans, trans->paths + iter->path, n);
 	six_unlock_intent(&n->c.lock);
 
-	bch2_btree_update_done(as);
+	bch2_btree_update_done(as, trans);
out:
-	bch2_btree_path_downgrade(iter->path);
+	if (new_path)
+		bch2_path_put(trans, new_path, true);
+	bch2_trans_downgrade(trans);
 	return ret;
+err:
+	bch2_btree_node_free_never_used(as, trans, n);
+	bch2_btree_update_free(as, trans);
+	goto out;
 }
 
 struct async_btree_rewrite {
	struct bch_fs		*c;
	struct work_struct	work;
+	struct list_head	list;
	enum btree_id		btree_id;
	unsigned		level;
	struct bpos		pos;
@@ -1810,6 +2027,7 @@ struct async_btree_rewrite {
 static int async_btree_node_rewrite_trans(struct btree_trans *trans,
					  struct async_btree_rewrite *a)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct btree *b;
 	int ret;
@@ -1821,41 +2039,48 @@ static int async_btree_node_rewrite_trans(struct btree_trans *trans,
 	if (ret)
 		goto out;
 
-	if (!b || b->data->keys.seq != a->seq)
+	if (!b || b->data->keys.seq != a->seq) {
+		struct printbuf buf = PRINTBUF;
+
+		if (b)
+			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+		else
+			prt_str(&buf, "(null)");
+		bch_info(c, "%s: node to rewrite not found, searching for seq %llu, got\n%s",
+			 __func__, a->seq, buf.buf);
+		printbuf_exit(&buf);
 		goto out;
+	}
 
 	ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
-out :
+out:
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
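/*
 * [Editorial note -- not part of the patch.]  An async rewrite names its
 * target by (btree_id, level, pos, seq) rather than by pointer, since the
 * node can be evicted, freed or rewritten before the worker runs.  The
 * lookup above re-reads whatever node now covers that position and only
 * proceeds if its sequence number still matches the one recorded in
 * struct async_btree_rewrite:
 *
 *	if (!b || b->data->keys.seq != a->seq)
 *		goto out;	// stale request: node is gone or was rewritten
 */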
 
-void async_btree_node_rewrite_work(struct work_struct *work)
+static void async_btree_node_rewrite_work(struct work_struct *work)
 {
 	struct async_btree_rewrite *a =
		container_of(work, struct async_btree_rewrite, work);
 	struct bch_fs *c = a->c;
+	int ret;
 
-	bch2_trans_do(c, NULL, NULL, 0,
-		      async_btree_node_rewrite_trans(&trans, a));
-	percpu_ref_put(&c->writes);
+	ret = bch2_trans_do(c, NULL, NULL, 0,
			    async_btree_node_rewrite_trans(trans, a));
+	bch_err_fn(c, ret);
+	bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
 	kfree(a);
 }
 
 void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
 {
 	struct async_btree_rewrite *a;
-
-	if (!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags))
-		return;
-
-	if (!percpu_ref_tryget(&c->writes))
-		return;
+	int ret;
 
 	a = kmalloc(sizeof(*a), GFP_NOFS);
 	if (!a) {
-		percpu_ref_put(&c->writes);
+		bch_err(c, "%s: error allocating memory", __func__);
 		return;
 	}
 
@@ -1864,35 +2089,81 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
 	a->level	= b->c.level;
 	a->pos		= b->key.k.p;
 	a->seq		= b->data->keys.seq;
-
 	INIT_WORK(&a->work, async_btree_node_rewrite_work);
+
+	if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
+		mutex_lock(&c->pending_node_rewrites_lock);
+		list_add(&a->list, &c->pending_node_rewrites);
+		mutex_unlock(&c->pending_node_rewrites_lock);
+		return;
+	}
+
+	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
+		if (test_bit(BCH_FS_started, &c->flags)) {
+			bch_err(c, "%s: error getting c->writes ref", __func__);
+			kfree(a);
+			return;
+		}
+
+		ret = bch2_fs_read_write_early(c);
+		bch_err_msg(c, ret, "going read-write");
+		if (ret) {
+			kfree(a);
+			return;
+		}
+
+		bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+	}
+
 	queue_work(c->btree_interior_update_worker, &a->work);
 }
 
+void bch2_do_pending_node_rewrites(struct bch_fs *c)
+{
+	struct async_btree_rewrite *a, *n;
+
+	mutex_lock(&c->pending_node_rewrites_lock);
+	list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+		list_del(&a->list);
+
+		bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+		queue_work(c->btree_interior_update_worker, &a->work);
+	}
+	mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
+void bch2_free_pending_node_rewrites(struct bch_fs *c)
+{
+	struct async_btree_rewrite *a, *n;
+
+	mutex_lock(&c->pending_node_rewrites_lock);
+	list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+		list_del(&a->list);
+
+		kfree(a);
+	}
+	mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
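/*
 * [Editorial illustration -- not part of the patch.]  Rewrites requested
 * before BCH_FS_may_go_rw is set are parked on c->pending_node_rewrites;
 * the two helpers above are the intended consumers.  A sketch of the assumed
 * recovery-path usage (the function and its call sites are hypothetical):
 */
static void example_finish_recovery(struct bch_fs *c, bool going_rw)
{
	if (going_rw)
		bch2_do_pending_node_rewrites(c);	/* queue the parked rewrites */
	else
		bch2_free_pending_node_rewrites(c);	/* staying read-only: drop them */
}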
 static int __bch2_btree_node_update_key(struct btree_trans *trans,
					struct btree_iter *iter,
					struct btree *b, struct btree *new_hash,
					struct bkey_i *new_key,
+					unsigned commit_flags,
					bool skip_triggers)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter2 = { NULL };
 	struct btree *parent;
-	u64 journal_entries[BKEY_BTREE_PTR_U64s_MAX];
 	int ret;
 
 	if (!skip_triggers) {
-		ret = bch2_trans_mark_key(trans,
-					  bkey_s_c_null,
-					  bkey_i_to_s_c(new_key),
-					  BTREE_TRIGGER_INSERT);
-		if (ret)
-			return ret;
-
-		ret = bch2_trans_mark_key(trans,
-					  bkey_i_to_s_c(&b->key),
-					  bkey_s_c_null,
-					  BTREE_TRIGGER_OVERWRITE);
+		ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1,
+					   bkey_i_to_s_c(&b->key),
+					   BTREE_TRIGGER_TRANSACTIONAL) ?:
+		      bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1,
+					   bkey_i_to_s(new_key),
+					   BTREE_TRIGGER_TRANSACTIONAL);
 		if (ret)
 			return ret;
 	}
@@ -1904,7 +2175,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 		BUG_ON(ret);
 	}
 
-	parent = btree_node_parent(iter->path, b);
+	parent = btree_node_parent(btree_iter_path(trans, iter), b);
 	if (parent) {
 		bch2_trans_copy_iter(&iter2, iter);
 
		iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
				iter2.flags & BTREE_ITER_INTENT,
				_THIS_IP_);
 
-		BUG_ON(iter2.path->level != b->c.level);
-		BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
+		struct btree_path *path2 = btree_iter_path(trans, &iter2);
+		BUG_ON(path2->level != b->c.level);
+		BUG_ON(!bpos_eq(path2->pos, new_key->k.p));
+
+		btree_path_set_level_up(trans, path2);
 
-		btree_node_unlock(iter2.path, iter2.path->level);
-		path_l(iter2.path)->b = BTREE_ITER_NO_NODE_UP;
-		iter2.path->level++;
+		trans->paths_sorted = false;
 
 		ret   = bch2_btree_iter_traverse(&iter2) ?:
			bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN);
@@ -1926,23 +2198,23 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 	} else {
 		BUG_ON(btree_node_root(c, b) != b);
 
-		trans->extra_journal_entries = (void *) &journal_entries[0];
-		trans->extra_journal_entry_u64s =
-			journal_entry_set((void *) &journal_entries[0],
-					  BCH_JSET_ENTRY_btree_root,
-					  b->c.btree_id, b->c.level,
-					  new_key, new_key->k.u64s);
+		struct jset_entry *e = bch2_trans_jset_entry_alloc(trans,
				jset_u64s(new_key->k.u64s));
+		ret = PTR_ERR_OR_ZERO(e);
+		if (ret)
+			return ret;
+
+		journal_entry_set(e,
+				  BCH_JSET_ENTRY_btree_root,
+				  b->c.btree_id, b->c.level,
+				  new_key, new_key->k.u64s);
 	}
 
-	ret = bch2_trans_commit(trans, NULL, NULL,
-				BTREE_INSERT_NOFAIL|
-				BTREE_INSERT_NOCHECK_RW|
-				BTREE_INSERT_JOURNAL_RECLAIM|
-				BTREE_INSERT_JOURNAL_RESERVED);
+	ret = bch2_trans_commit(trans, NULL, NULL, commit_flags);
 	if (ret)
 		goto err;
 
-	bch2_btree_node_lock_write(trans, iter->path, b);
+	bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c);
 
 	if (new_hash) {
 		mutex_lock(&c->btree_cache.lock);
@@ -1957,7 +2229,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 		bkey_copy(&b->key, new_key);
 	}
 
-	bch2_btree_node_unlock_write(trans, iter->path, b);
+	bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b);
out:
 	bch2_trans_iter_exit(trans, &iter2);
 	return ret;
@@ -1972,19 +2244,17 @@ err:
 int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree *b, struct bkey_i *new_key,
-			       bool skip_triggers)
+			       unsigned commit_flags, bool skip_triggers)
 {
 	struct bch_fs *c = trans->c;
 	struct btree *new_hash = NULL;
-	struct btree_path *path = iter->path;
+	struct btree_path *path = btree_iter_path(trans, iter);
 	struct closure cl;
 	int ret = 0;
 
-	if (!btree_node_intent_locked(path, b->c.level) &&
-	    !bch2_btree_path_upgrade(trans, path, b->c.level + 1)) {
-		btree_trans_restart(trans);
-		return -EINTR;
-	}
+	ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1);
+	if (ret)
+		return ret;
 
 	closure_init_stack(&cl);
 
@@ -1993,20 +2263,19 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
	 * btree_iter_traverse():
	 */
 	if (btree_ptr_hash_val(new_key) != b->hash_val) {
-		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
 		if (ret) {
-			bch2_trans_unlock(trans);
-			closure_sync(&cl);
-			if (!bch2_trans_relock(trans))
-				return -EINTR;
+			ret = drop_locks_do(trans, (closure_sync(&cl), 0));
+			if (ret)
+				return ret;
 		}
 
-		new_hash = bch2_btree_node_mem_alloc(c);
+		new_hash = bch2_btree_node_mem_alloc(trans, false);
 	}
 
 	path->intent_ref++;
-	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
-					   new_key, skip_triggers);
+	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
					   commit_flags, skip_triggers);
 	--path->intent_ref;
 
 	if (new_hash) {
@@ -2018,13 +2287,13 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 		six_unlock_intent(&new_hash->c.lock);
 	}
 	closure_sync(&cl);
-	bch2_btree_cache_cannibalize_unlock(c);
+	bch2_btree_cache_cannibalize_unlock(trans);
 	return ret;
 }
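/*
 * [Editorial illustration -- not part of the patch.]  Commit flags for the
 * internal transaction commit are now chosen by the caller rather than
 * hardcoded in __bch2_btree_node_update_key().  A caller that must not fail
 * on -ENOSPC would pass (call site hypothetical, flag name from this patch):
 *
 *	ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
 *					 BCH_TRANS_COMMIT_no_enospc, true);
 */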
 
 int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
					struct btree *b, struct bkey_i *new_key,
-					bool skip_triggers)
+					unsigned commit_flags, bool skip_triggers)
 {
 	struct btree_iter iter;
 	int ret;
 
@@ -2037,7 +2306,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
 		goto out;
 
	/* has node been freed? */
-	if (iter.path->l[b->c.level].b != b) {
+	if (btree_iter_path(trans, &iter)->l[b->c.level].b != b) {
		/* node has been freed: */
 		BUG_ON(!btree_node_dying(b));
 		goto out;
@@ -2045,7 +2314,12 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
 
 	BUG_ON(!btree_node_hashed(b));
 
-	ret = bch2_btree_node_update_key(trans, &iter, b, new_key, skip_triggers);
+	struct bch_extent_ptr *ptr;
+	bch2_bkey_drop_ptrs(bkey_i_to_s(new_key), ptr,
			    !bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev));
+
+	ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
					 commit_flags, skip_triggers);
out:
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
@@ -2064,8 +2338,9 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
 	bch2_btree_set_root_inmem(c, b);
 }
 
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
 {
+	struct bch_fs *c = trans->c;
 	struct closure cl;
 	struct btree *b;
 	int ret;
 
@@ -2073,12 +2348,12 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
 	closure_init_stack(&cl);
 
 	do {
-		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
 		closure_sync(&cl);
 	} while (ret);
 
-	b = bch2_btree_node_mem_alloc(c);
-	bch2_btree_cache_cannibalize_unlock(c);
+	b = bch2_btree_node_mem_alloc(trans, false);
+	bch2_btree_cache_cannibalize_unlock(trans);
 
 	set_btree_node_fake(b);
 	set_btree_node_need_rewrite(b);
@@ -2106,6 +2381,12 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
+	return 0;
+}
+
+void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+{
+	bch2_trans_run(c, __bch2_btree_root_alloc(trans, id));
 }
 
 void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
@@ -2114,70 +2395,67 @@ void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
 
 	mutex_lock(&c->btree_interior_update_lock);
 	list_for_each_entry(as, &c->btree_interior_update_list, list)
-		pr_buf(out, "%p m %u w %u r %u j %llu\n",
+		prt_printf(out, "%p m %u w %u r %u j %llu\n",
			   as,
			   as->mode,
			   as->nodes_written,
-			   atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK,
+			   closure_nr_remaining(&as->cl),
			   as->journal.seq);
 	mutex_unlock(&c->btree_interior_update_lock);
 }
 
-size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *c)
+static bool bch2_btree_interior_updates_pending(struct bch_fs *c)
 {
-	size_t ret = 0;
-	struct list_head *i;
+	bool ret;
 
 	mutex_lock(&c->btree_interior_update_lock);
-	list_for_each(i, &c->btree_interior_update_list)
-		ret++;
+	ret = !list_empty(&c->btree_interior_update_list);
 	mutex_unlock(&c->btree_interior_update_lock);
 
 	return ret;
 }
 
-void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
+bool bch2_btree_interior_updates_flush(struct bch_fs *c)
+{
+	bool ret = bch2_btree_interior_updates_pending(c);
+
+	if (ret)
+		closure_wait_event(&c->btree_interior_update_wait,
				   !bch2_btree_interior_updates_pending(c));
+	return ret;
+}
+
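/*
 * [Editorial illustration -- not part of the patch.]  The old
 * bch2_btree_interior_updates_nr_pending() counter is replaced by a flush
 * primitive; shutdown-style callers now simply do (call site hypothetical):
 *
 *	if (bch2_btree_interior_updates_flush(c))
 *		bch_info(c, "waited for in-flight interior btree updates");
 *
 * i.e. the return value only reports whether there was anything to wait for.
 */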
+void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
 {
-	struct btree_root *r;
-	struct jset_entry *entry;
+	struct btree_root *r = bch2_btree_id_root(c, entry->btree_id);
 
 	mutex_lock(&c->btree_root_lock);
 
-	vstruct_for_each(jset, entry)
-		if (entry->type == BCH_JSET_ENTRY_btree_root) {
-			r = &c->btree_roots[entry->btree_id];
-			r->level = entry->level;
-			r->alive = true;
-			bkey_copy(&r->key, &entry->start[0]);
-		}
+	r->level = entry->level;
+	r->alive = true;
+	bkey_copy(&r->key, (struct bkey_i *) entry->start);
 
 	mutex_unlock(&c->btree_root_lock);
 }
 
 struct jset_entry *
 bch2_btree_roots_to_journal_entries(struct bch_fs *c,
-				    struct jset_entry *start,
-				    struct jset_entry *end)
+				    struct jset_entry *end,
+				    unsigned long skip)
 {
-	struct jset_entry *entry;
-	unsigned long have = 0;
 	unsigned i;
 
-	for (entry = start; entry < end; entry = vstruct_next(entry))
-		if (entry->type == BCH_JSET_ENTRY_btree_root)
-			__set_bit(entry->btree_id, &have);
-
 	mutex_lock(&c->btree_root_lock);
 
-	for (i = 0; i < BTREE_ID_NR; i++)
-		if (c->btree_roots[i].alive && !test_bit(i, &have)) {
-			journal_entry_set(end,
-					  BCH_JSET_ENTRY_btree_root,
-					  i, c->btree_roots[i].level,
-					  &c->btree_roots[i].key,
-					  c->btree_roots[i].key.u64s);
+	for (i = 0; i < btree_id_nr_alive(c); i++) {
+		struct btree_root *r = bch2_btree_id_root(c, i);
+
+		if (r->alive && !test_bit(i, &skip)) {
+			journal_entry_set(end, BCH_JSET_ENTRY_btree_root,
					  i, r->level, &r->key, r->key.k.u64s);
 			end = vstruct_next(end);
 		}
+	}
 
 	mutex_unlock(&c->btree_root_lock);
 
@@ -2191,7 +2469,7 @@ void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
 	mempool_exit(&c->btree_interior_update_pool);
 }
 
-int bch2_fs_btree_interior_update_init(struct bch_fs *c)
+void bch2_fs_btree_interior_update_init_early(struct bch_fs *c)
 {
 	mutex_init(&c->btree_reserve_cache_lock);
 	INIT_LIST_HEAD(&c->btree_interior_update_list);
@@ -2199,11 +2477,20 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c)
 	mutex_init(&c->btree_interior_update_lock);
 	INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);
 
+	INIT_LIST_HEAD(&c->pending_node_rewrites);
+	mutex_init(&c->pending_node_rewrites_lock);
+}
+
+int bch2_fs_btree_interior_update_init(struct bch_fs *c)
+{
 	c->btree_interior_update_worker =
		alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
 	if (!c->btree_interior_update_worker)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
 
-	return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
-					 sizeof(struct btree_update));
+	if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
				      sizeof(struct btree_update)))
+		return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
+
+	return 0;
 }
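/*
 * [Editorial illustration -- not part of the patch.]  Initialization is now
 * split in two: the _early variant only sets up locks, lists and work items
 * and cannot fail, while the later variant performs allocations and can.
 * Assumed startup ordering (call sites hypothetical):
 *
 *	bch2_fs_btree_interior_update_init_early(c);	// during struct bch_fs setup
 *	// ... later, once allocation failures can be handled:
 *	ret = bch2_fs_btree_interior_update_init(c);	// may return -BCH_ERR_ENOMEM_*
 *	if (ret)
 *		goto err;
 */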