X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_update_interior.c;h=9affcb22d9cb7025453cfd528f13738ff9514879;hb=7fd6c3ffe45b3b42c0bc8a8c5d1387a5e3316a54;hp=a49e7b6b416d95355dc3a929a6c9e6b4f00e03b3;hpb=42cf74fd1d0ef58927967e6236988e86cfc0d086;p=bcachefs-tools-debian diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c index a49e7b6..9affcb2 100644 --- a/libbcachefs/btree_update_interior.c +++ b/libbcachefs/btree_update_interior.c @@ -5,23 +5,24 @@ #include "bkey_methods.h" #include "btree_cache.h" #include "btree_gc.h" +#include "btree_journal_iter.h" #include "btree_update.h" #include "btree_update_interior.h" #include "btree_io.h" #include "btree_iter.h" #include "btree_locking.h" #include "buckets.h" +#include "clock.h" #include "error.h" #include "extents.h" #include "journal.h" #include "journal_reclaim.h" #include "keylist.h" -#include "recovery.h" #include "replicas.h" #include "super-io.h" +#include "trace.h" #include -#include static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *, struct btree_path *, struct btree *, @@ -142,10 +143,15 @@ static size_t btree_node_u64s_with_format(struct btree *b, } /** - * btree_node_format_fits - check if we could rewrite node with a new format + * bch2_btree_node_format_fits - check if we could rewrite node with a new format * - * This assumes all keys can pack with the new format -- it just checks if - * the re-packed keys would fit inside the node itself. + * @c: filesystem handle + * @b: btree node to rewrite + * @new_f: bkey format to translate keys to + * + * Returns: true if all re-packed keys will be able to fit in a new node. + * + * Assumes all keys will successfully pack with the new format. */ bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, struct bkey_format *new_f) @@ -161,6 +167,7 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b) { trace_and_count(c, btree_node_free, c, b); + BUG_ON(btree_node_write_blocked(b)); BUG_ON(btree_node_dirty(b)); BUG_ON(btree_node_need_write(b)); BUG_ON(b == btree_node_root(c, b)); @@ -186,7 +193,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, bch2_btree_node_hash_remove(&c->btree_cache, b); __btree_node_free(c, b); six_unlock_write(&b->c.lock); - mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); trans_for_each_path(trans, path) if (path->l[level].b == b) { @@ -241,27 +248,21 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, struct bch_fs *c = trans->c; struct write_point *wp; struct btree *b; - __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; - struct open_buckets ob = { .nr = 0 }; + BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; + struct open_buckets obs = { .nr = 0 }; struct bch_devs_list devs_have = (struct bch_devs_list) { 0 }; - unsigned nr_reserve; - enum alloc_reserve alloc_reserve; + enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; + unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim + ? 
BTREE_NODE_RESERVE + : 0; int ret; - if (flags & BTREE_INSERT_USE_RESERVE) { - nr_reserve = 0; - alloc_reserve = RESERVE_btree_movinggc; - } else { - nr_reserve = BTREE_NODE_RESERVE; - alloc_reserve = RESERVE_btree; - } - mutex_lock(&c->btree_reserve_cache_lock); if (c->btree_reserve_cache_nr > nr_reserve) { struct btree_alloc *a = &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; - ob = a->ob; + obs = a->ob; bkey_copy(&tmp.k, &a->k); mutex_unlock(&c->btree_reserve_cache_lock); goto mem_alloc; @@ -277,7 +278,7 @@ retry: &devs_have, res->nr_replicas, c->opts.metadata_replicas_required, - alloc_reserve, 0, cl, &wp); + watermark, 0, cl, &wp); if (unlikely(ret)) return ERR_PTR(ret); @@ -296,10 +297,10 @@ retry: bkey_btree_ptr_v2_init(&tmp.k); bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false); - bch2_open_bucket_get(c, wp, &ob); + bch2_open_bucket_get(c, wp, &obs); bch2_alloc_sectors_done(c, wp); mem_alloc: - b = bch2_btree_node_mem_alloc(c, interior_node); + b = bch2_btree_node_mem_alloc(trans, interior_node); six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); @@ -308,7 +309,7 @@ mem_alloc: BUG_ON(b->ob.nr); bkey_copy(&b->key, &tmp.k); - b->ob = ob; + b->ob = obs; return b; } @@ -362,6 +363,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, BUG_ON(ret); trace_and_count(c, btree_node_alloc, c, b); + bch2_increment_clock(c, btree_sectors(c), WRITE); return b; } @@ -473,9 +475,6 @@ static int bch2_btree_reserve_get(struct btree_trans *trans, /* * Protects reaping from the btree node cache and using the btree node * open bucket reserve: - * - * BTREE_INSERT_NOWAIT only applies to btree node allocation, not - * blocking on this lock: */ ret = bch2_btree_cache_cannibalize_lock(c, cl); if (ret) @@ -485,9 +484,8 @@ static int bch2_btree_reserve_get(struct btree_trans *trans, struct prealloc_nodes *p = as->prealloc_nodes + interior; while (p->nr < nr_nodes[interior]) { - b = __bch2_btree_node_alloc(trans, &as->disk_res, - flags & BTREE_INSERT_NOWAIT ? NULL : cl, - interior, flags); + b = __bch2_btree_node_alloc(trans, &as->disk_res, cl, + interior, flags); if (IS_ERR(b)) { ret = PTR_ERR(b); goto err; @@ -511,8 +509,6 @@ static void bch2_btree_update_free(struct btree_update *as, struct btree_trans * up_read(&c->gc_lock); as->took_gc_lock = false; - bch2_journal_preres_put(&c->journal, &as->journal_preres); - bch2_journal_pin_drop(&c->journal, &as->journal); bch2_journal_pin_flush(&c->journal, &as->journal); bch2_disk_reservation_put(c, &as->disk_res); @@ -595,12 +591,11 @@ static void btree_update_nodes_written(struct btree_update *as) { struct bch_fs *c = as->c; struct btree *b; - struct btree_trans trans; + struct btree_trans *trans = bch2_trans_get(c); u64 journal_seq = 0; unsigned i; int ret; - bch2_trans_init(&trans, c, 0, 512); /* * If we're already in an error state, it might be because a btree node * was never written, and we might be trying to free that same btree @@ -621,7 +616,7 @@ static void btree_update_nodes_written(struct btree_update *as) b = as->old_nodes[i]; - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); seq = b->data ? b->data->keys.seq : 0; six_unlock_read(&b->c.lock); @@ -643,14 +638,13 @@ static void btree_update_nodes_written(struct btree_update *as) * journal reclaim does btree updates when flushing bkey_cached entries, * which may require allocations as well. 
*/ - ret = commit_do(&trans, &as->disk_res, &journal_seq, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_NOCHECK_RW| - BTREE_INSERT_USE_RESERVE| - BTREE_INSERT_JOURNAL_RECLAIM| - JOURNAL_WATERMARK_reserved, - btree_update_nodes_written_trans(&trans, as)); - bch2_trans_unlock(&trans); + ret = commit_do(trans, &as->disk_res, &journal_seq, + BCH_WATERMARK_reclaim| + BCH_TRANS_COMMIT_no_enospc| + BCH_TRANS_COMMIT_no_check_rw| + BCH_TRANS_COMMIT_journal_reclaim, + btree_update_nodes_written_trans(trans, as)); + bch2_trans_unlock(trans); bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c, "%s(): error %s", __func__, bch2_err_str(ret)); @@ -659,7 +653,7 @@ err: struct btree_path *path; b = as->b; - path = get_unlocked_mut_path(&trans, as->btree_id, b->c.level, b->key.k.p); + path = get_unlocked_mut_path(trans, as->btree_id, b->c.level, b->key.k.p); /* * @b is the node we did the final insert into: * @@ -682,12 +676,13 @@ err: * we may rarely end up with a locked path besides the one we * have here: */ - bch2_trans_unlock(&trans); - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent); - mark_btree_node_locked(&trans, path, b->c.level, SIX_LOCK_intent); - bch2_btree_path_level_init(&trans, path, b); + bch2_trans_unlock(trans); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); + mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED); + path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); + path->l[b->c.level].b = b; - bch2_btree_node_lock_write_nofail(&trans, path, &b->c); + bch2_btree_node_lock_write_nofail(trans, path, &b->c); mutex_lock(&c->btree_interior_update_lock); @@ -700,15 +695,15 @@ err: * btree_interior_update_lock: */ if (as->b == b) { - struct bset *i = btree_bset_last(b); - BUG_ON(!b->c.level); BUG_ON(!btree_node_dirty(b)); if (!ret) { - i->journal_seq = cpu_to_le64( + struct bset *last = btree_bset_last(b); + + last->journal_seq = cpu_to_le64( max(journal_seq, - le64_to_cpu(i->journal_seq))); + le64_to_cpu(last->journal_seq))); bch2_btree_add_journal_pin(c, b, journal_seq); } else { @@ -723,18 +718,16 @@ err: mutex_unlock(&c->btree_interior_update_lock); - mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); six_unlock_write(&b->c.lock); btree_node_write_if_need(c, b, SIX_LOCK_intent); - btree_node_unlock(&trans, path, b->c.level); - bch2_path_put(&trans, path, true); + btree_node_unlock(trans, path, b->c.level); + bch2_path_put(trans, path, true); } bch2_journal_pin_drop(&c->journal, &as->journal); - bch2_journal_preres_put(&c->journal, &as->journal_preres); - mutex_lock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; @@ -748,7 +741,7 @@ err: for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); btree_node_write_if_need(c, b, SIX_LOCK_read); six_unlock_read(&b->c.lock); } @@ -756,8 +749,8 @@ err: for (i = 0; i < as->nr_open_buckets; i++) bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]); - bch2_btree_update_free(as, &trans); - bch2_trans_exit(&trans); + bch2_btree_update_free(as, trans); + bch2_trans_put(trans); } static void btree_interior_update_work(struct work_struct *work) @@ -806,6 +799,7 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b) BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE); BUG_ON(!btree_node_dirty(b)); 
+ BUG_ON(!b->c.level); as->mode = BTREE_INTERIOR_UPDATING_NODE; as->b = b; @@ -816,6 +810,12 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b) mutex_unlock(&c->btree_interior_update_lock); } +static int bch2_update_reparent_journal_pin_flush(struct journal *j, + struct journal_entry_pin *_pin, u64 seq) +{ + return 0; +} + static void btree_update_reparent(struct btree_update *as, struct btree_update *child) { @@ -826,7 +826,8 @@ static void btree_update_reparent(struct btree_update *as, child->b = NULL; child->mode = BTREE_INTERIOR_UPDATING_AS; - bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL); + bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, + bch2_update_reparent_journal_pin_flush); } static void btree_update_updated_root(struct btree_update *as, struct btree *b) @@ -935,6 +936,12 @@ static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct b b->ob.v[--b->ob.nr]; } +static int bch2_btree_update_will_free_node_journal_pin_flush(struct journal *j, + struct journal_entry_pin *_pin, u64 seq) +{ + return 0; +} + /* * @b is being split/rewritten: it may have pointers to not-yet-written btree * nodes and thus outstanding btree_updates - redirect @b's @@ -975,6 +982,7 @@ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, clear_btree_node_dirty_acct(c, b); clear_btree_node_need_write(b); + clear_btree_node_write_blocked(b); /* * Does this node have unwritten data that has a pin on the journal? @@ -985,11 +993,13 @@ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, * when the new nodes are persistent and reachable on disk: */ w = btree_current_write(b); - bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL); + bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, + bch2_btree_update_will_free_node_journal_pin_flush); bch2_journal_pin_drop(&c->journal, &w->journal); w = btree_prev_write(b); - bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL); + bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, + bch2_btree_update_will_free_node_journal_pin_flush); bch2_journal_pin_drop(&c->journal, &w->journal); mutex_unlock(&c->btree_interior_update_lock); @@ -1039,18 +1049,28 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, struct bch_fs *c = trans->c; struct btree_update *as; u64 start_time = local_clock(); - int disk_res_flags = (flags & BTREE_INSERT_NOFAIL) + int disk_res_flags = (flags & BCH_TRANS_COMMIT_no_enospc) ? 
BCH_DISK_RESERVATION_NOFAIL : 0; unsigned nr_nodes[2] = { 0, 0 }; unsigned update_level = level; - int journal_flags = flags & JOURNAL_WATERMARK_MASK; + enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; + unsigned journal_flags = 0; int ret = 0; u32 restart_count = trans->restart_count; BUG_ON(!path->should_be_locked); - if (flags & BTREE_INSERT_JOURNAL_RECLAIM) + if (watermark == BCH_WATERMARK_copygc) + watermark = BCH_WATERMARK_btree_copygc; + if (watermark < BCH_WATERMARK_btree) + watermark = BCH_WATERMARK_btree; + + flags &= ~BCH_WATERMARK_MASK; + flags |= watermark; + + if (flags & BCH_TRANS_COMMIT_journal_reclaim) journal_flags |= JOURNAL_RES_GET_NONBLOCK; + journal_flags |= watermark; while (1) { nr_nodes[!!update_level] += 1 + split; @@ -1071,28 +1091,24 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, BKEY_BTREE_PTR_U64s_MAX * (1 + split))) break; - split = true; + split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c); } - if (flags & BTREE_INSERT_GC_LOCK_HELD) - lockdep_assert_held(&c->gc_lock); - else if (!down_read_trylock(&c->gc_lock)) { - bch2_trans_unlock(trans); - down_read(&c->gc_lock); - ret = bch2_trans_relock(trans); + if (!down_read_trylock(&c->gc_lock)) { + ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0)); if (ret) { up_read(&c->gc_lock); return ERR_PTR(ret); } } - as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO); + as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS); memset(as, 0, sizeof(*as)); closure_init(&as->cl, NULL); as->c = c; as->start_time = start_time; as->mode = BTREE_INTERIOR_NO_UPDATE; - as->took_gc_lock = !(flags & BTREE_INSERT_GC_LOCK_HELD); + as->took_gc_lock = true; as->btree_id = path->btree_id; as->update_level = update_level; INIT_LIST_HEAD(&as->list); @@ -1118,31 +1134,6 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, if (ret) goto err; - ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, - BTREE_UPDATE_JOURNAL_RES, - journal_flags|JOURNAL_RES_GET_NONBLOCK); - if (ret) { - bch2_trans_unlock(trans); - - if (flags & BTREE_INSERT_JOURNAL_RECLAIM) { - ret = -BCH_ERR_journal_reclaim_would_deadlock; - goto err; - } - - ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, - BTREE_UPDATE_JOURNAL_RES, - journal_flags); - if (ret) { - trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags); - ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get); - goto err; - } - - ret = bch2_trans_relock(trans); - if (ret) - goto err; - } - ret = bch2_disk_reservation_get(c, &as->disk_res, (nr_nodes[0] + nr_nodes[1]) * btree_sectors(c), c->opts.metadata_replicas, @@ -1155,6 +1146,17 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, bch2_err_matches(ret, ENOMEM)) { struct closure cl; + /* + * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK + * flag + */ + if (bch2_err_matches(ret, ENOSPC) && + (flags & BCH_TRANS_COMMIT_journal_reclaim) && + watermark != BCH_WATERMARK_reclaim) { + ret = -BCH_ERR_journal_reclaim_would_deadlock; + goto err; + } + closure_init_stack(&cl); do { @@ -1196,24 +1198,12 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b) (b->c.level < btree_node_root(c, b)->c.level || !btree_node_dying(btree_node_root(c, b)))); - btree_node_root(c, b) = b; + bch2_btree_id_root(c, b->c.btree_id)->b = b; mutex_unlock(&c->btree_root_lock); bch2_recalc_btree_reserve(c); } -/** - * bch_btree_set_root - update 
the root in memory and on disk - * - * To ensure forward progress, the current task must not be holding any - * btree node write locks. However, you must hold an intent lock on the - * old root. - * - * Note: This allocates a journal entry but doesn't add any keys to - * it. All the btree roots are part of every journal write, so there - * is nothing new to be done. This just guarantees that there is a - * journal write. - */ static void bch2_btree_set_root(struct btree_update *as, struct btree_trans *trans, struct btree_path *path, @@ -1268,14 +1258,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b), WRITE, &buf) ?: - bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf)) { + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf)) { printbuf_reset(&buf); prt_printf(&buf, "inserting invalid bkey\n "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); prt_printf(&buf, "\n "); bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b), WRITE, &buf); - bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf); + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf); bch2_fs_inconsistent(c, "%s", buf.buf); dump_stack(); @@ -1327,12 +1317,12 @@ __bch2_btree_insert_keys_interior(struct btree_update *as, ; while (!bch2_keylist_empty(keys)) { - struct bkey_i *k = bch2_keylist_front(keys); + insert = bch2_keylist_front(keys); - if (bpos_gt(k->k.p, b->key.k.p)) + if (bpos_gt(insert->k.p, b->key.k.p)) break; - bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k); + bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); bch2_keylist_pop_front(keys); } } @@ -1409,7 +1399,7 @@ static void __btree_split_node(struct btree_update *as, out[i]->needs_whiteout = false; btree_keys_account_key_add(&n[i]->nr, 0, out[i]); - out[i] = bkey_next(out[i]); + out[i] = bkey_p_next(out[i]); } for (i = 0; i < 2; i++) { @@ -1499,12 +1489,12 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p); six_lock_increment(&n1->c.lock, SIX_LOCK_intent); - mark_btree_node_locked(trans, path1, n1->c.level, SIX_LOCK_intent); + mark_btree_node_locked(trans, path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, path1, n1); path2 = get_unlocked_mut_path(trans, path->btree_id, n2->c.level, n2->key.k.p); six_lock_increment(&n2->c.lock, SIX_LOCK_intent); - mark_btree_node_locked(trans, path2, n2->c.level, SIX_LOCK_intent); + mark_btree_node_locked(trans, path2, n2->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, path2, n2); /* @@ -1525,7 +1515,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, path2->locks_want++; BUG_ON(btree_node_locked(path2, n3->c.level)); six_lock_increment(&n3->c.lock, SIX_LOCK_intent); - mark_btree_node_locked(trans, path2, n3->c.level, SIX_LOCK_intent); + mark_btree_node_locked(trans, path2, n3->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, path2, n3); n3->sib_u64s[0] = U16_MAX; @@ -1549,7 +1539,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p); six_lock_increment(&n1->c.lock, SIX_LOCK_intent); - mark_btree_node_locked(trans, path1, n1->c.level, SIX_LOCK_intent); + mark_btree_node_locked(trans, path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, path1, 
n1); if (parent) @@ -1647,12 +1637,16 @@ bch2_btree_insert_keys_interior(struct btree_update *as, } /** - * bch_btree_insert_node - insert bkeys into a given btree node + * bch2_btree_insert_node - insert bkeys into a given btree node * - * @iter: btree iterator + * @as: btree_update object + * @trans: btree_trans object + * @path: path that points to current node + * @b: node to insert keys into * @keys: list of keys to insert - * @hook: insert callback - * @persistent: if not null, @persistent will wait on journal write + * @flags: transaction commit flags + * + * Returns: 0 on success, typically transaction restart error on failure * * Inserts as many keys as it can into a given btree node, splitting it if full. * If a split occurred, this function will return early. This can only happen @@ -1674,9 +1668,6 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t BUG_ON(!as || as->b); bch2_verify_keylist_sorted(keys); - if (!(local_clock() & 63)) - return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race); - ret = bch2_btree_node_lock_write(trans, path, &b->c); if (ret) return ret; @@ -1713,8 +1704,10 @@ split: * We could attempt to avoid the transaction restart, by calling * bch2_btree_path_upgrade() and allocating more nodes: */ - if (b->c.level >= as->update_level) + if (b->c.level >= as->update_level) { + trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b); return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race); + } return btree_split(as, trans, path, b, keys, flags); } @@ -1846,9 +1839,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, parent = btree_node_parent(path, b); as = bch2_btree_update_start(trans, path, level, false, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_USE_RESERVE| - flags); + BCH_TRANS_COMMIT_no_enospc|flags); ret = PTR_ERR_OR_ZERO(as); if (ret) goto err; @@ -1879,7 +1870,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, new_path = get_unlocked_mut_path(trans, path->btree_id, n->c.level, n->key.k.p); six_lock_increment(&n->c.lock, SIX_LOCK_intent); - mark_btree_node_locked(trans, new_path, n->c.level, SIX_LOCK_intent); + mark_btree_node_locked(trans, new_path, n->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, new_path, n); bkey_init(&delete.k); @@ -1923,9 +1914,6 @@ err_free_update: goto out; } -/** - * bch_btree_node_rewrite - Rewrite/move a btree node - */ int bch2_btree_node_rewrite(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, @@ -1937,7 +1925,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans, struct btree_update *as; int ret; - flags |= BTREE_INSERT_NOFAIL; + flags |= BCH_TRANS_COMMIT_no_enospc; parent = btree_node_parent(iter->path, b); as = bch2_btree_update_start(trans, iter->path, b->c.level, @@ -1956,7 +1944,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans, new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p); six_lock_increment(&n->c.lock, SIX_LOCK_intent); - mark_btree_node_locked(trans, new_path, n->c.level, SIX_LOCK_intent); + mark_btree_node_locked(trans, new_path, n->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, new_path, n); trace_and_count(c, btree_node_rewrite, c, b); @@ -1983,7 +1971,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans, out: if (new_path) bch2_path_put(trans, new_path, true); - bch2_btree_path_downgrade(trans, iter->path); + bch2_trans_downgrade(trans); return ret; err: bch2_btree_node_free_never_used(as, 
trans, n); @@ -1994,6 +1982,7 @@ err: struct async_btree_rewrite { struct bch_fs *c; struct work_struct work; + struct list_head list; enum btree_id btree_id; unsigned level; struct bpos pos; @@ -2003,6 +1992,7 @@ struct async_btree_rewrite { static int async_btree_node_rewrite_trans(struct btree_trans *trans, struct async_btree_rewrite *a) { + struct bch_fs *c = trans->c; struct btree_iter iter; struct btree *b; int ret; @@ -2014,8 +2004,18 @@ static int async_btree_node_rewrite_trans(struct btree_trans *trans, if (ret) goto out; - if (!b || b->data->keys.seq != a->seq) + if (!b || b->data->keys.seq != a->seq) { + struct printbuf buf = PRINTBUF; + + if (b) + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); + else + prt_str(&buf, "(null"); + bch_info(c, "%s: node to rewrite not found:, searching for seq %llu, got\n%s", + __func__, a->seq, buf.buf); + printbuf_exit(&buf); goto out; + } ret = bch2_btree_node_rewrite(trans, &iter, b, 0); out: @@ -2024,28 +2024,29 @@ out: return ret; } -void async_btree_node_rewrite_work(struct work_struct *work) +static void async_btree_node_rewrite_work(struct work_struct *work) { struct async_btree_rewrite *a = container_of(work, struct async_btree_rewrite, work); struct bch_fs *c = a->c; + int ret; - bch2_trans_do(c, NULL, NULL, 0, - async_btree_node_rewrite_trans(&trans, a)); - percpu_ref_put(&c->writes); + ret = bch2_trans_do(c, NULL, NULL, 0, + async_btree_node_rewrite_trans(trans, a)); + if (ret) + bch_err_fn(c, ret); + bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite); kfree(a); } void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) { struct async_btree_rewrite *a; - - if (!percpu_ref_tryget_live(&c->writes)) - return; + int ret; a = kmalloc(sizeof(*a), GFP_NOFS); if (!a) { - percpu_ref_put(&c->writes); + bch_err(c, "%s: error allocating memory", __func__); return; } @@ -2054,15 +2055,67 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) a->level = b->c.level; a->pos = b->key.k.p; a->seq = b->data->keys.seq; - INIT_WORK(&a->work, async_btree_node_rewrite_work); + + if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) { + mutex_lock(&c->pending_node_rewrites_lock); + list_add(&a->list, &c->pending_node_rewrites); + mutex_unlock(&c->pending_node_rewrites_lock); + return; + } + + if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { + if (test_bit(BCH_FS_STARTED, &c->flags)) { + bch_err(c, "%s: error getting c->writes ref", __func__); + kfree(a); + return; + } + + ret = bch2_fs_read_write_early(c); + if (ret) { + bch_err_msg(c, ret, "going read-write"); + kfree(a); + return; + } + + bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); + } + queue_work(c->btree_interior_update_worker, &a->work); } +void bch2_do_pending_node_rewrites(struct bch_fs *c) +{ + struct async_btree_rewrite *a, *n; + + mutex_lock(&c->pending_node_rewrites_lock); + list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { + list_del(&a->list); + + bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); + queue_work(c->btree_interior_update_worker, &a->work); + } + mutex_unlock(&c->pending_node_rewrites_lock); +} + +void bch2_free_pending_node_rewrites(struct bch_fs *c) +{ + struct async_btree_rewrite *a, *n; + + mutex_lock(&c->pending_node_rewrites_lock); + list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { + list_del(&a->list); + + kfree(a); + } + mutex_unlock(&c->pending_node_rewrites_lock); +} + static int __bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, struct 
btree *new_hash, struct bkey_i *new_key, + unsigned commit_flags, bool skip_triggers) { struct bch_fs *c = trans->c; @@ -2102,7 +2155,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, btree_path_set_level_up(trans, iter2.path); - bch2_btree_path_check_sort(trans, iter2.path, 0); + trans->paths_sorted = false; ret = bch2_btree_iter_traverse(&iter2) ?: bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN); @@ -2123,12 +2176,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, trans->extra_journal_entries.nr += jset_u64s(new_key->k.u64s); } - ret = bch2_trans_commit(trans, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_NOCHECK_RW| - BTREE_INSERT_USE_RESERVE| - BTREE_INSERT_JOURNAL_RECLAIM| - JOURNAL_WATERMARK_reserved); + ret = bch2_trans_commit(trans, NULL, NULL, commit_flags); if (ret) goto err; @@ -2162,7 +2210,7 @@ err: int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, struct bkey_i *new_key, - bool skip_triggers) + unsigned commit_flags, bool skip_triggers) { struct bch_fs *c = trans->c; struct btree *new_hash = NULL; @@ -2183,19 +2231,17 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite if (btree_ptr_hash_val(new_key) != b->hash_val) { ret = bch2_btree_cache_cannibalize_lock(c, &cl); if (ret) { - bch2_trans_unlock(trans); - closure_sync(&cl); - ret = bch2_trans_relock(trans); + ret = drop_locks_do(trans, (closure_sync(&cl), 0)); if (ret) return ret; } - new_hash = bch2_btree_node_mem_alloc(c, false); + new_hash = bch2_btree_node_mem_alloc(trans, false); } path->intent_ref++; - ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, - new_key, skip_triggers); + ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key, + commit_flags, skip_triggers); --path->intent_ref; if (new_hash) { @@ -2213,7 +2259,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, struct btree *b, struct bkey_i *new_key, - bool skip_triggers) + unsigned commit_flags, bool skip_triggers) { struct btree_iter iter; int ret; @@ -2234,7 +2280,8 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, BUG_ON(!btree_node_hashed(b)); - ret = bch2_btree_node_update_key(trans, &iter, b, new_key, skip_triggers); + ret = bch2_btree_node_update_key(trans, &iter, b, new_key, + commit_flags, skip_triggers); out: bch2_trans_iter_exit(trans, &iter); return ret; @@ -2253,8 +2300,9 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b) bch2_btree_set_root_inmem(c, b); } -void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) +static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id) { + struct bch_fs *c = trans->c; struct closure cl; struct btree *b; int ret; @@ -2266,7 +2314,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) closure_sync(&cl); } while (ret); - b = bch2_btree_node_mem_alloc(c, false); + b = bch2_btree_node_mem_alloc(trans, false); bch2_btree_cache_cannibalize_unlock(c); set_btree_node_fake(b); @@ -2295,6 +2343,12 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); + return 0; +} + +void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) +{ + bch2_trans_run(c, __bch2_btree_root_alloc(trans, id)); } void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c) @@ -2307,7 +2361,7 @@ void 
bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c) as, as->mode, as->nodes_written, - atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK, + closure_nr_remaining(&as->cl), as->journal.seq); mutex_unlock(&c->btree_interior_update_lock); } @@ -2333,48 +2387,37 @@ bool bch2_btree_interior_updates_flush(struct bch_fs *c) return ret; } -void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset) +void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry) { - struct btree_root *r; - struct jset_entry *entry; + struct btree_root *r = bch2_btree_id_root(c, entry->btree_id); mutex_lock(&c->btree_root_lock); - vstruct_for_each(jset, entry) - if (entry->type == BCH_JSET_ENTRY_btree_root) { - r = &c->btree_roots[entry->btree_id]; - r->level = entry->level; - r->alive = true; - bkey_copy(&r->key, &entry->start[0]); - } + r->level = entry->level; + r->alive = true; + bkey_copy(&r->key, (struct bkey_i *) entry->start); mutex_unlock(&c->btree_root_lock); } struct jset_entry * bch2_btree_roots_to_journal_entries(struct bch_fs *c, - struct jset_entry *start, - struct jset_entry *end) + struct jset_entry *end, + unsigned long skip) { - struct jset_entry *entry; - unsigned long have = 0; unsigned i; - for (entry = start; entry < end; entry = vstruct_next(entry)) - if (entry->type == BCH_JSET_ENTRY_btree_root) - __set_bit(entry->btree_id, &have); - mutex_lock(&c->btree_root_lock); - for (i = 0; i < BTREE_ID_NR; i++) - if (c->btree_roots[i].alive && !test_bit(i, &have)) { - journal_entry_set(end, - BCH_JSET_ENTRY_btree_root, - i, c->btree_roots[i].level, - &c->btree_roots[i].key, - c->btree_roots[i].key.u64s); + for (i = 0; i < btree_id_nr_alive(c); i++) { + struct btree_root *r = bch2_btree_id_root(c, i); + + if (r->alive && !test_bit(i, &skip)) { + journal_entry_set(end, BCH_JSET_ENTRY_btree_root, + i, r->level, &r->key, r->key.k.u64s); end = vstruct_next(end); } + } mutex_unlock(&c->btree_root_lock); @@ -2388,7 +2431,7 @@ void bch2_fs_btree_interior_update_exit(struct bch_fs *c) mempool_exit(&c->btree_interior_update_pool); } -int bch2_fs_btree_interior_update_init(struct bch_fs *c) +void bch2_fs_btree_interior_update_init_early(struct bch_fs *c) { mutex_init(&c->btree_reserve_cache_lock); INIT_LIST_HEAD(&c->btree_interior_update_list); @@ -2396,11 +2439,20 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c) mutex_init(&c->btree_interior_update_lock); INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work); + INIT_LIST_HEAD(&c->pending_node_rewrites); + mutex_init(&c->pending_node_rewrites_lock); +} + +int bch2_fs_btree_interior_update_init(struct bch_fs *c) +{ c->btree_interior_update_worker = alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1); if (!c->btree_interior_update_worker) - return -ENOMEM; + return -BCH_ERR_ENOMEM_btree_interior_update_worker_init; + + if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, + sizeof(struct btree_update))) + return -BCH_ERR_ENOMEM_btree_interior_update_pool_init; - return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, - sizeof(struct btree_update)); + return 0; }
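
The hunks above convert btree_update_nodes_written() and the other interior-update paths from an on-stack btree_trans (bch2_trans_init()/bch2_trans_exit()) to a pointer obtained from bch2_trans_get() and released with bch2_trans_put(), and they fold the old BTREE_INSERT_* and JOURNAL_WATERMARK_* commit flags into the BCH_TRANS_COMMIT_* and BCH_WATERMARK_* masks. A minimal caller-side sketch of the new pattern, for illustration only (not part of the patch; example_commit() and do_example_update() are hypothetical stand-ins for real callers such as btree_update_nodes_written_trans()):

static int do_example_update(struct btree_trans *trans)
{
	/* hypothetical update body: queue bch2_trans_update() calls here */
	return 0;
}

static int example_commit(struct bch_fs *c)
{
	/* was: struct btree_trans trans; bch2_trans_init(&trans, c, 0, 512); */
	struct btree_trans *trans = bch2_trans_get(c);
	u64 journal_seq = 0;
	int ret;

	/*
	 * commit_do() retries the update expression on transaction restart;
	 * the watermark now travels in the same flags word as the
	 * BCH_TRANS_COMMIT_* bits (no separate JOURNAL_WATERMARK_* argument).
	 */
	ret = commit_do(trans, NULL, &journal_seq,
			BCH_WATERMARK_reclaim|
			BCH_TRANS_COMMIT_no_enospc|
			BCH_TRANS_COMMIT_journal_reclaim,
			do_example_update(trans));

	bch2_trans_put(trans);	/* was: bch2_trans_exit(&trans) */
	return ret;
}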