X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_update_interior.c;h=c741150e68af971d144e752df543078bf1396bf8;hb=8d5e53b88aaafe7c01fc369e52dbd1fc8955a77d;hp=5ce91ae6a69e800f9a839701c1ad7fc98c83f966;hpb=ae43a58d97fc00e31770142da832fb8a249808eb;p=bcachefs-tools-debian diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c index 5ce91ae..c741150 100644 --- a/libbcachefs/btree_update_interior.c +++ b/libbcachefs/btree_update_interior.c @@ -5,23 +5,24 @@ #include "bkey_methods.h" #include "btree_cache.h" #include "btree_gc.h" +#include "btree_journal_iter.h" #include "btree_update.h" #include "btree_update_interior.h" #include "btree_io.h" #include "btree_iter.h" #include "btree_locking.h" #include "buckets.h" +#include "clock.h" #include "error.h" #include "extents.h" #include "journal.h" #include "journal_reclaim.h" #include "keylist.h" -#include "recovery.h" #include "replicas.h" #include "super-io.h" +#include "trace.h" #include -#include static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *, struct btree_path *, struct btree *, @@ -72,7 +73,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) break; bp = bkey_s_c_to_btree_ptr_v2(k); - if (bpos_cmp(next_node, bp.v->min_key)) { + if (!bpos_eq(next_node, bp.v->min_key)) { bch2_dump_btree_node(c, b); bch2_bpos_to_text(&buf1, next_node); bch2_bpos_to_text(&buf2, bp.v->min_key); @@ -82,7 +83,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b) bch2_btree_node_iter_advance(&iter, b); if (bch2_btree_node_iter_end(&iter)) { - if (bpos_cmp(k.k->p, b->key.k.p)) { + if (!bpos_eq(k.k->p, b->key.k.p)) { bch2_dump_btree_node(c, b); bch2_bpos_to_text(&buf1, b->key.k.p); bch2_bpos_to_text(&buf2, k.k->p); @@ -161,6 +162,7 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b) { trace_and_count(c, btree_node_free, c, b); + BUG_ON(btree_node_write_blocked(b)); BUG_ON(btree_node_dirty(b)); BUG_ON(btree_node_need_write(b)); BUG_ON(b == btree_node_root(c, b)); @@ -186,7 +188,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, bch2_btree_node_hash_remove(&c->btree_cache, b); __btree_node_free(c, b); six_unlock_write(&b->c.lock); - mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); trans_for_each_path(trans, path) if (path->l[level].b == b) { @@ -241,19 +243,14 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, struct bch_fs *c = trans->c; struct write_point *wp; struct btree *b; - __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; + BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; struct open_buckets ob = { .nr = 0 }; struct bch_devs_list devs_have = (struct bch_devs_list) { 0 }; - unsigned nr_reserve; - enum alloc_reserve alloc_reserve; - - if (flags & BTREE_INSERT_USE_RESERVE) { - nr_reserve = 0; - alloc_reserve = RESERVE_btree_movinggc; - } else { - nr_reserve = BTREE_NODE_RESERVE; - alloc_reserve = RESERVE_btree; - } + enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; + unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim + ? 
BTREE_NODE_RESERVE + : 0; + int ret; mutex_lock(&c->btree_reserve_cache_lock); if (c->btree_reserve_cache_nr > nr_reserve) { @@ -268,7 +265,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, mutex_unlock(&c->btree_reserve_cache_lock); retry: - wp = bch2_alloc_sectors_start_trans(trans, + ret = bch2_alloc_sectors_start_trans(trans, c->opts.metadata_target ?: c->opts.foreground_target, 0, @@ -276,9 +273,9 @@ retry: &devs_have, res->nr_replicas, c->opts.metadata_replicas_required, - alloc_reserve, 0, cl); - if (IS_ERR(wp)) - return ERR_CAST(wp); + watermark, 0, cl, &wp); + if (unlikely(ret)) + return ERR_PTR(ret); if (wp->sectors_free < btree_sectors(c)) { struct open_bucket *ob; @@ -298,7 +295,7 @@ retry: bch2_open_bucket_get(c, wp, &ob); bch2_alloc_sectors_done(c, wp); mem_alloc: - b = bch2_btree_node_mem_alloc(c, interior_node); + b = bch2_btree_node_mem_alloc(trans, interior_node); six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); @@ -361,6 +358,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, BUG_ON(ret); trace_and_count(c, btree_node_alloc, c, b); + bch2_increment_clock(c, btree_sectors(c), WRITE); return b; } @@ -377,14 +375,19 @@ static void btree_set_max(struct btree *b, struct bpos pos) b->data->max_key = pos; } -struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as, - struct btree_trans *trans, - struct btree *b, - struct bkey_format format) +static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as, + struct btree_trans *trans, + struct btree *b) { - struct btree *n; + struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level); + struct bkey_format format = bch2_btree_calc_format(b); - n = bch2_btree_node_alloc(as, trans, b->c.level); + /* + * The keys might expand with the new format - if they wouldn't fit in + * the btree node anymore, use the old format for now: + */ + if (!bch2_btree_node_format_fits(as->c, b, &format)) + format = b->format; SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1); @@ -397,27 +400,9 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as, bch2_btree_sort_into(as->c, n, b); btree_node_reset_sib_u64s(n); - - n->key.k.p = b->key.k.p; return n; } -static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as, - struct btree_trans *trans, - struct btree *b) -{ - struct bkey_format new_f = bch2_btree_calc_format(b); - - /* - * The keys might expand with the new format - if they wouldn't fit in - * the btree node anymore, use the old format for now: - */ - if (!bch2_btree_node_format_fits(as->c, b, &new_f)) - new_f = b->format; - - return __bch2_btree_node_alloc_replacement(as, trans, b, new_f); -} - static struct btree *__btree_root_alloc(struct btree_update *as, struct btree_trans *trans, unsigned level) { @@ -656,11 +641,10 @@ static void btree_update_nodes_written(struct btree_update *as) * which may require allocations as well. 
*/ ret = commit_do(&trans, &as->disk_res, &journal_seq, + BCH_WATERMARK_reclaim| BTREE_INSERT_NOFAIL| BTREE_INSERT_NOCHECK_RW| - BTREE_INSERT_USE_RESERVE| - BTREE_INSERT_JOURNAL_RECLAIM| - JOURNAL_WATERMARK_reserved, + BTREE_INSERT_JOURNAL_RECLAIM, btree_update_nodes_written_trans(&trans, as)); bch2_trans_unlock(&trans); @@ -697,7 +681,8 @@ err: bch2_trans_unlock(&trans); btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent); mark_btree_node_locked(&trans, path, b->c.level, SIX_LOCK_intent); - bch2_btree_path_level_init(&trans, path, b); + path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); + path->l[b->c.level].b = b; bch2_btree_node_lock_write_nofail(&trans, path, &b->c); @@ -735,7 +720,7 @@ err: mutex_unlock(&c->btree_interior_update_lock); - mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); six_unlock_write(&b->c.lock); btree_node_write_if_need(c, b, SIX_LOCK_intent); @@ -818,6 +803,7 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b) BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE); BUG_ON(!btree_node_dirty(b)); + BUG_ON(!b->c.level); as->mode = BTREE_INTERIOR_UPDATING_NODE; as->b = b; @@ -987,6 +973,7 @@ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, clear_btree_node_dirty_acct(c, b); clear_btree_node_need_write(b); + clear_btree_node_write_blocked(b); /* * Does this node have unwritten data that has a pin on the journal? @@ -1055,14 +1042,24 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, ? BCH_DISK_RESERVATION_NOFAIL : 0; unsigned nr_nodes[2] = { 0, 0 }; unsigned update_level = level; - int journal_flags = flags & JOURNAL_WATERMARK_MASK; + enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; + unsigned journal_flags = 0; int ret = 0; u32 restart_count = trans->restart_count; BUG_ON(!path->should_be_locked); + if (watermark == BCH_WATERMARK_copygc) + watermark = BCH_WATERMARK_btree_copygc; + if (watermark < BCH_WATERMARK_btree) + watermark = BCH_WATERMARK_btree; + + flags &= ~BCH_WATERMARK_MASK; + flags |= watermark; + if (flags & BTREE_INSERT_JOURNAL_RECLAIM) journal_flags |= JOURNAL_RES_GET_NONBLOCK; + journal_flags |= watermark; while (1) { nr_nodes[!!update_level] += 1 + split; @@ -1083,22 +1080,20 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, BKEY_BTREE_PTR_U64s_MAX * (1 + split))) break; - split = true; + split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c); } if (flags & BTREE_INSERT_GC_LOCK_HELD) lockdep_assert_held(&c->gc_lock); else if (!down_read_trylock(&c->gc_lock)) { - bch2_trans_unlock(trans); - down_read(&c->gc_lock); - ret = bch2_trans_relock(trans); + ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0)); if (ret) { up_read(&c->gc_lock); return ERR_PTR(ret); } } - as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO); + as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS); memset(as, 0, sizeof(*as)); closure_init(&as->cl, NULL); as->c = c; @@ -1134,23 +1129,19 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, BTREE_UPDATE_JOURNAL_RES, journal_flags|JOURNAL_RES_GET_NONBLOCK); if (ret) { - bch2_trans_unlock(trans); - if (flags & BTREE_INSERT_JOURNAL_RECLAIM) { ret = -BCH_ERR_journal_reclaim_would_deadlock; goto err; } - ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, + ret = drop_locks_do(trans, + bch2_journal_preres_get(&c->journal, 
&as->journal_preres, BTREE_UPDATE_JOURNAL_RES, - journal_flags); - if (ret) { + journal_flags)); + if (ret == -BCH_ERR_journal_preres_get_blocked) { trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags); ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get); - goto err; } - - ret = bch2_trans_relock(trans); if (ret) goto err; } @@ -1167,6 +1158,17 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, bch2_err_matches(ret, ENOMEM)) { struct closure cl; + /* + * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK + * flag + */ + if (bch2_err_matches(ret, ENOSPC) && + (flags & BTREE_INSERT_JOURNAL_RECLAIM) && + watermark != BCH_WATERMARK_reclaim) { + ret = -BCH_ERR_journal_reclaim_would_deadlock; + goto err; + } + closure_init_stack(&cl); do { @@ -1174,11 +1176,12 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, bch2_trans_unlock(trans); closure_sync(&cl); - } while (ret == -EAGAIN); + } while (bch2_err_matches(ret, BCH_ERR_operation_blocked)); } if (ret) { - trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1]); + trace_and_count(c, btree_reserve_get_fail, trans->fn, + _RET_IP_, nr_nodes[0] + nr_nodes[1], ret); goto err; } @@ -1207,7 +1210,7 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b) (b->c.level < btree_node_root(c, b)->c.level || !btree_node_dying(btree_node_root(c, b)))); - btree_node_root(c, b) = b; + bch2_btree_id_root(c, b->c.btree_id)->b = b; mutex_unlock(&c->btree_root_lock); bch2_recalc_btree_reserve(c); @@ -1269,6 +1272,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct bch_fs *c = as->c; struct bkey_packed *k; struct printbuf buf = PRINTBUF; + unsigned long old, new, v; BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 && !btree_ptr_sectors_written(insert)); @@ -1306,7 +1310,15 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, bch2_btree_bset_insert_key(trans, path, b, node_iter, insert); set_btree_node_dirty_acct(c, b); - set_btree_node_need_write(b); + + v = READ_ONCE(b->flags); + do { + old = new = v; + + new &= ~BTREE_WRITE_TYPE_MASK; + new |= BTREE_WRITE_interior; + new |= 1 << BTREE_NODE_need_write; + } while ((v = cmpxchg(&b->flags, old, new)) != old); printbuf_exit(&buf); } @@ -1329,8 +1341,12 @@ __bch2_btree_insert_keys_interior(struct btree_update *as, ; while (!bch2_keylist_empty(keys)) { - bch2_insert_fixup_btree_ptr(as, trans, path, b, - &node_iter, bch2_keylist_front(keys)); + struct bkey_i *k = bch2_keylist_front(keys); + + if (bpos_gt(k->k.p, b->key.k.p)) + break; + + bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k); bch2_keylist_pop_front(keys); } } @@ -1339,109 +1355,91 @@ __bch2_btree_insert_keys_interior(struct btree_update *as, * Move keys from n1 (original replacement node, now lower node) to n2 (higher * node) */ -static struct btree *__btree_split_node(struct btree_update *as, - struct btree_trans *trans, - struct btree *n1) +static void __btree_split_node(struct btree_update *as, + struct btree_trans *trans, + struct btree *b, + struct btree *n[2]) { - struct bkey_format_state s; - size_t nr_packed = 0, nr_unpacked = 0; - struct btree *n2; - struct bset *set1, *set2; - struct bkey_packed *k, *set2_start, *set2_end, *out, *prev = NULL; - struct bpos n1_pos; + struct bkey_packed *k; + struct bpos n1_pos = POS_MIN; + struct btree_node_iter iter; + struct bset *bsets[2]; + struct bkey_format_state format[2]; + struct 
bkey_packed *out[2]; + struct bkey uk; + unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5; + int i; - n2 = bch2_btree_node_alloc(as, trans, n1->c.level); + for (i = 0; i < 2; i++) { + BUG_ON(n[i]->nsets != 1); - n2->data->max_key = n1->data->max_key; - n2->data->format = n1->format; - SET_BTREE_NODE_SEQ(n2->data, BTREE_NODE_SEQ(n1->data)); - n2->key.k.p = n1->key.k.p; + bsets[i] = btree_bset_first(n[i]); + out[i] = bsets[i]->start; - set1 = btree_bset_first(n1); - set2 = btree_bset_first(n2); + SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1); + bch2_bkey_format_init(&format[i]); + } - /* - * Has to be a linear search because we don't have an auxiliary - * search tree yet - */ - k = set1->start; - while (1) { - struct bkey_packed *n = bkey_next(k); + u64s = 0; + for_each_btree_node_key(b, k, &iter) { + if (bkey_deleted(k)) + continue; + + i = u64s >= n1_u64s; + u64s += k->u64s; + uk = bkey_unpack_key(b, k); + if (!i) + n1_pos = uk.p; + bch2_bkey_format_add_key(&format[i], &uk); + } - if (n == vstruct_last(set1)) - break; - if (k->_data - set1->_data >= (le16_to_cpu(set1->u64s) * 3) / 5) - break; + btree_set_min(n[0], b->data->min_key); + btree_set_max(n[0], n1_pos); + btree_set_min(n[1], bpos_successor(n1_pos)); + btree_set_max(n[1], b->data->max_key); - if (bkey_packed(k)) - nr_packed++; - else - nr_unpacked++; + for (i = 0; i < 2; i++) { + bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key); + bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key); - prev = k; - k = n; + n[i]->data->format = bch2_bkey_format_done(&format[i]); + btree_node_set_format(n[i], n[i]->data->format); } - BUG_ON(!prev); - set2_start = k; - set2_end = vstruct_last(set1); - - set1->u64s = cpu_to_le16((u64 *) set2_start - set1->_data); - set_btree_bset_end(n1, n1->set); + u64s = 0; + for_each_btree_node_key(b, k, &iter) { + if (bkey_deleted(k)) + continue; - n1->nr.live_u64s = le16_to_cpu(set1->u64s); - n1->nr.bset_u64s[0] = le16_to_cpu(set1->u64s); - n1->nr.packed_keys = nr_packed; - n1->nr.unpacked_keys = nr_unpacked; + i = u64s >= n1_u64s; + u64s += k->u64s; - n1_pos = bkey_unpack_pos(n1, prev); - if (as->c->sb.version < bcachefs_metadata_version_snapshot) - n1_pos.snapshot = U32_MAX; + if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k) + ? &b->format: &bch2_bkey_format_current, k)) + out[i]->format = KEY_FORMAT_LOCAL_BTREE; + else + bch2_bkey_unpack(b, (void *) out[i], k); - btree_set_max(n1, n1_pos); - btree_set_min(n2, bpos_successor(n1->key.k.p)); + out[i]->needs_whiteout = false; - bch2_bkey_format_init(&s); - bch2_bkey_format_add_pos(&s, n2->data->min_key); - bch2_bkey_format_add_pos(&s, n2->data->max_key); - - for (k = set2_start; k != set2_end; k = bkey_next(k)) { - struct bkey uk = bkey_unpack_key(n1, k); - bch2_bkey_format_add_key(&s, &uk); + btree_keys_account_key_add(&n[i]->nr, 0, out[i]); + out[i] = bkey_p_next(out[i]); } - n2->data->format = bch2_bkey_format_done(&s); - btree_node_set_format(n2, n2->data->format); - - out = set2->start; - memset(&n2->nr, 0, sizeof(n2->nr)); - - for (k = set2_start; k != set2_end; k = bkey_next(k)) { - BUG_ON(!bch2_bkey_transform(&n2->format, out, bkey_packed(k) - ? 
&n1->format : &bch2_bkey_format_current, k)); - out->format = KEY_FORMAT_LOCAL_BTREE; - btree_keys_account_key_add(&n2->nr, 0, out); - out = bkey_next(out); - } + for (i = 0; i < 2; i++) { + bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data); - set2->u64s = cpu_to_le16((u64 *) out - set2->_data); - set_btree_bset_end(n2, n2->set); + BUG_ON(!bsets[i]->u64s); - BUG_ON(!set1->u64s); - BUG_ON(!set2->u64s); + set_btree_bset_end(n[i], n[i]->set); - btree_node_reset_sib_u64s(n1); - btree_node_reset_sib_u64s(n2); + btree_node_reset_sib_u64s(n[i]); - bch2_verify_btree_nr_keys(n1); - bch2_verify_btree_nr_keys(n2); + bch2_verify_btree_nr_keys(n[i]); - if (n1->c.level) { - btree_node_interior_verify(as->c, n1); - btree_node_interior_verify(as->c, n2); + if (b->c.level) + btree_node_interior_verify(as->c, n[i]); } - - return n2; } /* @@ -1461,41 +1459,16 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b, struct keylist *keys) { - struct btree_node_iter node_iter; - struct bkey_i *k = bch2_keylist_front(keys); - struct bkey_packed *src, *dst, *n; - struct bset *i; + if (!bch2_keylist_empty(keys) && + bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) { + struct btree_node_iter node_iter; - bch2_btree_node_iter_init(&node_iter, b, &k->k.p); + bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p); - __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); + __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); - /* - * We can't tolerate whiteouts here - with whiteouts there can be - * duplicate keys, and it would be rather bad if we picked a duplicate - * for the pivot: - */ - i = btree_bset_first(b); - src = dst = i->start; - while (src != vstruct_last(i)) { - n = bkey_next(src); - if (!bkey_deleted(src)) { - memmove_u64s_down(dst, src, src->u64s); - dst = bkey_next(dst); - } - src = n; + btree_node_interior_verify(as->c, b); } - - /* Also clear out the unwritten whiteouts area: */ - b->whiteout_u64s = 0; - - i->u64s = cpu_to_le16((u64 *) dst - i->_data); - set_btree_bset_end(b, b->set); - - BUG_ON(b->nsets != 1 || - b->nr.live_u64s != le16_to_cpu(btree_bset_first(b)->u64s)); - - btree_node_interior_verify(as->c, b); } static int btree_split(struct btree_update *as, struct btree_trans *trans, @@ -1514,15 +1487,21 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, bch2_btree_interior_update_will_free_node(as, b); - n1 = bch2_btree_node_alloc_replacement(as, trans, b); - - if (keys) - btree_split_insert_keys(as, trans, path, n1, keys); + if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) { + struct btree *n[2]; - if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) { trace_and_count(c, btree_node_split, c, b); - n2 = __btree_split_node(as, trans, n1); + n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level); + n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level); + + __btree_split_node(as, trans, b, n); + + if (keys) { + btree_split_insert_keys(as, trans, path, n1, keys); + btree_split_insert_keys(as, trans, path, n2, keys); + BUG_ON(!bch2_keylist_empty(keys)); + } bch2_btree_build_aux_trees(n2); bch2_btree_build_aux_trees(n1); @@ -1571,6 +1550,13 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, } else { trace_and_count(c, btree_node_compact, c, b); + n1 = bch2_btree_node_alloc_replacement(as, trans, b); + + if (keys) { + btree_split_insert_keys(as, trans, path, n1, keys); + BUG_ON(!bch2_keylist_empty(keys)); + } + bch2_btree_build_aux_trees(n1); 
bch2_btree_update_add_new_node(as, n1); six_unlock_write(&n1->c.lock); @@ -1702,9 +1688,6 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t BUG_ON(!as || as->b); bch2_verify_keylist_sorted(keys); - if (!(local_clock() & 63)) - return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race); - ret = bch2_btree_node_lock_write(trans, path, &b->c); if (ret) return ret; @@ -1741,8 +1724,10 @@ split: * We could attempt to avoid the transaction restart, by calling * bch2_btree_path_upgrade() and allocating more nodes: */ - if (b->c.level >= as->update_level) + if (b->c.level >= as->update_level) { + trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b); return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race); + } return btree_split(as, trans, path, b, keys, flags); } @@ -1798,8 +1783,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, b = path->l[level].b; - if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) || - (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) { + if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) || + (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) { b->sib_u64s[sib] = U16_MAX; return 0; } @@ -1832,7 +1817,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, next = m; } - if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) { + if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) { struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; bch2_bpos_to_text(&buf1, prev->data->max_key); @@ -1874,9 +1859,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, parent = btree_node_parent(path, b); as = bch2_btree_update_start(trans, path, level, false, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_USE_RESERVE| - flags); + BTREE_INSERT_NOFAIL|flags); ret = PTR_ERR_OR_ZERO(as); if (ret) goto err; @@ -2022,6 +2005,7 @@ err: struct async_btree_rewrite { struct bch_fs *c; struct work_struct work; + struct list_head list; enum btree_id btree_id; unsigned level; struct bpos pos; @@ -2031,6 +2015,7 @@ struct async_btree_rewrite { static int async_btree_node_rewrite_trans(struct btree_trans *trans, struct async_btree_rewrite *a) { + struct bch_fs *c = trans->c; struct btree_iter iter; struct btree *b; int ret; @@ -2042,8 +2027,18 @@ static int async_btree_node_rewrite_trans(struct btree_trans *trans, if (ret) goto out; - if (!b || b->data->keys.seq != a->seq) + if (!b || b->data->keys.seq != a->seq) { + struct printbuf buf = PRINTBUF; + + if (b) + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); + else + prt_str(&buf, "(null"); + bch_info(c, "%s: node to rewrite not found:, searching for seq %llu, got\n%s", + __func__, a->seq, buf.buf); + printbuf_exit(&buf); goto out; + } ret = bch2_btree_node_rewrite(trans, &iter, b, 0); out: @@ -2052,28 +2047,29 @@ out: return ret; } -void async_btree_node_rewrite_work(struct work_struct *work) +static void async_btree_node_rewrite_work(struct work_struct *work) { struct async_btree_rewrite *a = container_of(work, struct async_btree_rewrite, work); struct bch_fs *c = a->c; + int ret; - bch2_trans_do(c, NULL, NULL, 0, + ret = bch2_trans_do(c, NULL, NULL, 0, async_btree_node_rewrite_trans(&trans, a)); - percpu_ref_put(&c->writes); + if (ret) + bch_err(c, "%s: error %s", __func__, bch2_err_str(ret)); + bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite); kfree(a); } void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) { struct 
async_btree_rewrite *a; - - if (!percpu_ref_tryget_live(&c->writes)) - return; + int ret; a = kmalloc(sizeof(*a), GFP_NOFS); if (!a) { - percpu_ref_put(&c->writes); + bch_err(c, "%s: error allocating memory", __func__); return; } @@ -2082,15 +2078,68 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) a->level = b->c.level; a->pos = b->key.k.p; a->seq = b->data->keys.seq; - INIT_WORK(&a->work, async_btree_node_rewrite_work); + + if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) { + mutex_lock(&c->pending_node_rewrites_lock); + list_add(&a->list, &c->pending_node_rewrites); + mutex_unlock(&c->pending_node_rewrites_lock); + return; + } + + if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { + if (test_bit(BCH_FS_STARTED, &c->flags)) { + bch_err(c, "%s: error getting c->writes ref", __func__); + kfree(a); + return; + } + + ret = bch2_fs_read_write_early(c); + if (ret) { + bch_err(c, "%s: error going read-write: %s", + __func__, bch2_err_str(ret)); + kfree(a); + return; + } + + bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); + } + queue_work(c->btree_interior_update_worker, &a->work); } +void bch2_do_pending_node_rewrites(struct bch_fs *c) +{ + struct async_btree_rewrite *a, *n; + + mutex_lock(&c->pending_node_rewrites_lock); + list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { + list_del(&a->list); + + bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); + queue_work(c->btree_interior_update_worker, &a->work); + } + mutex_unlock(&c->pending_node_rewrites_lock); +} + +void bch2_free_pending_node_rewrites(struct bch_fs *c) +{ + struct async_btree_rewrite *a, *n; + + mutex_lock(&c->pending_node_rewrites_lock); + list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { + list_del(&a->list); + + kfree(a); + } + mutex_unlock(&c->pending_node_rewrites_lock); +} + static int __bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, struct btree *new_hash, struct bkey_i *new_key, + unsigned commit_flags, bool skip_triggers) { struct bch_fs *c = trans->c; @@ -2126,11 +2175,11 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, _THIS_IP_); BUG_ON(iter2.path->level != b->c.level); - BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p)); + BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p)); btree_path_set_level_up(trans, iter2.path); - bch2_btree_path_check_sort(trans, iter2.path, 0); + trans->paths_sorted = false; ret = bch2_btree_iter_traverse(&iter2) ?: bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN); @@ -2151,12 +2200,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, trans->extra_journal_entries.nr += jset_u64s(new_key->k.u64s); } - ret = bch2_trans_commit(trans, NULL, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_NOCHECK_RW| - BTREE_INSERT_USE_RESERVE| - BTREE_INSERT_JOURNAL_RECLAIM| - JOURNAL_WATERMARK_reserved); + ret = bch2_trans_commit(trans, NULL, NULL, commit_flags); if (ret) goto err; @@ -2190,7 +2234,7 @@ err: int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, struct bkey_i *new_key, - bool skip_triggers) + unsigned commit_flags, bool skip_triggers) { struct bch_fs *c = trans->c; struct btree *new_hash = NULL; @@ -2211,19 +2255,17 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite if (btree_ptr_hash_val(new_key) != b->hash_val) { ret = bch2_btree_cache_cannibalize_lock(c, &cl); if (ret) { - bch2_trans_unlock(trans); - closure_sync(&cl); - ret = bch2_trans_relock(trans); + 
ret = drop_locks_do(trans, (closure_sync(&cl), 0)); if (ret) return ret; } - new_hash = bch2_btree_node_mem_alloc(c, false); + new_hash = bch2_btree_node_mem_alloc(trans, false); } path->intent_ref++; - ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, - new_key, skip_triggers); + ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key, + commit_flags, skip_triggers); --path->intent_ref; if (new_hash) { @@ -2241,7 +2283,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, struct btree *b, struct bkey_i *new_key, - bool skip_triggers) + unsigned commit_flags, bool skip_triggers) { struct btree_iter iter; int ret; @@ -2262,7 +2304,8 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, BUG_ON(!btree_node_hashed(b)); - ret = bch2_btree_node_update_key(trans, &iter, b, new_key, skip_triggers); + ret = bch2_btree_node_update_key(trans, &iter, b, new_key, + commit_flags, skip_triggers); out: bch2_trans_iter_exit(trans, &iter); return ret; @@ -2281,8 +2324,9 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b) bch2_btree_set_root_inmem(c, b); } -void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) +static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id) { + struct bch_fs *c = trans->c; struct closure cl; struct btree *b; int ret; @@ -2294,7 +2338,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) closure_sync(&cl); } while (ret); - b = bch2_btree_node_mem_alloc(c, false); + b = bch2_btree_node_mem_alloc(trans, false); bch2_btree_cache_cannibalize_unlock(c); set_btree_node_fake(b); @@ -2323,6 +2367,12 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); + return 0; +} + +void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) +{ + bch2_trans_run(c, __bch2_btree_root_alloc(&trans, id)); } void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c) @@ -2335,7 +2385,7 @@ void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c) as, as->mode, as->nodes_written, - atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK, + closure_nr_remaining(&as->cl), as->journal.seq); mutex_unlock(&c->btree_interior_update_lock); } @@ -2361,20 +2411,15 @@ bool bch2_btree_interior_updates_flush(struct bch_fs *c) return ret; } -void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset) +void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry) { - struct btree_root *r; - struct jset_entry *entry; + struct btree_root *r = bch2_btree_id_root(c, entry->btree_id); mutex_lock(&c->btree_root_lock); - vstruct_for_each(jset, entry) - if (entry->type == BCH_JSET_ENTRY_btree_root) { - r = &c->btree_roots[entry->btree_id]; - r->level = entry->level; - r->alive = true; - bkey_copy(&r->key, &entry->start[0]); - } + r->level = entry->level; + r->alive = true; + bkey_copy(&r->key, &entry->start[0]); mutex_unlock(&c->btree_root_lock); } @@ -2394,15 +2439,15 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c, mutex_lock(&c->btree_root_lock); - for (i = 0; i < BTREE_ID_NR; i++) - if (c->btree_roots[i].alive && !test_bit(i, &have)) { - journal_entry_set(end, - BCH_JSET_ENTRY_btree_root, - i, c->btree_roots[i].level, - &c->btree_roots[i].key, - c->btree_roots[i].key.u64s); + for (i = 0; i < btree_id_nr_alive(c); i++) { + struct btree_root *r = bch2_btree_id_root(c, i); + + if 
(r->alive && !test_bit(i, &have)) { + journal_entry_set(end, BCH_JSET_ENTRY_btree_root, + i, r->level, &r->key, r->key.k.u64s); end = vstruct_next(end); } + } mutex_unlock(&c->btree_root_lock); @@ -2416,7 +2461,7 @@ void bch2_fs_btree_interior_update_exit(struct bch_fs *c) mempool_exit(&c->btree_interior_update_pool); } -int bch2_fs_btree_interior_update_init(struct bch_fs *c) +void bch2_fs_btree_interior_update_init_early(struct bch_fs *c) { mutex_init(&c->btree_reserve_cache_lock); INIT_LIST_HEAD(&c->btree_interior_update_list); @@ -2424,11 +2469,20 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c) mutex_init(&c->btree_interior_update_lock); INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work); + INIT_LIST_HEAD(&c->pending_node_rewrites); + mutex_init(&c->pending_node_rewrites_lock); +} + +int bch2_fs_btree_interior_update_init(struct bch_fs *c) +{ c->btree_interior_update_worker = alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1); if (!c->btree_interior_update_worker) - return -ENOMEM; + return -BCH_ERR_ENOMEM_btree_interior_update_worker_init; - return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, - sizeof(struct btree_update)); + if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, + sizeof(struct btree_update))) + return -BCH_ERR_ENOMEM_btree_interior_update_pool_init; + + return 0; }
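
The rewritten __btree_split_node() in the hunks above changes how keys are distributed when a node is split: instead of scanning the first bset for a pivot, it walks all live keys, sends roughly 3/5 of the live u64s to the lower node, drops deleted keys, and builds a fresh bkey format for each half covering that half's min/max keys before repacking in a second pass. A minimal standalone sketch of just the pivot-selection pass, using toy types in place of the bcachefs structures (toy_key, toy_split_point and the sizes below are illustrative only, not part of the source):

/*
 * Illustrative sketch, not the real bcachefs code: mimics the key
 * distribution policy of the new __btree_split_node() using toy types.
 * The lower node receives keys until roughly 3/5 of the live u64s have
 * been consumed; deleted keys do not count toward the total.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_key {
	unsigned	u64s;		/* size of this key, in u64s */
	unsigned	pos;		/* stand-in for struct bpos */
	bool		deleted;
};

/* Returns the index of the first key that goes to the higher node. */
static unsigned toy_split_point(const struct toy_key *keys, unsigned nr,
				unsigned live_u64s)
{
	unsigned n1_u64s = (live_u64s * 3) / 5;
	unsigned u64s = 0, i;

	for (i = 0; i < nr; i++) {
		if (keys[i].deleted)
			continue;
		if (u64s >= n1_u64s)	/* threshold reached before this key */
			break;
		u64s += keys[i].u64s;
	}
	return i;
}

int main(void)
{
	struct toy_key keys[] = {
		{ 3, 10, false }, { 4, 20, false }, { 3, 30, true  },
		{ 5, 40, false }, { 4, 50, false }, { 3, 60, false },
	};
	unsigned live_u64s = 3 + 4 + 5 + 4 + 3;	/* deleted key not counted */
	unsigned split = toy_split_point(keys, 6, live_u64s);

	printf("keys [0..%u) -> lower node, [%u..6) -> higher node\n",
	       split, split);
	return 0;
}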