#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
+#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "buckets.h"
+#include "clock.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
-#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
+#include "trace.h"
#include <linux/random.h>
-#include <trace/events/bcachefs.h>
static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
struct btree_path *, struct btree *,
break;
bp = bkey_s_c_to_btree_ptr_v2(k);
- if (bpos_cmp(next_node, bp.v->min_key)) {
+ if (!bpos_eq(next_node, bp.v->min_key)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, next_node);
bch2_bpos_to_text(&buf2, bp.v->min_key);
bch2_btree_node_iter_advance(&iter, b);
if (bch2_btree_node_iter_end(&iter)) {
- if (bpos_cmp(k.k->p, b->key.k.p)) {
+ if (!bpos_eq(k.k->p, b->key.k.p)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, b->key.k.p);
bch2_bpos_to_text(&buf2, k.k->p);
{
trace_and_count(c, btree_node_free, c, b);
+ BUG_ON(btree_node_write_blocked(b));
BUG_ON(btree_node_dirty(b));
BUG_ON(btree_node_need_write(b));
BUG_ON(b == btree_node_root(c, b));
bch2_btree_node_hash_remove(&c->btree_cache, b);
__btree_node_free(c, b);
six_unlock_write(&b->c.lock);
- mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+ mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
trans_for_each_path(trans, path)
if (path->l[level].b == b) {
struct bch_fs *c = trans->c;
struct write_point *wp;
struct btree *b;
- __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+ BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
struct open_buckets ob = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
- unsigned nr_reserve;
- enum alloc_reserve alloc_reserve;
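+ /*
+ * The watermark comes from the commit flags now; only journal reclaim
+ * updates may consume the last BTREE_NODE_RESERVE nodes in the reserve
+ * cache:
+ */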
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+ unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
+ ? BTREE_NODE_RESERVE
+ : 0;
int ret;
- if (flags & BTREE_INSERT_USE_RESERVE) {
- nr_reserve = 0;
- alloc_reserve = RESERVE_btree_movinggc;
- } else {
- nr_reserve = BTREE_NODE_RESERVE;
- alloc_reserve = RESERVE_btree;
- }
-
mutex_lock(&c->btree_reserve_cache_lock);
if (c->btree_reserve_cache_nr > nr_reserve) {
struct btree_alloc *a =
&devs_have,
res->nr_replicas,
c->opts.metadata_replicas_required,
- alloc_reserve, 0, cl, &wp);
+ watermark, 0, cl, &wp);
if (unlikely(ret))
return ERR_PTR(ret);
bch2_open_bucket_get(c, wp, &ob);
bch2_alloc_sectors_done(c, wp);
mem_alloc:
- b = bch2_btree_node_mem_alloc(c, interior_node);
+ b = bch2_btree_node_mem_alloc(trans, interior_node);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
BUG_ON(ret);
trace_and_count(c, btree_node_alloc, c, b);
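+ /* advance the write io clock by one btree node's worth of sectors: */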
+ bch2_increment_clock(c, btree_sectors(c), WRITE);
return b;
}
* which may require allocations as well.
*/
ret = commit_do(&trans, &as->disk_res, &journal_seq,
+ BCH_WATERMARK_reclaim|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_NOCHECK_RW|
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_JOURNAL_RECLAIM|
- JOURNAL_WATERMARK_reserved,
+ BTREE_INSERT_JOURNAL_RECLAIM,
btree_update_nodes_written_trans(&trans, as));
bch2_trans_unlock(&trans);
bch2_trans_unlock(&trans);
btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(&trans, path, b->c.level, SIX_LOCK_intent);
- bch2_btree_path_level_init(&trans, path, b);
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
+ path->l[b->c.level].b = b;
bch2_btree_node_lock_write_nofail(&trans, path, &b->c);
mutex_unlock(&c->btree_interior_update_lock);
- mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+ mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
six_unlock_write(&b->c.lock);
btree_node_write_if_need(c, b, SIX_LOCK_intent);
BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
BUG_ON(!btree_node_dirty(b));
+ BUG_ON(!b->c.level);
as->mode = BTREE_INTERIOR_UPDATING_NODE;
as->b = b;
clear_btree_node_dirty_acct(c, b);
clear_btree_node_need_write(b);
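+ /* this node is being freed and will never be written: */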
+ clear_btree_node_write_blocked(b);
/*
* Does this node have unwritten data that has a pin on the journal?
? BCH_DISK_RESERVATION_NOFAIL : 0;
unsigned nr_nodes[2] = { 0, 0 };
unsigned update_level = level;
- int journal_flags = flags & JOURNAL_WATERMARK_MASK;
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+ unsigned journal_flags = 0;
int ret = 0;
u32 restart_count = trans->restart_count;
BUG_ON(!path->should_be_locked);
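+ /*
+ * Interior node updates always run with a watermark of at least
+ * BCH_WATERMARK_btree; copygc gets its own btree watermark:
+ */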
+ if (watermark == BCH_WATERMARK_copygc)
+ watermark = BCH_WATERMARK_btree_copygc;
+ if (watermark < BCH_WATERMARK_btree)
+ watermark = BCH_WATERMARK_btree;
+
+ flags &= ~BCH_WATERMARK_MASK;
+ flags |= watermark;
+
if (flags & BTREE_INSERT_JOURNAL_RECLAIM)
journal_flags |= JOURNAL_RES_GET_NONBLOCK;
+ journal_flags |= watermark;
while (1) {
nr_nodes[!!update_level] += 1 + split;
BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
break;
- split = true;
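+ /* the parent only needs splitting if it's oversized: */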
+ split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
}
if (flags & BTREE_INSERT_GC_LOCK_HELD)
lockdep_assert_held(&c->gc_lock);
else if (!down_read_trylock(&c->gc_lock)) {
- bch2_trans_unlock(trans);
- down_read(&c->gc_lock);
- ret = bch2_trans_relock(trans);
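+ /* drop_locks_do() drops btree locks, runs the expression, relocks: */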
+ ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0));
if (ret) {
up_read(&c->gc_lock);
return ERR_PTR(ret);
}
}
- as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
+ as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
memset(as, 0, sizeof(*as));
closure_init(&as->cl, NULL);
as->c = c;
BTREE_UPDATE_JOURNAL_RES,
journal_flags|JOURNAL_RES_GET_NONBLOCK);
if (ret) {
- bch2_trans_unlock(trans);
-
if (flags & BTREE_INSERT_JOURNAL_RECLAIM) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
goto err;
}
- ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
+ ret = drop_locks_do(trans,
+ bch2_journal_preres_get(&c->journal, &as->journal_preres,
BTREE_UPDATE_JOURNAL_RES,
- journal_flags);
- if (ret) {
+ journal_flags));
+ if (ret == -BCH_ERR_journal_preres_get_blocked) {
trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags);
ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
- goto err;
}
-
- ret = bch2_trans_relock(trans);
if (ret)
goto err;
}
bch2_err_matches(ret, ENOMEM)) {
struct closure cl;
+ /*
+ * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
+ * flag
+ */
+ if (bch2_err_matches(ret, ENOSPC) &&
+ (flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
+ watermark != BCH_WATERMARK_reclaim) {
+ ret = -BCH_ERR_journal_reclaim_would_deadlock;
+ goto err;
+ }
+
closure_init_stack(&cl);
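+ /* wait on outstanding allocations, then retry: */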
do {
bch2_trans_unlock(trans);
closure_sync(&cl);
- } while (ret == -EAGAIN);
+ } while (bch2_err_matches(ret, BCH_ERR_operation_blocked));
}
if (ret) {
(b->c.level < btree_node_root(c, b)->c.level ||
!btree_node_dying(btree_node_root(c, b))));
- btree_node_root(c, b) = b;
+ bch2_btree_id_root(c, b->c.btree_id)->b = b;
mutex_unlock(&c->btree_root_lock);
bch2_recalc_btree_reserve(c);
while (!bch2_keylist_empty(keys)) {
struct bkey_i *k = bch2_keylist_front(keys);
- if (bpos_cmp(k->k.p, b->key.k.p) > 0)
+ if (bpos_gt(k->k.p, b->key.k.p))
break;
bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
struct btree *n[2])
{
struct bkey_packed *k;
- struct bpos n1_pos;
+ struct bpos n1_pos = POS_MIN;
struct btree_node_iter iter;
struct bset *bsets[2];
struct bkey_format_state format[2];
out[i]->needs_whiteout = false;
btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
- out[i] = bkey_next(out[i]);
+ out[i] = bkey_p_next(out[i]);
}
for (i = 0; i < 2; i++) {
struct keylist *keys)
{
if (!bch2_keylist_empty(keys) &&
- bpos_cmp(bch2_keylist_front(keys)->k.p,
- b->data->max_key) <= 0) {
+ bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
struct btree_node_iter node_iter;
bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
BUG_ON(!as || as->b);
bch2_verify_keylist_sorted(keys);
- if (!(local_clock() & 63))
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
-
ret = bch2_btree_node_lock_write(trans, path, &b->c);
if (ret)
return ret;
* We could attempt to avoid the transaction restart by calling
* bch2_btree_path_upgrade() and allocating more nodes:
*/
- if (b->c.level >= as->update_level)
+ if (b->c.level >= as->update_level) {
+ trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
+ }
return btree_split(as, trans, path, b, keys, flags);
}
b = path->l[level].b;
- if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
- (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
+ if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
+ (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
b->sib_u64s[sib] = U16_MAX;
return 0;
}
next = m;
}
- if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
+ if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
bch2_bpos_to_text(&buf1, prev->data->max_key);
parent = btree_node_parent(path, b);
as = bch2_btree_update_start(trans, path, level, false,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE|
- flags);
+ BTREE_INSERT_NOFAIL|flags);
ret = PTR_ERR_OR_ZERO(as);
if (ret)
goto err;
struct async_btree_rewrite {
struct bch_fs *c;
struct work_struct work;
+ struct list_head list;
enum btree_id btree_id;
unsigned level;
struct bpos pos;
static int async_btree_node_rewrite_trans(struct btree_trans *trans,
struct async_btree_rewrite *a)
{
+ struct bch_fs *c = trans->c;
struct btree_iter iter;
struct btree *b;
int ret;
if (ret)
goto out;
- if (!b || b->data->keys.seq != a->seq)
+ if (!b || b->data->keys.seq != a->seq) {
+ struct printbuf buf = PRINTBUF;
+
+ if (b)
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ else
+ prt_str(&buf, "(null)");
+ bch_info(c, "%s: node to rewrite not found; searching for seq %llu, got\n%s",
+ __func__, a->seq, buf.buf);
+ printbuf_exit(&buf);
goto out;
+ }
ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
out:
return ret;
}
-void async_btree_node_rewrite_work(struct work_struct *work)
+static void async_btree_node_rewrite_work(struct work_struct *work)
{
struct async_btree_rewrite *a =
container_of(work, struct async_btree_rewrite, work);
struct bch_fs *c = a->c;
+ int ret;
- bch2_trans_do(c, NULL, NULL, 0,
+ ret = bch2_trans_do(c, NULL, NULL, 0,
async_btree_node_rewrite_trans(&trans, a));
- percpu_ref_put(&c->writes);
+ if (ret)
+ bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
+ bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
kfree(a);
}
void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
{
struct async_btree_rewrite *a;
-
- if (!percpu_ref_tryget_live(&c->writes))
- return;
+ int ret;
a = kmalloc(sizeof(*a), GFP_NOFS);
if (!a) {
- percpu_ref_put(&c->writes);
+ bch_err(c, "%s: error allocating memory", __func__);
return;
}
a->level = b->c.level;
a->pos = b->key.k.p;
a->seq = b->data->keys.seq;
-
INIT_WORK(&a->work, async_btree_node_rewrite_work);
+
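+ /*
+ * During recovery, while the filesystem isn't allowed to go read-write
+ * yet, node rewrites are queued up to be kicked off later by
+ * bch2_do_pending_node_rewrites():
+ */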
+ if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_add(&a->list, &c->pending_node_rewrites);
+ mutex_unlock(&c->pending_node_rewrites_lock);
+ return;
+ }
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
+ if (test_bit(BCH_FS_STARTED, &c->flags)) {
+ bch_err(c, "%s: error getting c->writes ref", __func__);
+ kfree(a);
+ return;
+ }
+
+ ret = bch2_fs_read_write_early(c);
+ if (ret) {
+ bch_err(c, "%s: error going read-write: %s",
+ __func__, bch2_err_str(ret));
+ kfree(a);
+ return;
+ }
+
+ bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+ }
+
queue_work(c->btree_interior_update_worker, &a->work);
}
+void bch2_do_pending_node_rewrites(struct bch_fs *c)
+{
+ struct async_btree_rewrite *a, *n;
+
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+ list_del(&a->list);
+
+ bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+ queue_work(c->btree_interior_update_worker, &a->work);
+ }
+ mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
+void bch2_free_pending_node_rewrites(struct bch_fs *c)
+{
+ struct async_btree_rewrite *a, *n;
+
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+ list_del(&a->list);
+
+ kfree(a);
+ }
+ mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
static int __bch2_btree_node_update_key(struct btree_trans *trans,
struct btree_iter *iter,
struct btree *b, struct btree *new_hash,
struct bkey_i *new_key,
+ unsigned commit_flags,
bool skip_triggers)
{
struct bch_fs *c = trans->c;
_THIS_IP_);
BUG_ON(iter2.path->level != b->c.level);
- BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
+ BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
btree_path_set_level_up(trans, iter2.path);
- bch2_btree_path_check_sort(trans, iter2.path, 0);
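+ /* changing the path's level invalidates the paths' sort order: */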
+ trans->paths_sorted = false;
ret = bch2_btree_iter_traverse(&iter2) ?:
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN);
trans->extra_journal_entries.nr += jset_u64s(new_key->k.u64s);
}
- ret = bch2_trans_commit(trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_NOCHECK_RW|
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_JOURNAL_RECLAIM|
- JOURNAL_WATERMARK_reserved);
+ ret = bch2_trans_commit(trans, NULL, NULL, commit_flags);
if (ret)
goto err;
int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
struct btree *b, struct bkey_i *new_key,
- bool skip_triggers)
+ unsigned commit_flags, bool skip_triggers)
{
struct bch_fs *c = trans->c;
struct btree *new_hash = NULL;
if (btree_ptr_hash_val(new_key) != b->hash_val) {
ret = bch2_btree_cache_cannibalize_lock(c, &cl);
if (ret) {
- bch2_trans_unlock(trans);
- closure_sync(&cl);
- ret = bch2_trans_relock(trans);
+ ret = drop_locks_do(trans, (closure_sync(&cl), 0));
if (ret)
return ret;
}
- new_hash = bch2_btree_node_mem_alloc(c, false);
+ new_hash = bch2_btree_node_mem_alloc(trans, false);
}
path->intent_ref++;
- ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
- new_key, skip_triggers);
+ ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
+ commit_flags, skip_triggers);
--path->intent_ref;
if (new_hash) {
int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
struct btree *b, struct bkey_i *new_key,
- bool skip_triggers)
+ unsigned commit_flags, bool skip_triggers)
{
struct btree_iter iter;
int ret;
BUG_ON(!btree_node_hashed(b));
- ret = bch2_btree_node_update_key(trans, &iter, b, new_key, skip_triggers);
+ ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
+ commit_flags, skip_triggers);
out:
bch2_trans_iter_exit(trans, &iter);
return ret;
bch2_btree_set_root_inmem(c, b);
}
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
{
+ struct bch_fs *c = trans->c;
struct closure cl;
struct btree *b;
int ret;
closure_sync(&cl);
} while (ret);
- b = bch2_btree_node_mem_alloc(c, false);
+ b = bch2_btree_node_mem_alloc(trans, false);
bch2_btree_cache_cannibalize_unlock(c);
set_btree_node_fake(b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
+ return 0;
+}
+
+void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+{
+ bch2_trans_run(c, __bch2_btree_root_alloc(&trans, id));
}
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
as,
as->mode,
as->nodes_written,
- atomic_read(&as->cl.remaining) & CLOSURE_REMAINING_MASK,
+ closure_nr_remaining(&as->cl),
as->journal.seq);
mutex_unlock(&c->btree_interior_update_lock);
}
return ret;
}
-void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
+void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
{
- struct btree_root *r;
- struct jset_entry *entry;
+ struct btree_root *r = bch2_btree_id_root(c, entry->btree_id);
mutex_lock(&c->btree_root_lock);
- vstruct_for_each(jset, entry)
- if (entry->type == BCH_JSET_ENTRY_btree_root) {
- r = &c->btree_roots[entry->btree_id];
- r->level = entry->level;
- r->alive = true;
- bkey_copy(&r->key, &entry->start[0]);
- }
+ r->level = entry->level;
+ r->alive = true;
+ bkey_copy(&r->key, &entry->start[0]);
mutex_unlock(&c->btree_root_lock);
}
mutex_lock(&c->btree_root_lock);
- for (i = 0; i < BTREE_ID_NR; i++)
- if (c->btree_roots[i].alive && !test_bit(i, &have)) {
- journal_entry_set(end,
- BCH_JSET_ENTRY_btree_root,
- i, c->btree_roots[i].level,
- &c->btree_roots[i].key,
- c->btree_roots[i].key.k.u64s);
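+ /*
+ * btree_id_nr_alive() can be greater than BTREE_ID_NR: we also keep
+ * roots for btree ids we don't know about:
+ */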
+ for (i = 0; i < btree_id_nr_alive(c); i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (r->alive && !test_bit(i, &have)) {
+ journal_entry_set(end, BCH_JSET_ENTRY_btree_root,
+ i, r->level, &r->key, r->key.k.u64s);
end = vstruct_next(end);
}
+ }
mutex_unlock(&c->btree_root_lock);
mempool_exit(&c->btree_interior_update_pool);
}
-int bch2_fs_btree_interior_update_init(struct bch_fs *c)
+void bch2_fs_btree_interior_update_init_early(struct bch_fs *c)
{
mutex_init(&c->btree_reserve_cache_lock);
INIT_LIST_HEAD(&c->btree_interior_update_list);
mutex_init(&c->btree_interior_update_lock);
INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);
+ INIT_LIST_HEAD(&c->pending_node_rewrites);
+ mutex_init(&c->pending_node_rewrites_lock);
+}
+
+int bch2_fs_btree_interior_update_init(struct bch_fs *c)
+{
c->btree_interior_update_worker =
alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
if (!c->btree_interior_update_worker)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
- return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
- sizeof(struct btree_update));
+ if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
+ sizeof(struct btree_update)))
+ return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
+
+ return 0;
}