set_btree_node_accessed(b);
set_btree_node_dirty(b);
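+ /*
+ * presumably so flush_held_btree_writes() can't miss a node that
+ * hasn't hit disk yet: mark new nodes as needing a write from the
+ * moment they're created
+ */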
+ set_btree_node_need_write(b);
bch2_bset_init_first(b, &b->data->keys);
memset(&b->nr, 0, sizeof(b->nr));
closure_debug_destroy(&as->cl);
mempool_free(as, &c->btree_interior_update_pool);
- percpu_ref_put(&c->writes);
closure_wake_up(&c->btree_interior_update_wait);
mutex_unlock(&c->btree_interior_update_lock);
closure_wait(&btree_current_write(b)->wait, cl);
list_del(&as->write_blocked_list);
+
+ /*
+ * for flush_held_btree_writes() waiting on updates to flush or
+ * nodes to be writeable:
+ */
+ closure_wake_up(&c->btree_interior_update_wait);
mutex_unlock(&c->btree_interior_update_lock);
list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
list_del(&p->write_blocked_list);
btree_update_reparent(as, p);
+
+ /*
+ * for flush_held_btree_writes() waiting on updates to flush or
+ * nodes to be writeable:
+ */
+ closure_wake_up(&c->btree_interior_update_wait);
}
clear_btree_node_dirty(b);
struct btree_reserve *reserve;
struct btree_update *as;
- if (unlikely(!percpu_ref_tryget(&c->writes)))
- return ERR_PTR(-EROFS);
-
reserve = bch2_btree_reserve_get(c, nr_nodes, flags, cl);
- if (IS_ERR(reserve)) {
- percpu_ref_put(&c->writes);
+ if (IS_ERR(reserve))
return ERR_CAST(reserve);
- }
as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
memset(as, 0, sizeof(*as));
{
struct bch_fs *c = as->c;
struct btree *old = btree_node_root(c, b);
- struct bch_fs_usage stats = { 0 };
+ struct bch_fs_usage *fs_usage;
__bch2_btree_set_root_inmem(c, b);
mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read_preempt_disable(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
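+ /*
+ * accumulate usage deltas in a scratch buffer, fetched while
+ * preemption is disabled, and apply them to the reservation below:
+ */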
+ fs_usage = bch2_fs_usage_get_scratch(c);
bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
true, 0,
gc_pos_btree_root(b->btree_id),
- &stats, 0, 0);
+ fs_usage, 0, 0);
if (old && !btree_node_fake(old))
bch2_btree_node_free_index(as, NULL,
bkey_i_to_s_c(&old->key),
- &stats);
- bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
+ fs_usage);
+ bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res,
gc_pos_btree_root(b->btree_id));
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
mutex_unlock(&c->btree_interior_update_lock);
}
struct btree_node_iter *node_iter)
{
struct bch_fs *c = as->c;
- struct bch_fs_usage stats = { 0 };
+ struct bch_fs_usage *fs_usage;
struct bkey_packed *k;
struct bkey tmp;
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read_preempt_disable(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ fs_usage = bch2_fs_usage_get_scratch(c);
bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
true, 0,
- gc_pos_btree_node(b), &stats, 0, 0);
+ gc_pos_btree_node(b), fs_usage, 0, 0);
while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
       bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
	bch2_btree_node_iter_advance(node_iter, b);
if (k && !bkey_cmp_packed(b, k, &insert->k))
bch2_btree_node_free_index(as, b,
bkey_disassemble(b, k, &tmp),
- &stats);
+ fs_usage);
- bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
+ bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res,
gc_pos_btree_node(b));
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
mutex_unlock(&c->btree_interior_update_lock);
bch2_btree_bset_insert_key(iter, b, node_iter, insert);
btree_interior_update_add_node_reference(as, b);
+ /*
+ * XXX: the rest of the update path treats this like we're actually
+ * inserting a new node and deleting the existing node, so the
+ * reservation needs to include enough space for @b.
+ *
+ * That's dubious, though, and it's surprising the code works this
+ * way at all; it needs to be reworked into something saner.
+ *
+ * (@b is probably just double counted until the btree update
+ * finishes and "deletes" @b on disk.)
+ */
+ ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
+ c->opts.btree_node_size *
+ bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)),
+ BCH_DISK_RESERVATION_NOFAIL|
+ BCH_DISK_RESERVATION_GC_LOCK_HELD);
+ BUG_ON(ret);
+
parent = btree_node_parent(iter, b);
if (parent) {
if (new_hash) {
bkey_copy(&b->key, &new_key->k_i);
}
} else {
- struct bch_fs_usage stats = { 0 };
+ struct bch_fs_usage *fs_usage;
BUG_ON(btree_node_root(c, b) != b);
bch2_btree_node_lock_write(b, iter);
mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read_preempt_disable(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ fs_usage = bch2_fs_usage_get_scratch(c);
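+ /*
+ * mark the new key, free the old root's index entry, then apply
+ * the net usage change against this update's reservation:
+ */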
bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
true, 0,
gc_pos_btree_root(b->btree_id),
- &stats, 0, 0);
+ fs_usage, 0, 0);
bch2_btree_node_free_index(as, NULL,
bkey_i_to_s_c(&b->key),
- &stats);
- bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
+ fs_usage);
+ bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res,
gc_pos_btree_root(b->btree_id));
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
mutex_unlock(&c->btree_interior_update_lock);
if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
BUG_ON(btree_node_root(c, b));
__bch2_btree_set_root_inmem(c, b);
- bch2_btree_set_root_ondisk(c, b, READ);
}
void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)