#include "btree_iter.h"
#include "btree_locking.h"
#include "buckets.h"
+#include "clock.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
+#include "trace.h"
#include <linux/random.h>
-#include <trace/events/bcachefs.h>
static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
struct btree_path *, struct btree *,
{
trace_and_count(c, btree_node_free, c, b);
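+ /* freed nodes must be fully quiesced: not write-blocked, dirty, or awaiting a write */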
+ BUG_ON(btree_node_write_blocked(b));
BUG_ON(btree_node_dirty(b));
BUG_ON(btree_node_need_write(b));
BUG_ON(b == btree_node_root(c, b));
struct bch_fs *c = trans->c;
struct write_point *wp;
struct btree *b;
- __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+ BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
struct open_buckets ob = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
unsigned nr_reserve;
bch2_open_bucket_get(c, wp, &ob);
bch2_alloc_sectors_done(c, wp);
mem_alloc:
- b = bch2_btree_node_mem_alloc(c, interior_node);
+ b = bch2_btree_node_mem_alloc(trans, interior_node);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
BUG_ON(ret);
trace_and_count(c, btree_node_alloc, c, b);
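+ /* presumably: feed the write IO clock so btree node allocations count toward clock-driven triggers (e.g. copygc) */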
+ bch2_increment_clock(c, btree_sectors(c), WRITE);
return b;
}
bch2_trans_unlock(&trans);
btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(&trans, path, b->c.level, SIX_LOCK_intent);
- bch2_btree_path_level_init(&trans, path, b);
+ path->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ path->l[b->c.level].b = b;
bch2_btree_node_lock_write_nofail(&trans, path, &b->c);
BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
BUG_ON(!btree_node_dirty(b));
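+ /* the node being tracked by an interior update must itself be an interior node (level > 0) */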
+ BUG_ON(!b->c.level);
as->mode = BTREE_INTERIOR_UPDATING_NODE;
as->b = b;
clear_btree_node_dirty_acct(c, b);
clear_btree_node_need_write(b);
+ clear_btree_node_write_blocked(b);
/*
* Does this node have unwritten data that has a pin on the journal?
out[i]->needs_whiteout = false;
btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
- out[i] = bkey_next(out[i]);
+ out[i] = bkey_p_next(out[i]);
}
for (i = 0; i < 2; i++) {
BUG_ON(!as || as->b);
bch2_verify_keylist_sorted(keys);
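+ /* presumably fault injection: restart ~1/64 of the time to exercise the split-race restart path; the rewritten predicate keeps the same 1-in-64 odds */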
- if (!(local_clock() & 63))
+ if ((local_clock() & 63) == 63)
return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
ret = bch2_btree_node_lock_write(trans, path, &b->c);
* We could attempt to avoid the transaction restart, by calling
* bch2_btree_path_upgrade() and allocating more nodes:
*/
- if (b->c.level >= as->update_level)
+ if (b->c.level >= as->update_level) {
+ trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
+ }
return btree_split(as, trans, path, b, keys, flags);
}
struct async_btree_rewrite {
struct bch_fs *c;
struct work_struct work;
+ struct list_head list;
enum btree_id btree_id;
unsigned level;
struct bpos pos;
static int async_btree_node_rewrite_trans(struct btree_trans *trans,
struct async_btree_rewrite *a)
{
+ struct bch_fs *c = trans->c;
struct btree_iter iter;
struct btree *b;
int ret;
if (ret)
goto out;
- if (!b || b->data->keys.seq != a->seq)
+ if (!b || b->data->keys.seq != a->seq) {
+ struct printbuf buf = PRINTBUF;
+
+ if (b)
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ else
+ prt_str(&buf, "(null");
+ bch_info(c, "%s: node to rewrite not found:, searching for seq %llu, got\n%s",
+ __func__, a->seq, buf.buf);
+ printbuf_exit(&buf);
goto out;
+ }
ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
out:
struct async_btree_rewrite *a =
container_of(work, struct async_btree_rewrite, work);
struct bch_fs *c = a->c;
+ int ret;
- bch2_trans_do(c, NULL, NULL, 0,
+ ret = bch2_trans_do(c, NULL, NULL, 0,
async_btree_node_rewrite_trans(&trans, a));
- percpu_ref_put(&c->writes);
+ if (ret)
+ bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
+ bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
kfree(a);
}
void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
{
struct async_btree_rewrite *a;
-
- if (!percpu_ref_tryget_live(&c->writes))
- return;
+ int ret;
a = kmalloc(sizeof(*a), GFP_NOFS);
if (!a) {
- percpu_ref_put(&c->writes);
+ bch_err(c, "%s: error allocating memory", __func__);
return;
}
a->level = b->c.level;
a->pos = b->key.k.p;
a->seq = b->data->keys.seq;
-
INIT_WORK(&a->work, async_btree_node_rewrite_work);
+
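+ /* can't go RW yet (early in recovery): park the rewrite; bch2_do_pending_node_rewrites() will queue it once we're RW */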
+ if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_add(&a->list, &c->pending_node_rewrites);
+ mutex_unlock(&c->pending_node_rewrites_lock);
+ return;
+ }
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
+ if (test_bit(BCH_FS_STARTED, &c->flags)) {
+ bch_err(c, "%s: error getting c->writes ref", __func__);
+ kfree(a);
+ return;
+ }
+
+ ret = bch2_fs_read_write_early(c);
+ if (ret) {
+ bch_err(c, "%s: error going read-write: %s",
+ __func__, bch2_err_str(ret));
+ kfree(a);
+ return;
+ }
+
+ bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+ }
+
queue_work(c->btree_interior_update_worker, &a->work);
}
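+/* presumably called when the fs goes read-write: queue up rewrites parked while we couldn't go RW */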
+void bch2_do_pending_node_rewrites(struct bch_fs *c)
+{
+ struct async_btree_rewrite *a, *n;
+
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+ list_del(&a->list);
+
+ bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+ queue_work(c->btree_interior_update_worker, &a->work);
+ }
+ mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
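+/* shutdown path, presumably without ever having gone RW: free parked rewrites without running them */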
+void bch2_free_pending_node_rewrites(struct bch_fs *c)
+{
+ struct async_btree_rewrite *a, *n;
+
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+ list_del(&a->list);
+
+ kfree(a);
+ }
+ mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
static int __bch2_btree_node_update_key(struct btree_trans *trans,
struct btree_iter *iter,
struct btree *b, struct btree *new_hash,
btree_path_set_level_up(trans, iter2.path);
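+ /* the path's level changed, so the sorted path list is stale; mark it for lazy re-sorting */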
- bch2_btree_path_check_sort(trans, iter2.path, 0);
+ trans->paths_sorted = false;
ret = bch2_btree_iter_traverse(&iter2) ?:
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN);
return ret;
}
- new_hash = bch2_btree_node_mem_alloc(c, false);
+ new_hash = bch2_btree_node_mem_alloc(trans, false);
}
path->intent_ref++;
bch2_btree_set_root_inmem(c, b);
}
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
{
+ struct bch_fs *c = trans->c;
struct closure cl;
struct btree *b;
int ret;
closure_sync(&cl);
} while (ret);
- b = bch2_btree_node_mem_alloc(c, false);
+ b = bch2_btree_node_mem_alloc(trans, false);
bch2_btree_cache_cannibalize_unlock(c);
set_btree_node_fake(b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
+ return 0;
+}
+
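+/* thin wrapper: bch2_btree_node_mem_alloc() now takes a btree_trans, so allocate the root inside one */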
+void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+{
+ bch2_trans_run(c, __bch2_btree_root_alloc(&trans, id));
}
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
return ret;
}
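+/* now handles a single entry; the caller is assumed to walk the jset and call this for each btree_root entry */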
-void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
+void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
{
- struct btree_root *r;
- struct jset_entry *entry;
+ struct btree_root *r = &c->btree_roots[entry->btree_id];
mutex_lock(&c->btree_root_lock);
- vstruct_for_each(jset, entry)
- if (entry->type == BCH_JSET_ENTRY_btree_root) {
- r = &c->btree_roots[entry->btree_id];
- r->level = entry->level;
- r->alive = true;
- bkey_copy(&r->key, &entry->start[0]);
- }
+ r->level = entry->level;
+ r->alive = true;
+ bkey_copy(&r->key, &entry->start[0]);
mutex_unlock(&c->btree_root_lock);
}
BCH_JSET_ENTRY_btree_root,
i, c->btree_roots[i].level,
&c->btree_roots[i].key,
- c->btree_roots[i].key.u64s);
+ c->btree_roots[i].key.k.u64s);
end = vstruct_next(end);
}
mutex_init(&c->btree_interior_update_lock);
INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);
+ INIT_LIST_HEAD(&c->pending_node_rewrites);
+ mutex_init(&c->pending_node_rewrites_lock);
+
c->btree_interior_update_worker =
alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
if (!c->btree_interior_update_worker)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
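+ /* private ENOMEM codes identify the failing allocation site; presumably mapped back to plain -ENOMEM at the outermost API boundary */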
- return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
- sizeof(struct btree_update));
+ if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
+ sizeof(struct btree_update)))
+ return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
+
+ return 0;
}