#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
+struct lock_class_key bch2_btree_node_lock_key;
+
+const char * const bch2_btree_node_flags[] = {
+#define x(f) #f,
+ BTREE_FLAGS()
+#undef x
+ NULL
+};
+
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
unsigned i, reserve = 16;
return max_t(int, 0, bc->used - bc->reserve);
}
-static void __btree_node_data_free(struct bch_fs *c, struct btree *b)
+static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
{
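+ /*
+ * Nodes whose six lock has per-CPU reader counts are kept on a separate
+ * freed list, so they can be reused without reallocating those counters:
+ */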
+ if (b->c.lock.readers)
+ list_move(&b->list, &bc->freed_pcpu);
+ else
+ list_move(&b->list, &bc->freed_nonpcpu);
+}
+
+static void btree_node_data_free(struct bch_fs *c, struct btree *b)
+{
+ struct btree_cache *bc = &c->btree_cache;
+
EBUG_ON(btree_node_write_in_flight(b));
kvpfree(b->data, btree_bytes(c));
b->data = NULL;
+#ifdef __KERNEL__
vfree(b->aux_data);
+#else
+ munmap(b->aux_data, btree_aux_data_bytes(b));
+#endif
b->aux_data = NULL;
-}
-
-static void btree_node_data_free(struct bch_fs *c, struct btree *b)
-{
- struct btree_cache *bc = &c->btree_cache;
- __btree_node_data_free(c, b);
bc->used--;
- list_move(&b->list, &bc->freed);
+
+ btree_node_to_freedlist(bc, b);
}
static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
b->data = kvpmalloc(btree_bytes(c), gfp);
if (!b->data)
return -ENOMEM;
-
+#ifdef __KERNEL__
b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+#else
+ b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (b->aux_data == MAP_FAILED)
+ b->aux_data = NULL;
+#endif
if (!b->aux_data) {
kvpfree(b->data, btree_bytes(c));
b->data = NULL;
return NULL;
bkey_btree_ptr_init(&b->key);
- six_lock_init(&b->c.lock);
+ __six_lock_init(&b->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
b->byte_order = ilog2(btree_bytes(c));
return b;
}
-static struct btree *btree_node_mem_alloc(struct bch_fs *c)
+struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b = __btree_node_mem_alloc(c);
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
- rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+ int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+ BUG_ON(ret);
/* Cause future lookups for this node to fail: */
b->hash_val = 0;
b->c.level = level;
b->c.btree_id = id;
- if (level)
- six_lock_pcpu_alloc(&b->c.lock);
- else
- six_lock_pcpu_free_rcu(&b->c.lock);
-
mutex_lock(&bc->lock);
ret = __bch2_btree_node_hash_insert(bc, b);
if (!ret)
int ret = 0;
lockdep_assert_held(&bc->lock);
+wait_on_io:
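+ /*
+ * A node can't be freed while it has dirty data or IO in flight: without
+ * @flush we give up, otherwise wait for the IO to complete:
+ */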
+ if (b->flags & ((1U << BTREE_NODE_dirty)|
+ (1U << BTREE_NODE_read_in_flight)|
+ (1U << BTREE_NODE_write_in_flight))) {
+ if (!flush)
+ return -ENOMEM;
+
+ /* XXX: waiting on IO with btree cache lock held */
+ bch2_btree_node_wait_on_read(b);
+ bch2_btree_node_wait_on_write(b);
+ }
if (!six_trylock_intent(&b->c.lock))
return -ENOMEM;
if (!six_trylock_write(&b->c.lock))
goto out_unlock_intent;
- if (btree_node_noevict(b))
- goto out_unlock;
-
- if (!btree_node_may_write(b))
- goto out_unlock;
+ /* recheck under lock */
+ if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
+ (1U << BTREE_NODE_write_in_flight))) {
+ if (!flush)
+ goto out_unlock;
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
+ }
- if (btree_node_dirty(b) &&
- test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
+ if (btree_node_noevict(b) ||
+ btree_node_write_blocked(b) ||
+ btree_node_will_make_reachable(b))
goto out_unlock;
- if (btree_node_dirty(b) ||
- btree_node_write_in_flight(b) ||
- btree_node_read_in_flight(b)) {
+ if (btree_node_dirty(b)) {
if (!flush)
goto out_unlock;
-
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
-
/*
* Using the underscore version because we don't want to compact
* bsets after the write, since this node is about to be evicted
 * - unless btree verify mode is enabled, since it runs out of
 * the post write cleanup:
*/
if (bch2_verify_btree_ondisk)
- bch2_btree_node_write(c, b, SIX_LOCK_intent);
+ bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
else
- __bch2_btree_node_write(c, b, SIX_LOCK_read);
+ __bch2_btree_node_write(c, b, 0);
- /* wait for any in flight btree write */
- btree_node_wait_on_io(b);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
}
out:
if (b->hash_val && !ret)
struct btree_cache *bc = &c->btree_cache;
struct btree *b, *t;
unsigned long nr = sc->nr_to_scan;
- unsigned long can_free;
+ unsigned long can_free = 0;
unsigned long touched = 0;
unsigned long freed = 0;
unsigned i, flags;
+ unsigned long ret = SHRINK_STOP;
if (bch2_btree_shrinker_disabled)
return SHRINK_STOP;
if (sc->gfp_mask & __GFP_FS)
mutex_lock(&bc->lock);
else if (!mutex_trylock(&bc->lock))
- return -1;
+ goto out_norestore;
flags = memalloc_nofs_save();
* succeed, so that inserting keys into the btree can always succeed and
* IO can always make forward progress:
*/
- nr /= btree_pages(c);
can_free = btree_cache_can_free(bc);
nr = min_t(unsigned long, nr, can_free);
i = 0;
list_for_each_entry_safe(b, t, &bc->freeable, list) {
+ /*
+ * Leave a few nodes on the freeable list, so that a btree split
+ * won't have to hit the system allocator:
+ */
+ if (++i <= 3)
+ continue;
+
touched++;
- if (freed >= nr)
+ if (touched >= nr)
break;
- if (++i > 3 &&
- !btree_node_reclaim(c, b)) {
+ if (!btree_node_reclaim(c, b)) {
btree_node_data_free(c, b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
}
restart:
list_for_each_entry_safe(b, t, &bc->live, list) {
- touched++;
-
- if (freed >= nr) {
- /* Save position */
- if (&t->list != &bc->live)
- list_move_tail(&bc->live, &t->list);
- break;
+ /* tweak this */
+ if (btree_node_accessed(b)) {
+ clear_btree_node_accessed(b);
+ goto touched;
}
- if (!btree_node_accessed(b) &&
- !btree_node_reclaim(c, b)) {
+ if (!btree_node_reclaim(c, b)) {
/* can't call bch2_btree_node_hash_remove under lock */
freed++;
if (&t->list != &bc->live)
else if (!mutex_trylock(&bc->lock))
goto out;
goto restart;
- } else
- clear_btree_node_accessed(b);
+ } else {
+ continue;
+ }
+touched:
+ touched++;
+
+ if (touched >= nr) {
+ /* Save position */
+ if (&t->list != &bc->live)
+ list_move_tail(&bc->live, &t->list);
+ break;
+ }
}
mutex_unlock(&bc->lock);
out:
+ ret = freed;
memalloc_nofs_restore(flags);
- return (unsigned long) freed * btree_pages(c);
+out_norestore:
+ trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
+ return ret;
}
static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
if (bch2_btree_shrinker_disabled)
return 0;
- return btree_cache_can_free(bc) * btree_pages(c);
+ return btree_cache_can_free(bc);
}
void bch2_fs_btree_cache_exit(struct bch_fs *c)
flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
-#ifdef CONFIG_BCACHEFS_DEBUG
if (c->verify_data)
list_move(&c->verify_data->list, &bc->live);
kvpfree(c->verify_ondisk, btree_bytes(c));
-#endif
for (i = 0; i < BTREE_ID_NR; i++)
if (c->btree_roots[i].b)
if (btree_node_dirty(b))
bch2_btree_complete_write(c, b, btree_current_write(b));
- clear_btree_node_dirty(c, b);
+ clear_btree_node_dirty_acct(c, b);
btree_node_data_free(c, b);
}
BUG_ON(atomic_read(&c->btree_cache.dirty));
- while (!list_empty(&bc->freed)) {
- b = list_first_entry(&bc->freed, struct btree, list);
+ list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
+
+ while (!list_empty(&bc->freed_nonpcpu)) {
+ b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
list_del(&b->list);
six_lock_pcpu_free(&b->c.lock);
kfree(b);
bch2_recalc_btree_reserve(c);
for (i = 0; i < bc->reserve; i++)
- if (!btree_node_mem_alloc(c)) {
+ if (!__bch2_btree_node_mem_alloc(c)) {
ret = -ENOMEM;
goto out;
}
list_splice_init(&bc->live, &bc->freeable);
-#ifdef CONFIG_BCACHEFS_DEBUG
mutex_init(&c->verify_lock);
- c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
- if (!c->verify_ondisk) {
- ret = -ENOMEM;
- goto out;
- }
-
- c->verify_data = btree_node_mem_alloc(c);
- if (!c->verify_data) {
- ret = -ENOMEM;
- goto out;
- }
-
- list_del_init(&c->verify_data->list);
-#endif
-
bc->shrink.count_objects = bch2_btree_cache_count;
bc->shrink.scan_objects = bch2_btree_cache_scan;
bc->shrink.seeks = 4;
- bc->shrink.batch = btree_pages(c) * 2;
ret = register_shrinker(&bc->shrink);
out:
pr_verbose_init(c->opts, "ret %i", ret);
mutex_init(&bc->lock);
INIT_LIST_HEAD(&bc->live);
INIT_LIST_HEAD(&bc->freeable);
- INIT_LIST_HEAD(&bc->freed);
+ INIT_LIST_HEAD(&bc->freed_pcpu);
+ INIT_LIST_HEAD(&bc->freed_nonpcpu);
}
/*
}
}
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
+struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
{
struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
+ struct list_head *freed = pcpu_read_locks
+ ? &bc->freed_pcpu
+ : &bc->freed_nonpcpu;
+ struct btree *b, *b2;
u64 start_time = local_clock();
unsigned flags;
flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
- /*
- * btree_free() doesn't free memory; it sticks the node on the end of
- * the list. Check if there's any freed nodes there:
- */
- list_for_each_entry(b, &bc->freeable, list)
- if (!btree_node_reclaim(c, b))
- goto got_node;
-
/*
* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
- list_for_each_entry(b, &bc->freed, list)
- if (!btree_node_reclaim(c, b))
+ list_for_each_entry(b, freed, list)
+ if (!btree_node_reclaim(c, b)) {
+ list_del_init(&b->list);
goto got_node;
+ }
- b = NULL;
+ b = __btree_node_mem_alloc(c);
+ if (!b)
+ goto err_locked;
+
+ if (pcpu_read_locks)
+ six_lock_pcpu_alloc(&b->c.lock);
+
+ BUG_ON(!six_trylock_intent(&b->c.lock));
+ BUG_ON(!six_trylock_write(&b->c.lock));
got_node:
- if (b)
- list_del_init(&b->list);
- mutex_unlock(&bc->lock);
- if (!b) {
- b = __btree_node_mem_alloc(c);
- if (!b)
- goto err;
+ /*
+ * btree_free() doesn't free memory; it sticks the node on the end of
+ * the list. Check if there's any freed nodes there:
+ */
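+ /*
+ * If we find one, steal its buffers for @b (whose locks we already
+ * hold) and put the now-empty node back on the freed list:
+ */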
+ list_for_each_entry(b2, &bc->freeable, list)
+ if (!btree_node_reclaim(c, b2)) {
+ swap(b->data, b2->data);
+ swap(b->aux_data, b2->aux_data);
+ btree_node_to_freedlist(bc, b2);
+ six_unlock_write(&b2->c.lock);
+ six_unlock_intent(&b2->c.lock);
+ goto got_mem;
+ }
- BUG_ON(!six_trylock_intent(&b->c.lock));
- BUG_ON(!six_trylock_write(&b->c.lock));
- }
+ mutex_unlock(&bc->lock);
- if (!b->data) {
- if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
- goto err;
+ if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
+ goto err;
- mutex_lock(&bc->lock);
- bc->used++;
- mutex_unlock(&bc->lock);
- }
+ mutex_lock(&bc->lock);
+ bc->used++;
+got_mem:
+ mutex_unlock(&bc->lock);
BUG_ON(btree_node_hashed(b));
+ BUG_ON(btree_node_dirty(b));
BUG_ON(btree_node_write_in_flight(b));
out:
b->flags = 0;
b->sib_u64s[1] = 0;
b->whiteout_u64s = 0;
bch2_btree_keys_init(b);
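+ /* mark accessed so the shrinker doesn't immediately reclaim it: */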
+ set_btree_node_accessed(b);
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
start_time);
return b;
err:
mutex_lock(&bc->lock);
-
- if (b) {
- list_add(&b->list, &bc->freed);
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- }
-
+err_locked:
/* Try to cannibalize another cached btree node: */
if (bc->alloc_lock == current) {
- b = btree_node_cannibalize(c);
- list_del_init(&b->list);
- mutex_unlock(&bc->lock);
+ b2 = btree_node_cannibalize(c);
+ bch2_btree_node_hash_remove(bc, b2);
+
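+ /*
+ * If we already have a struct btree, just take b2's buffers and
+ * recycle b2; otherwise use b2 itself:
+ */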
+ if (b) {
+ swap(b->data, b2->data);
+ swap(b->aux_data, b2->aux_data);
+ btree_node_to_freedlist(bc, b2);
+ six_unlock_write(&b2->c.lock);
+ six_unlock_intent(&b2->c.lock);
+ } else {
+ b = b2;
+ list_del_init(&b->list);
+ }
- bch2_btree_node_hash_remove(bc, b);
+ mutex_unlock(&bc->lock);
trace_btree_node_cannibalize(c);
goto out;
/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
- struct btree_iter *iter,
+ struct btree_trans *trans,
+ struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id,
unsigned level,
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
+ u32 seq;
BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
/*
* Parent node must be locked, else we could read in a btree node that's
* been freed:
*/
- if (iter && !bch2_btree_node_relock(iter, level + 1))
+ if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
+ trace_trans_restart_relock_parent_for_fill(trans->fn,
+ _THIS_IP_, btree_id, &path->pos);
+ btree_trans_restart(trans);
return ERR_PTR(-EINTR);
+ }
+
+ b = bch2_btree_node_mem_alloc(c, level != 0);
+
+ if (trans && b == ERR_PTR(-ENOMEM)) {
+ trans->memory_allocation_failure = true;
+ trace_trans_restart_memory_allocation_failure(trans->fn,
+ _THIS_IP_, btree_id, &path->pos);
+ btree_trans_restart(trans);
+ return ERR_PTR(-EINTR);
+ }
- b = bch2_btree_node_mem_alloc(c);
if (IS_ERR(b))
return b;
return NULL;
}
- /*
- * Unlock before doing IO:
- *
- * XXX: ideally should be dropping all btree node locks here
- */
- if (iter && btree_node_read_locked(iter, level + 1))
- btree_node_unlock(iter, level + 1);
-
- bch2_btree_node_read(c, b, sync);
+ set_btree_node_read_in_flight(b);
six_unlock_write(&b->c.lock);
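+ /* record the lock sequence so six_relock_type() can retake the lock after IO: */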
+ seq = b->c.lock.state.seq;
+ six_unlock_intent(&b->c.lock);
- if (!sync) {
- six_unlock_intent(&b->c.lock);
+ /* Unlock before doing IO: */
+ if (trans && sync)
+ bch2_trans_unlock(trans);
+
+ bch2_btree_node_read(c, b, sync);
+
+ if (!sync)
return NULL;
+
+ if (trans &&
+ (!bch2_trans_relock(trans) ||
+ !bch2_btree_path_relock_intent(trans, path))) {
+ BUG_ON(!trans->restarted);
+ return ERR_PTR(-EINTR);
}
- if (lock_type == SIX_LOCK_read)
- six_lock_downgrade(&b->c.lock);
+ if (!six_relock_type(&b->c.lock, lock_type, seq)) {
+ trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_,
+ btree_id, &path->pos);
+ btree_trans_restart(trans);
+ return ERR_PTR(-EINTR);
+ }
return b;
}
return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
}
+static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
+{
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ struct printbuf buf3 = PRINTBUF;
+
+ if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
+ return;
+
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&b->key));
+ bch2_bpos_to_text(&buf2, b->data->min_key);
+ bch2_bpos_to_text(&buf3, b->data->max_key);
+
+ bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n"
+ "btree %s level %u\n"
+ "ptr: %s\n"
+ "header: btree %s level %llu\n"
+ "min %s max %s\n",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf,
+ bch2_btree_ids[BTREE_NODE_ID(b->data)],
+ BTREE_NODE_LEVEL(b->data),
+ buf2.buf, buf3.buf);
+
+ printbuf_exit(&buf3);
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+}
+
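+/*
+ * Check that the node's btree id, level and key range match what the on-disk
+ * header says; if not, report it via btree_bad_header():
+ */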
+static inline void btree_check_header(struct bch_fs *c, struct btree *b)
+{
+ if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
+ b->c.level != BTREE_NODE_LEVEL(b->data) ||
+ bpos_cmp(b->data->max_key, b->key.k.p) ||
+ (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ bpos_cmp(b->data->min_key,
+ bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
+ btree_bad_header(c, b);
+}
+
/**
 * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 * The btree node will have either a read or a write lock held, depending on
 * the @lock_type parameter.
*/
-struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
const struct bkey_i *k, unsigned level,
enum six_lock_type lock_type,
unsigned long trace_ip)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
EBUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_node_mem_ptr(k);
- if (b)
- goto lock_node;
+
+ /*
+ * Check b->hash_val _before_ calling btree_node_lock() - this might not
+ * be the node we want anymore, and trying to lock the wrong node could
+ * cause an unnecessary transaction restart:
+ */
+ if (likely(c->opts.btree_node_mem_ptr_optimization &&
+ b &&
+ b->hash_val == btree_ptr_hash_val(k)))
+ goto lock_node;
retry:
b = btree_cache_find(bc, k);
if (unlikely(!b)) {
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(c, iter, k, iter->btree_id,
+ b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
level, lock_type, true);
/* We raced and found the btree node in the cache */
* the parent was modified, when the pointer to the node we want
* was removed - and we'll bail out:
*/
- if (btree_node_read_locked(iter, level + 1))
- btree_node_unlock(iter, level + 1);
+ if (btree_node_read_locked(path, level + 1))
+ btree_node_unlock(path, level + 1);
- if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
+ if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
lock_node_check_fn, (void *) k, trace_ip)) {
- if (b->hash_val != btree_ptr_hash_val(k))
+ if (!trans->restarted)
goto retry;
return ERR_PTR(-EINTR);
}
b->c.level != level ||
race_fault())) {
six_unlock_type(&b->c.lock, lock_type);
- if (bch2_btree_node_relock(iter, level + 1))
+ if (bch2_btree_node_relock(trans, path, level + 1))
goto retry;
- trace_trans_restart_btree_node_reused(iter->trans->ip);
+ trace_trans_restart_btree_node_reused(trans->fn,
+ trace_ip,
+ path->btree_id,
+ &path->pos);
+ btree_trans_restart(trans);
return ERR_PTR(-EINTR);
}
}
- /* XXX: waiting on IO with btree locks held: */
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
+ if (unlikely(btree_node_read_in_flight(b))) {
+ u32 seq = b->c.lock.state.seq;
+
+ six_unlock_type(&b->c.lock, lock_type);
+ bch2_trans_unlock(trans);
+
+ bch2_btree_node_wait_on_read(b);
+
+ /*
+ * should_be_locked is not set on this path yet, so we need to
+ * relock it specifically:
+ */
+ if (trans &&
+ (!bch2_trans_relock(trans) ||
+ !bch2_btree_path_relock_intent(trans, path))) {
+ BUG_ON(!trans->restarted);
+ return ERR_PTR(-EINTR);
+ }
+
+ if (!six_relock_type(&b->c.lock, lock_type, seq))
+ goto retry;
+ }
prefetch(b->aux_data);
return ERR_PTR(-EIO);
}
- EBUG_ON(b->c.btree_id != iter->btree_id);
+ EBUG_ON(b->c.btree_id != path->btree_id);
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
- EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
- EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- bpos_cmp(b->data->min_key,
- bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
+ btree_check_header(c, b);
return b;
}
EBUG_ON(level >= BTREE_MAX_DEPTH);
- b = btree_node_mem_ptr(k);
- if (b)
- goto lock_node;
+ if (c->opts.btree_node_mem_ptr_optimization) {
+ b = btree_node_mem_ptr(k);
+ if (b)
+ goto lock_node;
+ }
retry:
b = btree_cache_find(bc, k);
if (unlikely(!b)) {
if (nofill)
goto out;
- b = bch2_btree_node_fill(c, NULL, k, btree_id,
+ b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
level, SIX_LOCK_read, true);
/* We raced and found the btree node in the cache */
}
/* XXX: waiting on IO with btree locks held: */
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
+ __bch2_btree_node_wait_on_read(b);
prefetch(b->aux_data);
EBUG_ON(b->c.btree_id != btree_id);
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
- EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
- EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- bpos_cmp(b->data->min_key,
- bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
+ btree_check_header(c, b);
out:
bch2_btree_cache_cannibalize_unlock(c);
return b;
}
-void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
- const struct bkey_i *k,
- enum btree_id btree_id, unsigned level)
+int bch2_btree_node_prefetch(struct bch_fs *c,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ const struct bkey_i *k,
+ enum btree_id btree_id, unsigned level)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- BUG_ON(iter && !btree_node_locked(iter, level + 1));
+ BUG_ON(trans && !btree_node_locked(path, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_cache_find(bc, k);
if (b)
+ return 0;
+
+ b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+ level, SIX_LOCK_read, false);
+ return PTR_ERR_OR_ZERO(b);
+}
+
+void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+
+ b = btree_cache_find(bc, k);
+ if (!b)
return;
+wait_on_io:
+ /* not allowed to wait on io with btree locks held: */
+
+ /*
+ * XXX: we're called from btree_gc which will be holding other btree
+ * nodes locked
+ */
+ __bch2_btree_node_wait_on_read(b);
+ __bch2_btree_node_wait_on_write(b);
+
+ six_lock_intent(&b->c.lock, NULL, NULL);
+ six_lock_write(&b->c.lock, NULL, NULL);
- bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
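+ /* write out any dirty data before freeing the node: */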
+ if (btree_node_dirty(b)) {
+ __bch2_btree_node_write(c, b, 0);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
+ }
+
+ BUG_ON(btree_node_dirty(b));
+
+ mutex_lock(&bc->lock);
+ btree_node_data_free(c, b);
+ bch2_btree_node_hash_remove(bc, b);
+ mutex_unlock(&bc->lock);
+
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
}
void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,