#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
+#include "errcode.h"
#include "error.h"
+#include "journal.h"
+#include "trace.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
-struct lock_class_key bch2_btree_node_lock_key;
+const char * const bch2_btree_node_flags[] = {
+#define x(f) #f,
+ BTREE_FLAGS()
+#undef x
+ NULL
+};
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
unsigned i, reserve = 16;
- if (!c->btree_roots[0].b)
+ if (!c->btree_roots_known[0].b)
reserve += 8;
- for (i = 0; i < BTREE_ID_NR; i++)
- if (c->btree_roots[i].b)
- reserve += min_t(unsigned, 1,
- c->btree_roots[i].b->c.level) * 8;
+ for (i = 0; i < btree_id_nr_alive(c); i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (r->b)
+ reserve += min_t(unsigned, 1, r->b->c.level) * 8;
+ }
c->btree_cache.reserve = reserve;
}
return max_t(int, 0, bc->used - bc->reserve);
}
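+/*
+ * Freed nodes keep their struct btree and six lock; they go on separate lists
+ * depending on whether the lock has per-cpu reader counts, so that a recycled
+ * node's lock type matches what the allocator was asked for.
+ */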
+static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
+{
+ if (b->c.lock.readers)
+ list_move(&b->list, &bc->freed_pcpu);
+ else
+ list_move(&b->list, &bc->freed_nonpcpu);
+}
+
static void btree_node_data_free(struct bch_fs *c, struct btree *b)
{
struct btree_cache *bc = &c->btree_cache;
EBUG_ON(btree_node_write_in_flight(b));
+ clear_btree_node_just_written(b);
+
kvpfree(b->data, btree_bytes(c));
b->data = NULL;
#ifdef __KERNEL__
- vfree(b->aux_data);
+ kvfree(b->aux_data);
#else
munmap(b->aux_data, btree_aux_data_bytes(b));
#endif
b->aux_data = NULL;
bc->used--;
- list_move(&b->list, &bc->freed);
+
+ btree_node_to_freedlist(bc, b);
}
static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
b->data = kvpmalloc(btree_bytes(c), gfp);
if (!b->data)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
#ifdef __KERNEL__
- b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+ b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
#else
b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (b->aux_data == MAP_FAILED)
+ b->aux_data = NULL;
#endif
if (!b->aux_data) {
kvpfree(b->data, btree_bytes(c));
b->data = NULL;
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
}
return 0;
}
-static struct btree *__btree_node_mem_alloc(struct bch_fs *c)
+static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
{
- struct btree *b = kzalloc(sizeof(struct btree), GFP_KERNEL);
+ struct btree *b;
+
+ b = kzalloc(sizeof(struct btree), gfp);
if (!b)
return NULL;
bkey_btree_ptr_init(&b->key);
- __six_lock_init(&b->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
b->byte_order = ilog2(btree_bytes(c));
struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
- struct btree *b = __btree_node_mem_alloc(c);
+ struct btree *b;
+
+ b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
return NULL;
return NULL;
}
+ bch2_btree_lock_init(&b->c, 0);
+
bc->used++;
list_add(&b->list, &bc->freeable);
return b;
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
- rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+ int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+
+ BUG_ON(ret);
/* Cause future lookups for this node to fail: */
b->hash_val = 0;
-
- six_lock_wakeup_all(&b->c.lock);
}
int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
b->c.level = level;
b->c.btree_id = id;
- if (level)
- six_lock_pcpu_alloc(&b->c.lock);
- else
- six_lock_pcpu_free_rcu(&b->c.lock);
-
mutex_lock(&bc->lock);
ret = __bch2_btree_node_hash_insert(bc, b);
if (!ret)
- list_add(&b->list, &bc->live);
+ list_add_tail(&b->list, &bc->live);
mutex_unlock(&bc->lock);
return ret;
(1U << BTREE_NODE_read_in_flight)|
(1U << BTREE_NODE_write_in_flight))) {
if (!flush)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_reclaim;
/* XXX: waiting on IO with btree cache lock held */
bch2_btree_node_wait_on_read(b);
}
if (!six_trylock_intent(&b->c.lock))
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_reclaim;
if (!six_trylock_write(&b->c.lock))
goto out_unlock_intent;
goto wait_on_io;
}
- if (btree_node_noevict(b))
- goto out_unlock;
-
- if (!btree_node_may_write(b))
+ if (btree_node_noevict(b) ||
+ btree_node_write_blocked(b) ||
+ btree_node_will_make_reachable(b))
goto out_unlock;
if (btree_node_dirty(b)) {
- if (!flush ||
- test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
+ if (!flush)
goto out_unlock;
/*
* Using the underscore version because we don't want to compact
* bsets after the write, since this node is about to be evicted
* - unless btree verify mode is enabled, since it runs out of
* the post write cleanup:
*/
if (bch2_verify_btree_ondisk)
- bch2_btree_node_write(c, b, SIX_LOCK_intent);
+ bch2_btree_node_write(c, b, SIX_LOCK_intent,
+ BTREE_WRITE_cache_reclaim);
else
- __bch2_btree_node_write(c, b);
+ __bch2_btree_node_write(c, b,
+ BTREE_WRITE_cache_reclaim);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
}
out:
if (b->hash_val && !ret)
- trace_btree_node_reap(c, b);
+ trace_and_count(c, btree_cache_reap, c, b);
return ret;
out_unlock:
six_unlock_write(&b->c.lock);
out_unlock_intent:
six_unlock_intent(&b->c.lock);
- ret = -ENOMEM;
+ ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
goto out;
}
static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct bch_fs *c = container_of(shrink, struct bch_fs,
- btree_cache.shrink);
+ struct bch_fs *c = shrink->private_data;
struct btree_cache *bc = &c->btree_cache;
struct btree *b, *t;
unsigned long nr = sc->nr_to_scan;
- unsigned long can_free;
- unsigned long touched = 0;
+ unsigned long can_free = 0;
unsigned long freed = 0;
+ unsigned long touched = 0;
unsigned i, flags;
+ unsigned long ret = SHRINK_STOP;
+ bool trigger_writes = atomic_read(&bc->dirty) + nr >=
+ bc->used * 3 / 4;
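+	/*
+	 * If most of the cache is dirty, also write back dirty nodes as we
+	 * scan, so that later passes find clean nodes to free:
+	 */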
if (bch2_btree_shrinker_disabled)
return SHRINK_STOP;
- /* Return -1 if we can't do anything right now */
- if (sc->gfp_mask & __GFP_FS)
- mutex_lock(&bc->lock);
- else if (!mutex_trylock(&bc->lock))
- return -1;
-
+ mutex_lock(&bc->lock);
flags = memalloc_nofs_save();
/*
* It's _really_ critical that we don't free too many btree nodes - we
* have to always leave ourselves a reserve, so that allocating memory
* for a new btree node can always succeed, so that inserting keys into
* the btree can always succeed and IO can always make forward progress:
*/
- nr /= btree_pages(c);
can_free = btree_cache_can_free(bc);
nr = min_t(unsigned long, nr, can_free);
i = 0;
list_for_each_entry_safe(b, t, &bc->freeable, list) {
+ /*
+ * Leave a few nodes on the freeable list, so that a btree split
+ * won't have to hit the system allocator:
+ */
+ if (++i <= 3)
+ continue;
+
touched++;
- if (freed >= nr)
- break;
+ if (touched >= nr)
+ goto out;
- if (++i > 3 &&
- !btree_node_reclaim(c, b)) {
+ if (!btree_node_reclaim(c, b)) {
btree_node_data_free(c, b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
list_for_each_entry_safe(b, t, &bc->live, list) {
touched++;
- if (freed >= nr) {
- /* Save position */
- if (&t->list != &bc->live)
- list_move_tail(&bc->live, &t->list);
- break;
- }
-
- if (!btree_node_accessed(b) &&
- !btree_node_reclaim(c, b)) {
- /* can't call bch2_btree_node_hash_remove under lock */
+ if (btree_node_accessed(b)) {
+ clear_btree_node_accessed(b);
+ } else if (!btree_node_reclaim(c, b)) {
freed++;
- if (&t->list != &bc->live)
- list_move_tail(&bc->live, &t->list);
-
btree_node_data_free(c, b);
- mutex_unlock(&bc->lock);
bch2_btree_node_hash_remove(bc, b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
- if (freed >= nr)
- goto out;
-
- if (sc->gfp_mask & __GFP_FS)
- mutex_lock(&bc->lock);
- else if (!mutex_trylock(&bc->lock))
- goto out;
+ if (freed == nr)
+ goto out_rotate;
+ } else if (trigger_writes &&
+ btree_node_dirty(b) &&
+ !btree_node_will_make_reachable(b) &&
+ !btree_node_write_blocked(b) &&
+ six_trylock_read(&b->c.lock)) {
+ list_move(&bc->live, &b->list);
+ mutex_unlock(&bc->lock);
+ __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
+ six_unlock_read(&b->c.lock);
+ if (touched >= nr)
+ goto out_nounlock;
+ mutex_lock(&bc->lock);
goto restart;
- } else
- clear_btree_node_accessed(b);
- }
+ }
- mutex_unlock(&bc->lock);
+ if (touched >= nr)
+ break;
+ }
+out_rotate:
+ if (&t->list != &bc->live)
+ list_move_tail(&bc->live, &t->list);
out:
+ mutex_unlock(&bc->lock);
+out_nounlock:
+ ret = freed;
memalloc_nofs_restore(flags);
- return (unsigned long) freed * btree_pages(c);
+ trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
+ return ret;
}
static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct bch_fs *c = container_of(shrink, struct bch_fs,
- btree_cache.shrink);
+ struct bch_fs *c = shrink->private_data;
struct btree_cache *bc = &c->btree_cache;
if (bch2_btree_shrinker_disabled)
return 0;
- return btree_cache_can_free(bc) * btree_pages(c);
+ return btree_cache_can_free(bc);
}
void bch2_fs_btree_cache_exit(struct bch_fs *c)
struct btree *b;
unsigned i, flags;
- if (bc->shrink.list.next)
- unregister_shrinker(&bc->shrink);
+ shrinker_free(bc->shrink);
/* vfree() can allocate memory: */
flags = memalloc_nofs_save();
kvpfree(c->verify_ondisk, btree_bytes(c));
- for (i = 0; i < BTREE_ID_NR; i++)
- if (c->btree_roots[i].b)
- list_add(&c->btree_roots[i].b->list, &bc->live);
+ for (i = 0; i < btree_id_nr_alive(c); i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (r->b)
+ list_add(&r->b->list, &bc->live);
+ }
list_splice(&bc->freeable, &bc->live);
BUG_ON(btree_node_read_in_flight(b) ||
btree_node_write_in_flight(b));
- if (btree_node_dirty(b))
- bch2_btree_complete_write(c, b, btree_current_write(b));
- clear_btree_node_dirty(c, b);
-
btree_node_data_free(c, b);
}
- BUG_ON(atomic_read(&c->btree_cache.dirty));
+ BUG_ON(!bch2_journal_error(&c->journal) &&
+ atomic_read(&c->btree_cache.dirty));
- while (!list_empty(&bc->freed)) {
- b = list_first_entry(&bc->freed, struct btree, list);
+ list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
+
+ while (!list_empty(&bc->freed_nonpcpu)) {
+ b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
list_del(&b->list);
- six_lock_pcpu_free(&b->c.lock);
+ six_lock_exit(&b->c.lock);
kfree(b);
}
int bch2_fs_btree_cache_init(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
+ struct shrinker *shrink;
unsigned i;
int ret = 0;
- pr_verbose_init(c->opts, "");
-
ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
if (ret)
- goto out;
+ goto err;
bc->table_init_done = true;
bch2_recalc_btree_reserve(c);
for (i = 0; i < bc->reserve; i++)
- if (!__bch2_btree_node_mem_alloc(c)) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!__bch2_btree_node_mem_alloc(c))
+ goto err;
list_splice_init(&bc->live, &bc->freeable);
mutex_init(&c->verify_lock);
- bc->shrink.count_objects = bch2_btree_cache_count;
- bc->shrink.scan_objects = bch2_btree_cache_scan;
- bc->shrink.seeks = 4;
- bc->shrink.batch = btree_pages(c) * 2;
- ret = register_shrinker(&bc->shrink);
-out:
- pr_verbose_init(c->opts, "ret %i", ret);
- return ret;
+ shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
+ if (!shrink)
+ goto err;
+ bc->shrink = shrink;
+ shrink->count_objects = bch2_btree_cache_count;
+ shrink->scan_objects = bch2_btree_cache_scan;
+ shrink->seeks = 4;
+ shrink->private_data = c;
+ shrinker_register(shrink);
+
+ return 0;
+err:
+ return -BCH_ERR_ENOMEM_fs_btree_cache_init;
}
void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
mutex_init(&bc->lock);
INIT_LIST_HEAD(&bc->live);
INIT_LIST_HEAD(&bc->freeable);
- INIT_LIST_HEAD(&bc->freed);
+ INIT_LIST_HEAD(&bc->freed_pcpu);
+ INIT_LIST_HEAD(&bc->freed_nonpcpu);
}
/*
* We can only have one thread cannibalizing other cached btree nodes at a
* time, or we'll deadlock. We use an open coded mutex to ensure that, which
* cannibalize_bucket() will take. This means every time we unlock the root of
* the btree, we need to release this lock if we have it held.
*/
-void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
+void bch2_btree_cache_cannibalize_unlock(struct btree_trans *trans)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
if (bc->alloc_lock == current) {
- trace_btree_node_cannibalize_unlock(c);
+ trace_and_count(c, btree_cache_cannibalize_unlock, trans);
bc->alloc_lock = NULL;
closure_wake_up(&bc->alloc_wait);
}
}
-int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
+int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure *cl)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct task_struct *old;
goto success;
if (!cl) {
- trace_btree_node_cannibalize_lock_fail(c);
- return -ENOMEM;
+ trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
+ return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
}
closure_wait(&bc->alloc_wait, cl);
goto success;
}
- trace_btree_node_cannibalize_lock_fail(c);
- return -EAGAIN;
+ trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
+ return -BCH_ERR_btree_cache_cannibalize_lock_blocked;
success:
- trace_btree_node_cannibalize_lock(c);
+ trace_and_count(c, btree_cache_cannibalize_lock, trans);
return 0;
}
}
}
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
+ struct list_head *freed = pcpu_read_locks
+ ? &bc->freed_pcpu
+ : &bc->freed_nonpcpu;
+ struct btree *b, *b2;
u64 start_time = local_clock();
unsigned flags;
flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
- /*
- * btree_free() doesn't free memory; it sticks the node on the end of
- * the list. Check if there's any freed nodes there:
- */
- list_for_each_entry(b, &bc->freeable, list)
- if (!btree_node_reclaim(c, b))
- goto got_node;
-
/*
* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
- list_for_each_entry(b, &bc->freed, list)
- if (!btree_node_reclaim(c, b))
+ list_for_each_entry(b, freed, list)
+ if (!btree_node_reclaim(c, b)) {
+ list_del_init(&b->list);
goto got_node;
+ }
- b = NULL;
-got_node:
- if (b)
- list_del_init(&b->list);
- mutex_unlock(&bc->lock);
-
+ b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
if (!b) {
- b = __btree_node_mem_alloc(c);
+ mutex_unlock(&bc->lock);
+ bch2_trans_unlock(trans);
+ b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
goto err;
-
- BUG_ON(!six_trylock_intent(&b->c.lock));
- BUG_ON(!six_trylock_write(&b->c.lock));
+ mutex_lock(&bc->lock);
}
- if (!b->data) {
- if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
- goto err;
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
- mutex_lock(&bc->lock);
- bc->used++;
- mutex_unlock(&bc->lock);
+ BUG_ON(!six_trylock_intent(&b->c.lock));
+ BUG_ON(!six_trylock_write(&b->c.lock));
+got_node:
+
+ /*
+ * btree_free() doesn't free memory; it sticks the node on the end of
+ * the list. Check if there are any freed nodes there:
+ */
+ list_for_each_entry(b2, &bc->freeable, list)
+ if (!btree_node_reclaim(c, b2)) {
+ swap(b->data, b2->data);
+ swap(b->aux_data, b2->aux_data);
+ btree_node_to_freedlist(bc, b2);
+ six_unlock_write(&b2->c.lock);
+ six_unlock_intent(&b2->c.lock);
+ goto got_mem;
+ }
+
+ mutex_unlock(&bc->lock);
+
+ if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+ bch2_trans_unlock(trans);
+ if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
+ goto err;
}
+ mutex_lock(&bc->lock);
+ bc->used++;
+got_mem:
+ mutex_unlock(&bc->lock);
+
BUG_ON(btree_node_hashed(b));
BUG_ON(btree_node_dirty(b));
BUG_ON(btree_node_write_in_flight(b));
err:
mutex_lock(&bc->lock);
- if (b) {
- list_add(&b->list, &bc->freed);
- six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- }
-
/* Try to cannibalize another cached btree node: */
if (bc->alloc_lock == current) {
- b = btree_node_cannibalize(c);
- list_del_init(&b->list);
- mutex_unlock(&bc->lock);
+ b2 = btree_node_cannibalize(c);
+ clear_btree_node_just_written(b2);
+ bch2_btree_node_hash_remove(bc, b2);
+
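+		/*
+		 * If we already have a struct btree, just steal b2's buffers
+		 * and put b2 back on the freed list; otherwise use b2 itself:
+		 */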
+ if (b) {
+ swap(b->data, b2->data);
+ swap(b->aux_data, b2->aux_data);
+ btree_node_to_freedlist(bc, b2);
+ six_unlock_write(&b2->c.lock);
+ six_unlock_intent(&b2->c.lock);
+ } else {
+ b = b2;
+ list_del_init(&b->list);
+ }
- bch2_btree_node_hash_remove(bc, b);
+ mutex_unlock(&bc->lock);
- trace_btree_node_cannibalize(c);
+ trace_and_count(c, btree_cache_cannibalize, trans);
goto out;
}
mutex_unlock(&bc->lock);
memalloc_nofs_restore(flags);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
}
/* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
- struct btree_iter *iter,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
+ struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id,
unsigned level,
enum six_lock_type lock_type,
bool sync)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
u32 seq;
/*
* Parent node must be locked, else we could read in a btree node that's
* been freed:
*/
- if (iter && !bch2_btree_node_relock(iter, level + 1))
- return ERR_PTR(-EINTR);
+ if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
+ trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
+ }
+
+ b = bch2_btree_node_mem_alloc(trans, level != 0);
+
+ if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
+ trans->memory_allocation_failure = true;
+ trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
+ }
- b = bch2_btree_node_mem_alloc(c);
if (IS_ERR(b))
return b;
set_btree_node_read_in_flight(b);
six_unlock_write(&b->c.lock);
- seq = b->c.lock.state.seq;
+ seq = six_lock_seq(&b->c.lock);
six_unlock_intent(&b->c.lock);
/* Unlock before doing IO: */
- if (iter && sync)
- bch2_trans_unlock(iter->trans);
+ if (path && sync)
+ bch2_trans_unlock_noassert(trans);
- bch2_btree_node_read(c, b, sync);
+ bch2_btree_node_read(trans, b, sync);
if (!sync)
return NULL;
- /*
- * XXX: this will probably always fail because btree_iter_relock()
- * currently fails for iterators that aren't pointed at a valid btree
- * node
- */
- if (iter && !bch2_trans_relock(iter->trans))
- return ERR_PTR(-EINTR);
+ if (path) {
+ int ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ BUG_ON(!trans->restarted);
+ return ERR_PTR(ret);
+ }
+ }
- if (!six_relock_type(&b->c.lock, lock_type, seq))
- return ERR_PTR(-EINTR);
+ if (!six_relock_type(&b->c.lock, lock_type, seq)) {
+ if (path)
+ trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
+ }
return b;
}
-static int lock_node_check_fn(struct six_lock *lock, void *p)
-{
- struct btree *b = container_of(lock, struct btree, c.lock);
- const struct bkey_i *k = p;
-
- return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
-}
-
static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
{
- char buf1[100], buf2[100], buf3[100], buf4[100];
+ struct printbuf buf = PRINTBUF;
- if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
+ if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
return;
- bch2_bpos_to_text(&PBUF(buf1), b->key.k.type == KEY_TYPE_btree_ptr_v2
- ? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
- : POS_MIN);
- bch2_bpos_to_text(&PBUF(buf2), b->data->min_key);
-
- bch2_bpos_to_text(&PBUF(buf3), b->key.k.p);
- bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
- bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n"
- "btree: ptr %u header %llu\n"
- "level: ptr %u header %llu\n"
- "min ptr %s node header %s\n"
- "max ptr %s node header %s",
- b->c.btree_id, BTREE_NODE_ID(b->data),
- b->c.level, BTREE_NODE_LEVEL(b->data),
- buf1, buf2, buf3, buf4);
+ prt_printf(&buf,
+ "btree node header doesn't match ptr\n"
+ "btree %s level %u\n"
+ "ptr: ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ prt_printf(&buf, "\nheader: btree %s level %llu\n"
+ "min ",
+ bch2_btree_id_str(BTREE_NODE_ID(b->data)),
+ BTREE_NODE_LEVEL(b->data));
+ bch2_bpos_to_text(&buf, b->data->min_key);
+
+ prt_printf(&buf, "\nmax ");
+ bch2_bpos_to_text(&buf, b->data->max_key);
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+ printbuf_exit(&buf);
}
static inline void btree_check_header(struct bch_fs *c, struct btree *b)
{
if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
b->c.level != BTREE_NODE_LEVEL(b->data) ||
- bpos_cmp(b->data->max_key, b->key.k.p) ||
+ !bpos_eq(b->data->max_key, b->key.k.p) ||
(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
- bpos_cmp(b->data->min_key,
+ !bpos_eq(b->data->min_key,
bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
btree_bad_header(c, b);
}
-/**
- * bch_btree_node_get - find a btree node in the cache and lock it, reading it
- * in from disk if necessary.
- *
- * If IO is necessary and running under generic_make_request, returns -EAGAIN.
- *
- * The btree node will have either a read or a write lock held, depending on
- * the @write parameter.
- */
-struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
- const struct bkey_i *k, unsigned level,
- enum six_lock_type lock_type,
- unsigned long trace_ip)
+static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+ const struct bkey_i *k, unsigned level,
+ enum six_lock_type lock_type,
+ unsigned long trace_ip)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
+ bool need_relock = false;
+ int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH);
-
- b = btree_node_mem_ptr(k);
- if (b)
- goto lock_node;
retry:
b = btree_cache_find(bc, k);
if (unlikely(!b)) {
/*
* We must have the parent locked to call bch2_btree_node_fill(),
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(c, iter, k, iter->btree_id,
+ b = bch2_btree_node_fill(trans, path, k, path->btree_id,
level, lock_type, true);
+ need_relock = true;
/* We raced and found the btree node in the cache */
if (!b)
if (IS_ERR(b))
return b;
} else {
-lock_node:
- /*
- * There's a potential deadlock with splits and insertions into
- * interior nodes we have to avoid:
- *
- * The other thread might be holding an intent lock on the node
- * we want, and they want to update its parent node so they're
- * going to upgrade their intent lock on the parent node to a
- * write lock.
- *
- * But if we're holding a read lock on the parent, and we're
- * trying to get the intent lock they're holding, we deadlock.
- *
- * So to avoid this we drop the read locks on parent nodes when
- * we're starting to take intent locks - and handle the race.
- *
- * The race is that they might be about to free the node we
- * want, and dropping our read lock on the parent node lets them
- * update the parent marking the node we want as freed, and then
- * free it:
- *
- * To guard against this, btree nodes are evicted from the cache
- * when they're freed - and b->hash_val is zeroed out, which we
- * check for after we lock the node.
- *
- * Then, bch2_btree_node_relock() on the parent will fail - because
- * the parent was modified, when the pointer to the node we want
- * was removed - and we'll bail out:
- */
- if (btree_node_read_locked(iter, level + 1))
- btree_node_unlock(iter, level + 1);
+ if (btree_node_read_locked(path, level + 1))
+ btree_node_unlock(trans, path, level + 1);
- if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
- lock_node_check_fn, (void *) k, trace_ip)) {
- if (b->hash_val != btree_ptr_hash_val(k))
- goto retry;
- return ERR_PTR(-EINTR);
- }
+ ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ERR_PTR(ret);
+
+ BUG_ON(ret);
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
b->c.level != level ||
race_fault())) {
six_unlock_type(&b->c.lock, lock_type);
- if (bch2_btree_node_relock(iter, level + 1))
+ if (bch2_btree_node_relock(trans, path, level + 1))
goto retry;
- trace_trans_restart_btree_node_reused(iter->trans->ip,
- trace_ip,
- iter->btree_id,
- &iter->real_pos);
- return ERR_PTR(-EINTR);
+ trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
}
+
+ /* avoid atomic set bit if it's not needed: */
+ if (!btree_node_accessed(b))
+ set_btree_node_accessed(b);
}
if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = b->c.lock.state.seq;
+ u32 seq = six_lock_seq(&b->c.lock);
six_unlock_type(&b->c.lock, lock_type);
- bch2_trans_unlock(iter->trans);
+ bch2_trans_unlock(trans);
+ need_relock = true;
bch2_btree_node_wait_on_read(b);
/*
- * XXX: check if this always fails - btree_iter_relock()
- * currently fails for iterators that aren't pointed at a valid
- * btree node
+ * should_be_locked is not set on this path yet, so we need to
+ * relock it specifically:
*/
- if (iter && !bch2_trans_relock(iter->trans))
- return ERR_PTR(-EINTR);
-
if (!six_relock_type(&b->c.lock, lock_type, seq))
goto retry;
}
+ if (unlikely(need_relock)) {
+ ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return ERR_PTR(ret);
+ }
+ }
+
+ prefetch(b->aux_data);
+
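+	/* Prefetch the bset auxiliary search tree data we're about to use: */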
+ for_each_bset(b, t) {
+ void *p = (u64 *) b->aux_data + t->aux_data_offset;
+
+ prefetch(p + L1_CACHE_BYTES * 0);
+ prefetch(p + L1_CACHE_BYTES * 1);
+ prefetch(p + L1_CACHE_BYTES * 2);
+ }
+
+ if (unlikely(btree_node_read_error(b))) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return ERR_PTR(-EIO);
+ }
+
+ EBUG_ON(b->c.btree_id != path->btree_id);
+ EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+ btree_check_header(c, b);
+
+ return b;
+}
+
+/**
+ * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
+ * in from disk if necessary.
+ *
+ * @trans: btree transaction object
+ * @path: btree_path being traversed
+ * @k: pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
+ * @level: level of btree node being looked up (0 == leaf node)
+ * @lock_type: SIX_LOCK_read or SIX_LOCK_intent
+ * @trace_ip: ip of caller of btree iterator code (i.e. caller of bch2_btree_iter_peek())
+ *
+ * The btree node will have either a read or an intent lock held, depending on
+ * @lock_type.
+ *
+ * Returns: btree node or ERR_PTR()
+ */
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+ const struct bkey_i *k, unsigned level,
+ enum six_lock_type lock_type,
+ unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b;
+ struct bset_tree *t;
+ int ret;
+
+ EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+ b = btree_node_mem_ptr(k);
+
+ /*
+ * Check b->hash_val _before_ calling btree_node_lock() - this might not
+ * be the node we want anymore, and trying to lock the wrong node could
+ * cause an unnecessary transaction restart:
+ */
+ if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
+ !b ||
+ b->hash_val != btree_ptr_hash_val(k)))
+ return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+ if (btree_node_read_locked(path, level + 1))
+ btree_node_unlock(trans, path, level + 1);
+
+ ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ERR_PTR(ret);
+
+ BUG_ON(ret);
+
+ if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+ b->c.level != level ||
+ race_fault())) {
+ six_unlock_type(&b->c.lock, lock_type);
+ if (bch2_btree_node_relock(trans, path, level + 1))
+ return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+ trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
+ }
+
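+	/*
+	 * If the read is still in flight, fall back to the slow path, which
+	 * knows how to wait for it:
+	 */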
+ if (unlikely(btree_node_read_in_flight(b))) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+ }
+
prefetch(b->aux_data);
for_each_bset(b, t) {
return ERR_PTR(-EIO);
}
- EBUG_ON(b->c.btree_id != iter->btree_id);
+ EBUG_ON(b->c.btree_id != path->btree_id);
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
btree_check_header(c, b);
return b;
}
-struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
+struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
const struct bkey_i *k,
enum btree_id btree_id,
unsigned level,
bool nofill)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
EBUG_ON(level >= BTREE_MAX_DEPTH);
- b = btree_node_mem_ptr(k);
- if (b)
- goto lock_node;
+ if (c->opts.btree_node_mem_ptr_optimization) {
+ b = btree_node_mem_ptr(k);
+ if (b)
+ goto lock_node;
+ }
retry:
b = btree_cache_find(bc, k);
if (unlikely(!b)) {
if (nofill)
goto out;
- b = bch2_btree_node_fill(c, NULL, k, btree_id,
+ b = bch2_btree_node_fill(trans, NULL, k, btree_id,
level, SIX_LOCK_read, true);
/* We raced and found the btree node in the cache */
goto retry;
if (IS_ERR(b) &&
- !bch2_btree_cache_cannibalize_lock(c, NULL))
+ !bch2_btree_cache_cannibalize_lock(trans, NULL))
goto retry;
if (IS_ERR(b))
goto out;
} else {
lock_node:
- ret = six_lock_read(&b->c.lock, lock_node_check_fn, (void *) k);
- if (ret)
- goto retry;
+ ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ERR_PTR(ret);
+
+ BUG_ON(ret);
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
b->c.btree_id != btree_id ||
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
btree_check_header(c, b);
out:
- bch2_btree_cache_cannibalize_unlock(c);
+ bch2_btree_cache_cannibalize_unlock(trans);
return b;
}
-void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
- const struct bkey_i *k,
- enum btree_id btree_id, unsigned level)
+int bch2_btree_node_prefetch(struct btree_trans *trans,
+ struct btree_path *path,
+ const struct bkey_i *k,
+ enum btree_id btree_id, unsigned level)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- BUG_ON(iter && !btree_node_locked(iter, level + 1));
+ BUG_ON(trans && !btree_node_locked(path, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_cache_find(bc, k);
if (b)
- return;
+ return 0;
- bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+ b = bch2_btree_node_fill(trans, path, k, btree_id,
+ level, SIX_LOCK_read, false);
+ return PTR_ERR_OR_ZERO(b);
}
-void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
+void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
/* XXX we're called from btree_gc which will be holding other btree
* nodes locked
- * */
+ */
__bch2_btree_node_wait_on_read(b);
__bch2_btree_node_wait_on_write(b);
- six_lock_intent(&b->c.lock, NULL, NULL);
- six_lock_write(&b->c.lock, NULL, NULL);
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
if (btree_node_dirty(b)) {
- __bch2_btree_node_write(c, b);
+ __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
goto wait_on_io;
six_unlock_intent(&b->c.lock);
}
-void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
- struct btree *b)
+const char *bch2_btree_id_str(enum btree_id btree)
+{
+ return btree < BTREE_ID_NR ? __bch2_btree_ids[btree] : "(unknown)";
+}
+
+void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
+{
+ prt_printf(out, "%s level %u/%u\n ",
+ bch2_btree_id_str(b->c.btree_id),
+ b->c.level,
+ bch2_btree_id_root(c, b->c.btree_id)->level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+}
+
+void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
{
- const struct bkey_format *f = &b->format;
struct bset_stats stats;
memset(&stats, 0, sizeof(stats));
bch2_btree_keys_stats(b, &stats);
- pr_buf(out, "l %u ", b->c.level);
+ prt_printf(out, "l %u ", b->c.level);
bch2_bpos_to_text(out, b->data->min_key);
- pr_buf(out, " - ");
+ prt_printf(out, " - ");
bch2_bpos_to_text(out, b->data->max_key);
- pr_buf(out, ":\n"
+ prt_printf(out, ":\n"
" ptrs: ");
bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+ prt_newline(out);
+
+ prt_printf(out,
+ " format: ");
+ bch2_bkey_format_to_text(out, &b->format);
- pr_buf(out, "\n"
- " format: u64s %u fields %u %u %u %u %u\n"
+ prt_printf(out,
" unpack fn len: %u\n"
" bytes used %zu/%zu (%zu%% full)\n"
" sib u64s: %u, %u (merge threshold %u)\n"
" nr unpacked keys %u\n"
" floats %zu\n"
" failed unpacked %zu\n",
- f->key_u64s,
- f->bits_per_field[0],
- f->bits_per_field[1],
- f->bits_per_field[2],
- f->bits_per_field[3],
- f->bits_per_field[4],
b->unpack_fn_len,
b->nr.live_u64s * sizeof(u64),
btree_bytes(c) - sizeof(struct btree_node),
stats.failed);
}
-void bch2_btree_cache_to_text(struct printbuf *out, struct bch_fs *c)
+void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
{
- pr_buf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
- pr_buf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
- pr_buf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
+ prt_printf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
+ prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
+ prt_printf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
}