#include "debug.h"
#include "errcode.h"
#include "error.h"
+#include "trace.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
+#include <linux/seq_buf.h>
#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
do { \
if (shrinker_counter) \
bc->not_freed_##counter++; \
} while (0)
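/*
* shrinker_counter is a bool argument of the reclaim helpers that use this
* macro; the not_freed_* counters record why the shrinker could not free a
* node.
*/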
EBUG_ON(btree_node_write_in_flight(b));
+ clear_btree_node_just_written(b);
+
kvpfree(b->data, btree_bytes(c));
b->data = NULL;
#ifdef __KERNEL__
- vfree(b->aux_data);
+ kvfree(b->aux_data);
#else
munmap(b->aux_data, btree_aux_data_bytes(b));
#endif
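/* kvfree pairs with the kvmalloc now used to allocate aux_data below. */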
b->data = kvpmalloc(btree_bytes(c), gfp);
if (!b->data)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
#ifdef __KERNEL__
- b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+ b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
#else
b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (b->aux_data == MAP_FAILED)
b->aux_data = NULL;
#endif
if (!b->aux_data) {
kvpfree(b->data, btree_bytes(c));
b->data = NULL;
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
}
return 0;
return NULL;
bkey_btree_ptr_init(&b->key);
- __six_lock_init(&b->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- lockdep_set_no_check_recursion(&b->c.lock.dep_map);
-#endif
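/*
* Lock setup, including the lockdep recursion exception that was
* open-coded above, is now centralized in bch2_btree_lock_init().
*/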
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
b->byte_order = ilog2(btree_bytes(c));
return NULL;
}
+ bch2_btree_lock_init(&b->c, 0);
+
bc->used++;
list_add(&b->list, &bc->freeable);
return b;
BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
else if (btree_node_write_in_flight(b))
BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_reclaim;
}
/* XXX: waiting on IO with btree cache lock held */
if (!six_trylock_intent(&b->c.lock)) {
BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_node_reclaim;
}
if (!six_trylock_write(&b->c.lock)) {
BTREE_CACHE_NOT_FREED_INCREMENT(lock_write);
goto out_unlock_intent;
}
out_unlock:
six_unlock_write(&b->c.lock);
out_unlock_intent:
six_unlock_intent(&b->c.lock);
- ret = -ENOMEM;
+ ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
goto out;
}
return btree_cache_can_free(bc);
}
-static void bch2_btree_cache_shrinker_to_text(struct printbuf *out, struct shrinker *shrink)
+static void bch2_btree_cache_shrinker_to_text(struct seq_buf *s, struct shrinker *shrink)
{
struct bch_fs *c = container_of(shrink, struct bch_fs,
btree_cache.shrink);
- bch2_btree_cache_to_text(out, &c->btree_cache);
+ char *cbuf;
+ size_t buflen = seq_buf_get_buf(s, &cbuf);
+ struct printbuf out = PRINTBUF_EXTERN(cbuf, buflen);
+
+ bch2_btree_cache_to_text(&out, &c->btree_cache);
+ seq_buf_commit(s, out.pos);
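/*
* The shrinker reports through a seq_buf, while the bcachefs to_text()
* helpers print through printbufs: PRINTBUF_EXTERN wraps the seq_buf's
* buffer so the existing helper can write into it directly, and
* seq_buf_commit() then records how many bytes were written.
*/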
}
void bch2_fs_btree_cache_exit(struct bch_fs *c)
while (!list_empty(&bc->freed_nonpcpu)) {
b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
list_del(&b->list);
- six_lock_pcpu_free(&b->c.lock);
+ six_lock_exit(&b->c.lock);
kfree(b);
}
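/*
* six_lock_exit() replaces six_lock_pcpu_free(): locks initialized with
* SIX_LOCK_INIT_PCPU own per-cpu reader counts, so they now have a proper
* destructor.
*/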
for (i = 0; i < bc->reserve; i++)
if (!__bch2_btree_node_mem_alloc(c)) {
- ret = -ENOMEM;
+ ret = -BCH_ERR_ENOMEM_fs_btree_cache_init;
goto out;
}
if (!cl) {
trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
}
closure_wait(&bc->alloc_wait, cl);
}
}
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct list_head *freed = pcpu_read_locks
? &bc->freed_pcpu
goto got_node;
}
- b = __btree_node_mem_alloc(c, __GFP_NOWARN);
+ b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
if (!b) {
mutex_unlock(&bc->lock);
+ bch2_trans_unlock(trans);
b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
goto err;
mutex_lock(&bc->lock);
}
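/*
* Allocation pattern above: first a non-blocking GFP_NOWAIT attempt while
* btree locks may still be held; only after bch2_trans_unlock() do we
* retry with GFP_KERNEL and block. This is why the function now takes a
* btree_trans rather than a bch_fs - it must be able to drop the
* transaction's locks before blocking in the allocator. The same two-step
* pattern is repeated for the node's data buffers below.
*/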
- if (pcpu_read_locks)
- six_lock_pcpu_alloc(&b->c.lock);
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
BUG_ON(!six_trylock_intent(&b->c.lock));
BUG_ON(!six_trylock_write(&b->c.lock));
mutex_unlock(&bc->lock);
- if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
- goto err;
+ if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+ bch2_trans_unlock(trans);
+ if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
+ goto err;
+ }
mutex_lock(&bc->lock);
bc->used++;
/* Try to cannibalize another cached btree node: */
if (bc->alloc_lock == current) {
b2 = btree_node_cannibalize(c);
+ clear_btree_node_just_written(b2);
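/*
* The cannibalized node is about to be reused under a new identity;
* clearing just_written here presumably keeps a stale flag from its
* previous life from leaking into the new node.
*/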
bch2_btree_node_hash_remove(bc, b2);
if (b) {
mutex_unlock(&bc->lock);
memalloc_nofs_restore(flags);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
}
/* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
- struct btree_trans *trans,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id,
enum six_lock_type lock_type,
bool sync)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
u32 seq;
/*
* Parent node must be locked, else we could read in a btree node that's
* been freed:
*/
- if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
+ if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
}
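/*
* trans is never NULL now that every caller passes one, so "called from a
* real btree path" is signalled by path != NULL instead -
* bch2_btree_node_get_noiter() passes a NULL path below.
*/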
- b = bch2_btree_node_mem_alloc(c, level != 0);
+ b = bch2_btree_node_mem_alloc(trans, level != 0);

- if (trans && b == ERR_PTR(-ENOMEM)) {
+ if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
trans->memory_allocation_failure = true;
trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
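/*
* With private error codes, comparing b against ERR_PTR(-ENOMEM) would no
* longer match; bch2_err_matches() checks whether the BCH_ERR_ENOMEM_*
* code returned by the allocator belongs to the generic ENOMEM class.
*/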
set_btree_node_read_in_flight(b);
six_unlock_write(&b->c.lock);
- seq = b->c.lock.state.seq;
+ seq = six_lock_seq(&b->c.lock);
six_unlock_intent(&b->c.lock);
/* Unlock before doing IO: */
if (!sync)
return NULL;
- if (trans) {
+ if (path) {
int ret = bch2_trans_relock(trans) ?:
bch2_btree_path_relock_intent(trans, path);
if (ret) {
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) {
- if (trans)
+ if (path)
trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
}
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
+ bool need_relock = false;
int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH);
/*
* We must have the parent locked to call bch2_btree_node_fill(),
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
+ b = bch2_btree_node_fill(trans, path, k, path->btree_id,
level, lock_type, true);
+ need_relock = true;
/* We raced and found the btree node in the cache */
if (!b)
goto retry;
}
if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = b->c.lock.state.seq;
+ u32 seq = six_lock_seq(&b->c.lock);
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(trans);
+ need_relock = true;
bch2_btree_node_wait_on_read(b);
/*
* should_be_locked is not set on this path yet, so we need to
* relock it specifically:
*/
- if (trans) {
- int ret = bch2_trans_relock(trans) ?:
- bch2_btree_path_relock_intent(trans, path);
- if (ret) {
- BUG_ON(!trans->restarted);
- return ERR_PTR(ret);
- }
- }
-
if (!six_relock_type(&b->c.lock, lock_type, seq))
goto retry;
}
+ if (unlikely(need_relock)) {
+ ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return ERR_PTR(ret);
+ }
+ }
+
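/*
* Relocking the transaction is now deferred until after the node lock has
* been reacquired; if it fails, the node lock taken above must be dropped
* again, hence the six_unlock_type() in the error path.
*/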
prefetch(b->aux_data);
for_each_bset(b, t) {
}
if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = b->c.lock.state.seq;
+ u32 seq = six_lock_seq(&b->c.lock);
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(trans);
if (nofill)
goto out;
- b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
+ b = bch2_btree_node_fill(trans, NULL, k, btree_id,
level, SIX_LOCK_read, true);
/* We raced and found the btree node in the cache */
if (!b)
goto out;
} else {
lock_node:
- ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read);
+ ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
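/*
* btree_node_lock_nopath() now also takes the caller's instruction
* pointer, presumably so that lock tracing and deadlock reports can
* attribute the attempt to this call site.
*/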
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
return ERR_PTR(ret);
return b;
}
-int bch2_btree_node_prefetch(struct bch_fs *c,
- struct btree_trans *trans,
+int bch2_btree_node_prefetch(struct btree_trans *trans,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id, unsigned level)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
if (b)
return 0;
- b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+ b = bch2_btree_node_fill(trans, path, k, btree_id,
level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b);
}
}
void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
- struct btree *b)
+ const struct btree *b)
{
const struct bkey_format *f = &b->format;
struct bset_stats stats;
stats.failed);
}
-void bch2_btree_cache_to_text(struct printbuf *out, struct btree_cache *bc)
+void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
{
prt_printf(out, "nr nodes:\t\t%u\n", bc->used);
prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&bc->dirty));