Update bcachefs sources to 31c09369cd six locks: Fix an uninitialized var

diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index e8530cceacf46e87e02e64317165938db1242cf3..f8402709190079a6e108f901193880212e0a794a 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -9,11 +9,11 @@
 #include "debug.h"
 #include "errcode.h"
 #include "error.h"
+#include "trace.h"
 
 #include <linux/prefetch.h>
 #include <linux/sched/mm.h>
 #include <linux/seq_buf.h>
-#include <trace/events/bcachefs.h>
 
 #define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
 do {                                            \
@@ -62,10 +62,12 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 
        EBUG_ON(btree_node_write_in_flight(b));
 
+       clear_btree_node_just_written(b);
+
        kvpfree(b->data, btree_bytes(c));
        b->data = NULL;
 #ifdef __KERNEL__
-       vfree(b->aux_data);
+       kvfree(b->aux_data);
 #else
        munmap(b->aux_data, btree_aux_data_bytes(b));
 #endif
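
The vfree() to kvfree() switch here pairs with the allocation-side change in
the next hunk: once aux_data comes from kvmalloc(), the pointer may be either
slab- or vmalloc-backed, and only kvfree() handles both. A minimal sketch of
the pairing, with hypothetical helper names:

    #include <linux/slab.h>

    /* kvmalloc() tries kmalloc() first and falls back to vmalloc() for
     * large or fragmented requests. */
    static void *aux_data_alloc(size_t size, gfp_t gfp)
    {
            return kvmalloc(size, gfp);
    }

    /* kvfree() dispatches to kfree() or vfree() based on the pointer;
     * a bare vfree() would be wrong for a slab-backed allocation. */
    static void aux_data_free(void *p)
    {
            kvfree(p);
    }
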
@@ -98,9 +100,9 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 
        b->data = kvpmalloc(btree_bytes(c), gfp);
        if (!b->data)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
-       b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+       b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
 #else
        b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
                           PROT_READ|PROT_WRITE|PROT_EXEC,
@@ -111,7 +113,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
        if (!b->aux_data) {
                kvpfree(b->data, btree_bytes(c));
                b->data = NULL;
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
        }
 
        return 0;
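
The -ENOMEM replacements in these hunks follow the private error code scheme
from errcode.h: each call site returns its own BCH_ERR_ENOMEM_* constant, and
callers test against the generic errno class rather than a literal value. A
hedged caller-side illustration (the surrounding flow is assumed, not quoted
from this file):

    int ret = btree_node_data_alloc(c, b, GFP_KERNEL);

    /* bch2_err_matches() accepts any BCH_ERR_ENOMEM_* subtype as well
     * as a plain -ENOMEM, while the specific code still identifies the
     * failing call site in logs and counters. */
    if (bch2_err_matches(ret, ENOMEM))
            return ret;
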
@@ -126,7 +128,6 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
                return NULL;
 
        bkey_btree_ptr_init(&b->key);
-       bch2_btree_lock_init(&b->c);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        lockdep_set_no_check_recursion(&b->c.lock.dep_map);
 #endif
@@ -150,6 +151,8 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
                return NULL;
        }
 
+       bch2_btree_lock_init(&b->c, 0);
+
        bc->used++;
        list_add(&b->list, &bc->freeable);
        return b;
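
Lock initialization moves out of __btree_node_mem_alloc() and into its
callers so that an init-time flag can be passed; bch2_btree_lock_init() now
takes six-lock flags as a second argument. A sketch of what the wrapper
plausibly looks like after this change (the body is an assumption; only the
signature shape and the flag values appear in this patch):

    void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
                              enum six_lock_init_flags flags)
    {
            six_lock_init(&b->lock, flags);
    }
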
@@ -223,7 +226,7 @@ wait_on_io:
                                BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
                        else if (btree_node_write_in_flight(b))
                                BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_btree_node_reclaim;
                }
 
                /* XXX: waiting on IO with btree cache lock held */
@@ -233,7 +236,7 @@ wait_on_io:
 
        if (!six_trylock_intent(&b->c.lock)) {
                BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
        }
 
        if (!six_trylock_write(&b->c.lock)) {
@@ -299,7 +302,7 @@ out_unlock:
        six_unlock_write(&b->c.lock);
 out_unlock_intent:
        six_unlock_intent(&b->c.lock);
-       ret = -ENOMEM;
+       ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
        goto out;
 }
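
Note that this reclaim path never blocks: everything is trylock-based, and a
busy node simply fails reclaim with the new dedicated error code. A condensed
sketch of that shape (control flow paraphrased from the hunks above):

    static int btree_node_try_reclaim(struct btree *b)
    {
            /* never sleep in the shrinker: skip any contended node */
            if (!six_trylock_intent(&b->c.lock))
                    return -BCH_ERR_ENOMEM_btree_node_reclaim;

            if (!six_trylock_write(&b->c.lock)) {
                    six_unlock_intent(&b->c.lock);
                    return -BCH_ERR_ENOMEM_btree_node_reclaim;
            }

            /* ... flush or free the node ... */

            six_unlock_write(&b->c.lock);
            six_unlock_intent(&b->c.lock);
            return 0;
    }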
 
@@ -484,7 +487,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        while (!list_empty(&bc->freed_nonpcpu)) {
                b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
                list_del(&b->list);
-               six_lock_pcpu_free(&b->c.lock);
+               six_lock_exit(&b->c.lock);
                kfree(b);
        }
 
@@ -513,7 +516,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
        for (i = 0; i < bc->reserve; i++)
                if (!__bch2_btree_node_mem_alloc(c)) {
-                       ret = -ENOMEM;
+                       ret = -BCH_ERR_ENOMEM_fs_btree_cache_init;
                        goto out;
                }
 
@@ -568,7 +571,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 
        if (!cl) {
                trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
        }
 
        closure_wait(&bc->alloc_wait, cl);
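
The contract here: a caller that cannot wait passes NULL and immediately gets
the new error code back; a caller that can wait passes a closure and is woken
when the cannibalize lock frees up. A hedged usage sketch (the retry loop is
assumed, not taken from this file):

    struct closure cl;
    int ret;

    closure_init_stack(&cl);

    while ((ret = bch2_btree_cache_cannibalize_lock(c, &cl))) {
            /* lock held elsewhere: wait for the holder to drop it,
             * then try again */
            closure_sync(&cl);
    }
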
@@ -645,8 +648,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
                mutex_lock(&bc->lock);
        }
 
-       if (pcpu_read_locks)
-               six_lock_pcpu_alloc(&b->c.lock);
+       bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
 
        BUG_ON(!six_trylock_intent(&b->c.lock));
        BUG_ON(!six_trylock_write(&b->c.lock));
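
This replaces the old two-step six_lock_pcpu_alloc() with an init-time flag:
whether a lock uses percpu reader counts must now be decided when the lock is
created, and teardown goes through six_lock_exit() (see the
bch2_fs_btree_cache_exit() hunk above). A before/after sketch of the call
site:

    /* before: percpu reader state bolted on after init */
    if (pcpu_read_locks)
            six_lock_pcpu_alloc(&b->c.lock);

    /* after: one init call, the flag decides */
    bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
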
@@ -700,6 +702,7 @@ err:
        /* Try to cannibalize another cached btree node: */
        if (bc->alloc_lock == current) {
                b2 = btree_node_cannibalize(c);
+               clear_btree_node_just_written(b2);
                bch2_btree_node_hash_remove(bc, b2);
 
                if (b) {
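
Clearing just_written on the cannibalized node matters because a reused node
keeps its flag word, and stale state would otherwise leak into the new user
(mirroring the clear added in btree_node_data_free() above). For reference, a
sketch of the test_bit-style helper pattern these accessors follow (the exact
macro is an assumption, not quoted from this tree):

    #define BTREE_FLAG(flag)                                           \
    static inline bool btree_node_##flag(struct btree *b)              \
    { return test_bit(BTREE_NODE_##flag, &b->flags); }                 \
                                                                       \
    static inline void set_btree_node_##flag(struct btree *b)          \
    { set_bit(BTREE_NODE_##flag, &b->flags); }                         \
                                                                       \
    static inline void clear_btree_node_##flag(struct btree *b)        \
    { clear_bit(BTREE_NODE_##flag, &b->flags); }
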
@@ -721,7 +724,7 @@ err:
 
        mutex_unlock(&bc->lock);
        memalloc_nofs_restore(flags);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
 }
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
@@ -750,7 +753,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 
        b = bch2_btree_node_mem_alloc(trans, level != 0);
 
-       if (b == ERR_PTR(-ENOMEM)) {
+       if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
                trans->memory_allocation_failure = true;
                trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
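
The old check compared the pointer against ERR_PTR(-ENOMEM) exactly, which
the per-site codes introduced above would never equal; matching by error
class fixes that. The pattern, restated with comments:

    b = bch2_btree_node_mem_alloc(trans, level != 0);

    /* PTR_ERR_OR_ZERO() yields 0 for a valid pointer or the negative
     * error for an ERR_PTR; bch2_err_matches() then accepts any
     * BCH_ERR_ENOMEM_* subtype, which a literal comparison against
     * ERR_PTR(-ENOMEM) would miss. */
    if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
            trans->memory_allocation_failure = true;
            return ERR_PTR(btree_trans_restart(trans,
                    BCH_ERR_transaction_restart_fill_mem_alloc_fail));
    }
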
@@ -784,7 +787,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        set_btree_node_read_in_flight(b);
 
        six_unlock_write(&b->c.lock);
-       seq = b->c.lock.state.seq;
+       seq = six_lock_seq(&b->c.lock);
        six_unlock_intent(&b->c.lock);
 
        /* Unlock before doing IO: */
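
six_lock_seq() replaces direct reads of lock.state.seq, which the six-lock
rework made private. The sequence number is what makes the drop-and-revalidate
pattern in these hunks work: capture it while locked, unlock for IO, then
relock only if nothing intervened. A hedged sketch using the existing
six_relock_type() primitive:

    u32 seq = six_lock_seq(&b->c.lock);

    six_unlock_type(&b->c.lock, lock_type);

    /* ... wait on IO without holding the node lock ... */

    /* relocking succeeds only if the seq is unchanged, i.e. no writer
     * touched the node in between; otherwise take the full retry path */
    if (!six_relock_type(&b->c.lock, lock_type, seq))
            goto retry;
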
@@ -908,7 +911,7 @@ retry:
        }
 
        if (unlikely(btree_node_read_in_flight(b))) {
-               u32 seq = b->c.lock.state.seq;
+               u32 seq = six_lock_seq(&b->c.lock);
 
                six_unlock_type(&b->c.lock, lock_type);
                bch2_trans_unlock(trans);
@@ -1006,7 +1009,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
        }
 
        if (unlikely(btree_node_read_in_flight(b))) {
-               u32 seq = b->c.lock.state.seq;
+               u32 seq = six_lock_seq(&b->c.lock);
 
                six_unlock_type(&b->c.lock, lock_type);
                bch2_trans_unlock(trans);