git.sesse.net Git - bcachefs-tools-debian/blobdiff - libbcachefs/btree_cache.c
Update bcachefs sources to 0342eebf85 bcachefs: Improve the backpointer to missing...
[bcachefs-tools-debian] / libbcachefs / btree_cache.c
index e8530cceacf46e87e02e64317165938db1242cf3..c53597a29e2e0bdcdc42421771beda976dfba056 100644 (file)
@@ -98,7 +98,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 
        b->data = kvpmalloc(btree_bytes(c), gfp);
        if (!b->data)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
        b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
 #else
@@ -111,7 +111,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
        if (!b->aux_data) {
                kvpfree(b->data, btree_bytes(c));
                b->data = NULL;
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
        }
 
        return 0;
@@ -223,7 +223,7 @@ wait_on_io:
                                BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
                        else if (btree_node_write_in_flight(b))
                                BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_btree_node_reclaim;
                }
 
                /* XXX: waiting on IO with btree cache lock held */
@@ -233,7 +233,7 @@ wait_on_io:
 
        if (!six_trylock_intent(&b->c.lock)) {
                BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
        }
 
        if (!six_trylock_write(&b->c.lock)) {
@@ -299,7 +299,7 @@ out_unlock:
        six_unlock_write(&b->c.lock);
 out_unlock_intent:
        six_unlock_intent(&b->c.lock);
-       ret = -ENOMEM;
+       ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
        goto out;
 }
 
@@ -513,7 +513,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
        for (i = 0; i < bc->reserve; i++)
                if (!__bch2_btree_node_mem_alloc(c)) {
-                       ret = -ENOMEM;
+                       ret = -BCH_ERR_ENOMEM_fs_btree_cache_init;
                        goto out;
                }
 
@@ -568,7 +568,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 
        if (!cl) {
                trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
        }
 
        closure_wait(&bc->alloc_wait, cl);
@@ -721,7 +721,7 @@ err:
 
        mutex_unlock(&bc->lock);
        memalloc_nofs_restore(flags);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
 }
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
@@ -750,7 +750,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 
        b = bch2_btree_node_mem_alloc(trans, level != 0);
 
-       if (b == ERR_PTR(-ENOMEM)) {
+       if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
                trans->memory_allocation_failure = true;
                trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));