git.sesse.net Git - bcachefs-tools-debian/commitdiff
Update bcachefs sources to 0342eebf85 bcachefs: Improve the backpointer to missing...
authorKent Overstreet <kent.overstreet@linux.dev>
Wed, 15 Mar 2023 12:59:01 +0000 (08:59 -0400)
committerKent Overstreet <kent.overstreet@linux.dev>
Wed, 15 Mar 2023 12:59:01 +0000 (08:59 -0400)
34 files changed:
.bcachefs_revision
libbcachefs/backpointers.c
libbcachefs/backpointers.h
libbcachefs/btree_cache.c
libbcachefs/btree_gc.c
libbcachefs/btree_io.c
libbcachefs/btree_iter.c
libbcachefs/btree_key_cache.c
libbcachefs/btree_update_interior.c
libbcachefs/btree_update_leaf.c
libbcachefs/btree_write_buffer.c
libbcachefs/buckets.c
libbcachefs/buckets_waiting_for_journal.c
libbcachefs/checksum.c
libbcachefs/clock.c
libbcachefs/compress.c
libbcachefs/counters.c
libbcachefs/disk_groups.c
libbcachefs/ec.c
libbcachefs/errcode.h
libbcachefs/extents.c
libbcachefs/fs-io.c
libbcachefs/fsck.c
libbcachefs/io.c
libbcachefs/journal.c
libbcachefs/journal_io.c
libbcachefs/journal_sb.c
libbcachefs/journal_seq_blacklist.c
libbcachefs/move.c
libbcachefs/recovery.c
libbcachefs/replicas.c
libbcachefs/subvolume.c
libbcachefs/super-io.c
libbcachefs/super.c

index d8d138657af4c2d10a014528d685e450ee1b101a..ff46d7d81241a5d555227cc094a51f3edb0d537a 100644 (file)
@@ -1 +1 @@
-72405e7ff8c5fb569b74b046d19866ee480f29b7
+0342eebf85b7be76f01bacec8f958c6e6039535b
index 8517c56352264451315103848521ad9a09f9d26a..740084b3ff12f448bda0b314c23ca5bb779c42bb 100644 (file)
@@ -298,11 +298,12 @@ err:
 /*
  * Find the next backpointer >= *bp_offset:
  */
-int bch2_get_next_backpointer(struct btree_trans *trans,
-                             struct bpos bucket, int gen,
-                             u64 *bp_offset,
-                             struct bch_backpointer *dst,
-                             unsigned iter_flags)
+int __bch2_get_next_backpointer(struct btree_trans *trans,
+                               struct bpos bucket, int gen,
+                               u64 *bp_offset,
+                               struct bpos *bp_pos_ret,
+                               struct bch_backpointer *dst,
+                               unsigned iter_flags)
 {
        struct bch_fs *c = trans->c;
        struct bpos bp_pos, bp_end_pos;
@@ -352,6 +353,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,
 
                *dst = *bkey_s_c_to_backpointer(k).v;
                *bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;
+               *bp_pos_ret = k.k->p;
                goto out;
        }
 done:
@@ -362,6 +364,19 @@ out:
        return ret;
 }
 
+int bch2_get_next_backpointer(struct btree_trans *trans,
+                             struct bpos bucket, int gen,
+                             u64 *bp_offset,
+                             struct bch_backpointer *dst,
+                             unsigned iter_flags)
+{
+       struct bpos bp_pos;
+
+       return __bch2_get_next_backpointer(trans, bucket, gen,
+                                          bp_offset, &bp_pos,
+                                          dst, iter_flags);
+}
+
 static void backpointer_not_found(struct btree_trans *trans,
                                  struct bpos bucket,
                                  u64 bp_offset,
@@ -952,7 +967,7 @@ static int check_one_backpointer(struct btree_trans *trans,
        struct printbuf buf = PRINTBUF;
        int ret;
 
-       ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp, 0);
+       ret = __bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp_pos, &bp, 0);
        if (ret || *bp_offset == U64_MAX)
                return ret;
 
@@ -968,23 +983,17 @@ static int check_one_backpointer(struct btree_trans *trans,
        if (ret)
                return ret;
 
-       bp_pos = bucket_pos_to_bp(c, bucket,
-                       max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
-
        if (!k.k && !bpos_eq(*last_flushed_pos, bp_pos)) {
                *last_flushed_pos = bp_pos;
-               pr_info("flushing at %llu:%llu",
-                       last_flushed_pos->inode,
-                       last_flushed_pos->offset);
-
                ret = bch2_btree_write_buffer_flush_sync(trans) ?:
                        -BCH_ERR_transaction_restart_write_buffer_flush;
                goto out;
        }
 
        if (fsck_err_on(!k.k, c,
-                       "%s backpointer points to missing extent\n%s",
-                       *bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
+                       "backpointer for %llu:%llu:%llu (btree pos %llu:%llu) points to missing extent\n  %s",
+                       bucket.inode, bucket.offset, (u64) bp.bucket_offset,
+                       bp_pos.inode, bp_pos.offset,
                        (bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
                ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
                if (ret == -ENOENT)
index 314fee21dc277210fcaac107be8f580b6ab43e23..d0ba5d8596c5715197ddd39e37d79fcbe1f8530a 100644 (file)
@@ -48,7 +48,7 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
                  (bucket_to_sector(ca, bucket.offset) <<
                   MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
 
-       BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
+       EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
 
        return ret;
 }
index e8530cceacf46e87e02e64317165938db1242cf3..c53597a29e2e0bdcdc42421771beda976dfba056 100644 (file)
@@ -98,7 +98,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 
        b->data = kvpmalloc(btree_bytes(c), gfp);
        if (!b->data)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
        b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
 #else
@@ -111,7 +111,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
        if (!b->aux_data) {
                kvpfree(b->data, btree_bytes(c));
                b->data = NULL;
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
        }
 
        return 0;
@@ -223,7 +223,7 @@ wait_on_io:
                                BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
                        else if (btree_node_write_in_flight(b))
                                BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_btree_node_reclaim;
                }
 
                /* XXX: waiting on IO with btree cache lock held */
@@ -233,7 +233,7 @@ wait_on_io:
 
        if (!six_trylock_intent(&b->c.lock)) {
                BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
        }
 
        if (!six_trylock_write(&b->c.lock)) {
@@ -299,7 +299,7 @@ out_unlock:
        six_unlock_write(&b->c.lock);
 out_unlock_intent:
        six_unlock_intent(&b->c.lock);
-       ret = -ENOMEM;
+       ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
        goto out;
 }
 
@@ -513,7 +513,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
        for (i = 0; i < bc->reserve; i++)
                if (!__bch2_btree_node_mem_alloc(c)) {
-                       ret = -ENOMEM;
+                       ret = -BCH_ERR_ENOMEM_fs_btree_cache_init;
                        goto out;
                }
 
@@ -568,7 +568,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 
        if (!cl) {
                trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
        }
 
        closure_wait(&bc->alloc_wait, cl);
@@ -721,7 +721,7 @@ err:
 
        mutex_unlock(&bc->lock);
        memalloc_nofs_restore(flags);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
 }
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
@@ -750,7 +750,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 
        b = bch2_btree_node_mem_alloc(trans, level != 0);
 
-       if (b == ERR_PTR(-ENOMEM)) {
+       if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
                trans->memory_allocation_failure = true;
                trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
index 65fda70d086efea7f2b554e37ab26368ef936dd3..a728e9906e9c080a1e54894f007395dbb68b3a90 100644 (file)
@@ -201,7 +201,7 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
 
        new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_gc_repair_key;
 
        btree_ptr_to_v2(b, new);
        b->data->min_key        = new_min;
@@ -230,7 +230,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
 
        new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_gc_repair_key;
 
        btree_ptr_to_v2(b, new);
        b->data->max_key        = new_max;
@@ -686,7 +686,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
                if (!new) {
                        bch_err(c, "%s: error allocating new key", __func__);
-                       ret = -ENOMEM;
+                       ret = -BCH_ERR_ENOMEM_gc_repair_key;
                        goto err;
                }
 
@@ -1296,7 +1296,7 @@ static int bch2_gc_start(struct bch_fs *c)
                                         sizeof(u64), GFP_KERNEL);
        if (!c->usage_gc) {
                bch_err(c, "error allocating c->usage_gc");
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_gc_start;
        }
 
        for_each_member_device(ca, c, i) {
@@ -1306,7 +1306,7 @@ static int bch2_gc_start(struct bch_fs *c)
                if (!ca->usage_gc) {
                        bch_err(c, "error allocating ca->usage_gc");
                        percpu_ref_put(&ca->ref);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_gc_start;
                }
 
                this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
@@ -1498,7 +1498,7 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
                if (!buckets) {
                        percpu_ref_put(&ca->ref);
                        bch_err(c, "error allocating ca->buckets[gc]");
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_gc_alloc_start;
                }
 
                buckets->first_bucket   = ca->mi.first_bucket;
@@ -1659,7 +1659,7 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
                r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
                                       GFP_KERNEL);
                if (!r) {
-                       ret = -ENOMEM;
+                       ret = -BCH_ERR_ENOMEM_gc_reflink_start;
                        break;
                }
 
@@ -1980,7 +1980,7 @@ int bch2_gc_gens(struct bch_fs *c)
                ca->oldest_gen = kvmalloc(ca->mi.nbuckets, GFP_KERNEL);
                if (!ca->oldest_gen) {
                        percpu_ref_put(&ca->ref);
-                       ret = -ENOMEM;
+                       ret = -BCH_ERR_ENOMEM_gc_gens;
                        goto err;
                }
 
index 7a9cc3787442dc4377c35a0bd3b6334a911e8bde..586e2f96f649af1e98aa3eddd39378f634530ece 100644 (file)
@@ -1485,7 +1485,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
        ra = kzalloc(sizeof(*ra), GFP_NOFS);
        if (!ra)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
 
        closure_init(&ra->cl, NULL);
        ra->c   = c;
index 0a3e560597f7ebe90778ed16d14986b1b042f85c..8f7d376972818137f27aff8a18e5e56307991d7a 100644 (file)
@@ -1012,7 +1012,7 @@ retry_all:
                        __btree_path_put(path, false);
 
                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
-                           ret == -ENOMEM)
+                           bch2_err_matches(ret, ENOMEM))
                                goto retry_all;
                        if (ret)
                                goto err;
@@ -2809,7 +2809,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
        }
 
        if (!new_mem)
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
 
        trans->mem = new_mem;
        trans->mem_bytes = new_bytes;
index 27a73933878a2b7618f506f05ac39e745aa058c7..33269afe9cf22c39ee6cf8a16194fdc2765f76a0 100644 (file)
@@ -337,7 +337,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
                if (unlikely(!ck)) {
                        bch_err(c, "error allocating memory for key cache item, btree %s",
                                bch2_btree_ids[path->btree_id]);
-                       return ERR_PTR(-ENOMEM);
+                       return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create);
                }
 
                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
@@ -424,7 +424,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                        if (!new_k) {
                                bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
                                        bch2_btree_ids[ck->key.btree_id], new_u64s);
-                               ret = -ENOMEM;
+                               ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
                                goto err;
                        }
 
@@ -1056,17 +1056,15 @@ static void bch2_btree_key_cache_shrinker_to_text(struct seq_buf *s, struct shri
 int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
 {
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-       int ret;
 
 #ifdef __KERNEL__
        bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
        if (!bc->pcpu_freed)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_fs_btree_cache_init;
 #endif
 
-       ret = rhashtable_init(&bc->table, &bch2_btree_key_cache_params);
-       if (ret)
-               return ret;
+       if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
+               return -BCH_ERR_ENOMEM_fs_btree_cache_init;
 
        bc->table_init_done = true;
 
@@ -1074,7 +1072,9 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
        bc->shrink.count_objects        = bch2_btree_key_cache_count;
        bc->shrink.scan_objects         = bch2_btree_key_cache_scan;
        bc->shrink.to_text              = bch2_btree_key_cache_shrinker_to_text;
-       return register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name);
+       if (register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name))
+               return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+       return 0;
 }
 
 void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
index 92a49971188cbe100e87e4bc0ddb9efcff8e875b..4d63c4d766677934c0d55fd7bc83c1fb8a09ce2b 100644 (file)
@@ -2474,8 +2474,11 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c)
        c->btree_interior_update_worker =
                alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
        if (!c->btree_interior_update_worker)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
 
-       return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
-                                        sizeof(struct btree_update));
+       if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
+                                     sizeof(struct btree_update)))
+               return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
+
+       return 0;
 }
index 629e528899d99777a7090fa820ab3ab066ac9bf4..de98d7601a0e1dc87211a5cdfb1b8e3d1d5c027f 100644 (file)
@@ -401,7 +401,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
        if (!new_k) {
                bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
                        bch2_btree_ids[path->btree_id], new_u64s);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_key_cache_insert;
        }
 
        trans_for_each_update(trans, i)
@@ -1891,7 +1891,7 @@ static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list ar
        int ret;
 
        prt_vprintf(&buf, fmt, args);
-       ret = buf.allocation_failure ? -ENOMEM : 0;
+       ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
        if (ret)
                goto err;
 
index 026c249a3f441c9073aaa2641a9fec9290b50baf..80f4b9839bc22021b1090f3310bdcd30b1bf5765 100644 (file)
@@ -333,7 +333,7 @@ int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
        wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
        wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
        if (!wb->keys[0] || !wb->keys[1])
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;
 
        return 0;
 }
index 1bcef419cfabedd8b8f777df3a51105200dbdc45..9f2ecff55c50c70220eb1adb20bbb4aab505e792 100644 (file)
@@ -906,7 +906,7 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
        if (!m) {
                bch_err(c, "error allocating memory for gc_stripes, idx %llu",
                        (u64) p.idx);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_mark_stripe_ptr;
        }
 
        mutex_lock(&c->ec_stripes_heap_lock);
@@ -1075,7 +1075,7 @@ int bch2_mark_stripe(struct btree_trans *trans,
                if (!m) {
                        bch_err(c, "error allocating memory for gc_stripes, idx %llu",
                                idx);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_mark_stripe;
                }
                /*
                 * This will be wrong when we bring back runtime gc: we should
@@ -2045,15 +2045,21 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
        struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
        unsigned long *buckets_nouse = NULL;
        bool resize = ca->bucket_gens != NULL;
-       int ret = -ENOMEM;
+       int ret;
 
        if (!(bucket_gens       = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
-                                           GFP_KERNEL|__GFP_ZERO)) ||
-           (c->opts.buckets_nouse &&
+                                           GFP_KERNEL|__GFP_ZERO))) {
+               ret = -BCH_ERR_ENOMEM_bucket_gens;
+               goto err;
+       }
+
+       if ((c->opts.buckets_nouse &&
             !(buckets_nouse    = kvpmalloc(BITS_TO_LONGS(nbuckets) *
                                            sizeof(unsigned long),
-                                           GFP_KERNEL|__GFP_ZERO))))
+                                           GFP_KERNEL|__GFP_ZERO)))) {
+               ret = -BCH_ERR_ENOMEM_buckets_nouse;
                goto err;
+       }
 
        bucket_gens->first_bucket = ca->mi.first_bucket;
        bucket_gens->nbuckets   = nbuckets;
@@ -2123,12 +2129,12 @@ int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 
        ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
        if (!ca->usage_base)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_usage_init;
 
        for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
                ca->usage[i] = alloc_percpu(struct bch_dev_usage);
                if (!ca->usage[i])
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_usage_init;
        }
 
        return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
index f3774e30b5cdf12b4bf85c6a9fc8dfc9e367360a..81ab685cdef9f35dbed4a55fa2044aad8dcbaf59 100644 (file)
@@ -110,7 +110,7 @@ int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
 
        n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
        if (!n) {
-               ret = -ENOMEM;
+               ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
                goto out;
        }
 
@@ -159,7 +159,7 @@ int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
        b->t = kvmalloc(sizeof(*b->t) +
                        (sizeof(b->t->d[0]) << INITIAL_TABLE_BITS), GFP_KERNEL);
        if (!b->t)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_buckets_waiting_for_journal_init;
 
        bucket_table_init(b->t, INITIAL_TABLE_BITS);
        return 0;
index 43d22fe8131b00d720ed58702da1696577027f1b..843e138862f6a62d7aa44ece67c17a6f2b8efff7 100644 (file)
@@ -133,7 +133,7 @@ static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
 
                sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
                if (!sg)
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_do_encrypt;
 
                sg_init_table(sg, pages);
 
@@ -648,7 +648,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
 
        crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
        if (!crypt) {
-               ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
+               ret = -BCH_ERR_ENOSPC_sb_crypt;
                goto err;
        }
 
index 00d0e6725910155f1adbef4ebe375db493ff0de1..f41889093a2c7eacaa1723667fc7bb2af5d0f3aa 100644 (file)
@@ -184,10 +184,10 @@ int bch2_io_clock_init(struct io_clock *clock)
 
        clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
        if (!clock->pcpu_buf)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_io_clock_init;
 
        if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_io_clock_init;
 
        return 0;
 }
index 2b7080b67ecac518d297b77c953c2c6374f9bd23..6bec38440249a9b43a801479ade1a6bf30836acc 100644 (file)
@@ -270,7 +270,7 @@ int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
 {
        struct bbuf dst_data = { NULL };
        size_t dst_len = crc.uncompressed_size << 9;
-       int ret = -ENOMEM;
+       int ret;
 
        if (crc.uncompressed_size << 9  > c->opts.encoded_extent_max ||
            crc.compressed_size << 9    > c->opts.encoded_extent_max)
@@ -542,7 +542,7 @@ void bch2_fs_compress_exit(struct bch_fs *c)
        mempool_exit(&c->compression_bounce[READ]);
 }
 
-static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
+static int _bch2_fs_compress_init(struct bch_fs *c, u64 features)
 {
        size_t decompress_workspace_size = 0;
        bool decompress_workspace_needed;
@@ -561,34 +561,27 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
                        zstd_cctx_workspace_bound(&params.cParams),
                        zstd_dctx_workspace_bound() },
        }, *i;
-       int ret = 0;
-
-       pr_verbose_init(c->opts, "");
+       bool have_compressed = false;
 
        c->zstd_params = params;
 
        for (i = compression_types;
             i < compression_types + ARRAY_SIZE(compression_types);
             i++)
-               if (features & (1 << i->feature))
-                       goto have_compressed;
+               have_compressed |= (features & (1 << i->feature)) != 0;
 
-       goto out;
-have_compressed:
+       if (!have_compressed)
+               return 0;
 
-       if (!mempool_initialized(&c->compression_bounce[READ])) {
-               ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
-                                                 1, c->opts.encoded_extent_max);
-               if (ret)
-                       goto out;
-       }
+       if (!mempool_initialized(&c->compression_bounce[READ]) &&
+           mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+                                       1, c->opts.encoded_extent_max))
+               return -BCH_ERR_ENOMEM_compression_bounce_read_init;
 
-       if (!mempool_initialized(&c->compression_bounce[WRITE])) {
-               ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
-                                                 1, c->opts.encoded_extent_max);
-               if (ret)
-                       goto out;
-       }
+       if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
+           mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+                                       1, c->opts.encoded_extent_max))
+               return -BCH_ERR_ENOMEM_compression_bounce_write_init;
 
        for (i = compression_types;
             i < compression_types + ARRAY_SIZE(compression_types);
@@ -605,22 +598,28 @@ have_compressed:
                if (mempool_initialized(&c->compress_workspace[i->type]))
                        continue;
 
-               ret = mempool_init_kvpmalloc_pool(
+               if (mempool_init_kvpmalloc_pool(
                                &c->compress_workspace[i->type],
-                               1, i->compress_workspace);
-               if (ret)
-                       goto out;
+                               1, i->compress_workspace))
+                       return -BCH_ERR_ENOMEM_compression_workspace_init;
        }
 
-       if (!mempool_initialized(&c->decompress_workspace)) {
-               ret = mempool_init_kvpmalloc_pool(
-                               &c->decompress_workspace,
-                               1, decompress_workspace_size);
-               if (ret)
-                       goto out;
-       }
-out:
+       if (!mempool_initialized(&c->decompress_workspace) &&
+           mempool_init_kvpmalloc_pool(&c->decompress_workspace,
+                                       1, decompress_workspace_size))
+               return -BCH_ERR_ENOMEM_decompression_workspace_init;
+
+       return 0;
+}
+
+static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
+{
+       int ret;
+
+       pr_verbose_init(c->opts, "");
+       ret = _bch2_fs_compress_init(c, features);
        pr_verbose_init(c->opts, "ret %i", ret);
+
        return ret;
 }
 
index edd1b2537f482991c48d0c67b60f1a3d9a857667..e5587bc5a2b73251e29dad986dd458a931c85945 100644 (file)
@@ -96,7 +96,7 @@ int bch2_fs_counters_init(struct bch_fs *c)
 {
        c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
        if (!c->counters)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_fs_counters_init;
 
        return bch2_sb_counters_to_cpu(c);
 }
index fcd5dbff248d2d70c6afa88eb056b933c98fc83e..1a8f8b3750da15ebf6e9731cdab43de35e391b79 100644 (file)
@@ -68,7 +68,7 @@ static int bch2_sb_disk_groups_validate(struct bch_sb *sb,
 
        sorted = kmalloc_array(nr_groups, sizeof(*sorted), GFP_KERNEL);
        if (!sorted)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_disk_groups_validate;
 
        memcpy(sorted, groups->entries, nr_groups * sizeof(*sorted));
        sort(sorted, nr_groups, sizeof(*sorted), group_cmp, NULL);
@@ -134,7 +134,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
        cpu_g = kzalloc(sizeof(*cpu_g) +
                        sizeof(cpu_g->entries[0]) * nr_groups, GFP_KERNEL);
        if (!cpu_g)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_disk_groups_to_cpu;
 
        cpu_g->nr = nr_groups;
 
index 09c6f93c123494fdaf891581cb05d2ee25872d2a..1e621dcc1d3724a44605356fc5e09cbaf54b8bd3 100644 (file)
@@ -494,7 +494,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 
        buf = kzalloc(sizeof(*buf), GFP_NOIO);
        if (!buf)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_ec_read_extent;
 
        ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
        if (ret) {
@@ -559,7 +559,7 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
 
        if (idx >= h->size) {
                if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
                mutex_lock(&c->ec_stripes_heap_lock);
                if (n.size > h->size) {
@@ -573,11 +573,11 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
        }
 
        if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
        if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
            !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
        return 0;
 }
@@ -1326,7 +1326,7 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 
        s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
 
        mutex_init(&s->lock);
        closure_init(&s->iodone, NULL);
@@ -1688,8 +1688,8 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
                return h;
 
        if (!h->s) {
-               if (ec_new_stripe_alloc(c, h)) {
-                       ret = -ENOMEM;
+               ret = ec_new_stripe_alloc(c, h);
+               if (ret) {
                        bch_err(c, "failed to allocate new stripe");
                        goto err;
                }
index 162e315601f9de495700f2076ed1f59c4dcaf9f2..4304e25a6b24097edb1dc619f31a6f1e62568a3d 100644 (file)
@@ -4,6 +4,79 @@
 
 #define BCH_ERRCODES()                                                         \
        x(ENOMEM,                       ENOMEM_stripe_buf)                      \
+       x(ENOMEM,                       ENOMEM_replicas_table)                  \
+       x(ENOMEM,                       ENOMEM_cpu_replicas)                    \
+       x(ENOMEM,                       ENOMEM_replicas_gc)                     \
+       x(ENOMEM,                       ENOMEM_disk_groups_validate)            \
+       x(ENOMEM,                       ENOMEM_disk_groups_to_cpu)              \
+       x(ENOMEM,                       ENOMEM_mark_snapshot)                   \
+       x(ENOMEM,                       ENOMEM_mark_stripe)                     \
+       x(ENOMEM,                       ENOMEM_mark_stripe_ptr)                 \
+       x(ENOMEM,                       ENOMEM_btree_key_cache_create)          \
+       x(ENOMEM,                       ENOMEM_btree_key_cache_fill)            \
+       x(ENOMEM,                       ENOMEM_btree_key_cache_insert)          \
+       x(ENOMEM,                       ENOMEM_trans_kmalloc)                   \
+       x(ENOMEM,                       ENOMEM_trans_log_msg)                   \
+       x(ENOMEM,                       ENOMEM_do_encrypt)                      \
+       x(ENOMEM,                       ENOMEM_ec_read_extent)                  \
+       x(ENOMEM,                       ENOMEM_ec_stripe_mem_alloc)             \
+       x(ENOMEM,                       ENOMEM_ec_new_stripe_alloc)             \
+       x(ENOMEM,                       ENOMEM_fs_btree_cache_init)             \
+       x(ENOMEM,                       ENOMEM_fs_btree_key_cache_init)         \
+       x(ENOMEM,                       ENOMEM_fs_counters_init)                \
+       x(ENOMEM,                       ENOMEM_fs_btree_write_buffer_init)      \
+       x(ENOMEM,                       ENOMEM_io_clock_init)                   \
+       x(ENOMEM,                       ENOMEM_blacklist_table_init)            \
+       x(ENOMEM,                       ENOMEM_sb_realloc_injected)             \
+       x(ENOMEM,                       ENOMEM_sb_bio_realloc)                  \
+       x(ENOMEM,                       ENOMEM_sb_buf_realloc)                  \
+       x(ENOMEM,                       ENOMEM_sb_journal_validate)             \
+       x(ENOMEM,                       ENOMEM_sb_journal_v2_validate)          \
+       x(ENOMEM,                       ENOMEM_journal_entry_add)               \
+       x(ENOMEM,                       ENOMEM_journal_read_buf_realloc)        \
+       x(ENOMEM,                       ENOMEM_btree_interior_update_worker_init)\
+       x(ENOMEM,                       ENOMEM_btree_interior_update_pool_init) \
+       x(ENOMEM,                       ENOMEM_bio_read_init)                   \
+       x(ENOMEM,                       ENOMEM_bio_read_split_init)             \
+       x(ENOMEM,                       ENOMEM_bio_write_init)                  \
+       x(ENOMEM,                       ENOMEM_bio_bounce_pages_init)           \
+       x(ENOMEM,                       ENOMEM_writepage_bioset_init)           \
+       x(ENOMEM,                       ENOMEM_dio_read_bioset_init)            \
+       x(ENOMEM,                       ENOMEM_dio_write_bioset_init)           \
+       x(ENOMEM,                       ENOMEM_nocow_flush_bioset_init)         \
+       x(ENOMEM,                       ENOMEM_promote_table_init)              \
+       x(ENOMEM,                       ENOMEM_compression_bounce_read_init)    \
+       x(ENOMEM,                       ENOMEM_compression_bounce_write_init)   \
+       x(ENOMEM,                       ENOMEM_compression_workspace_init)      \
+       x(ENOMEM,                       ENOMEM_decompression_workspace_init)    \
+       x(ENOMEM,                       ENOMEM_bucket_gens)                     \
+       x(ENOMEM,                       ENOMEM_buckets_nouse)                   \
+       x(ENOMEM,                       ENOMEM_usage_init)                      \
+       x(ENOMEM,                       ENOMEM_btree_node_read_all_replicas)    \
+       x(ENOMEM,                       ENOMEM_btree_node_reclaim)              \
+       x(ENOMEM,                       ENOMEM_btree_node_mem_alloc)            \
+       x(ENOMEM,                       ENOMEM_btree_cache_cannibalize_lock)    \
+       x(ENOMEM,                       ENOMEM_buckets_waiting_for_journal_init)\
+       x(ENOMEM,                       ENOMEM_buckets_waiting_for_journal_set) \
+       x(ENOMEM,                       ENOMEM_set_nr_journal_buckets)          \
+       x(ENOMEM,                       ENOMEM_dev_journal_init)                \
+       x(ENOMEM,                       ENOMEM_journal_pin_fifo)                \
+       x(ENOMEM,                       ENOMEM_journal_buf)                     \
+       x(ENOMEM,                       ENOMEM_gc_start)                        \
+       x(ENOMEM,                       ENOMEM_gc_alloc_start)                  \
+       x(ENOMEM,                       ENOMEM_gc_reflink_start)                \
+       x(ENOMEM,                       ENOMEM_gc_gens)                         \
+       x(ENOMEM,                       ENOMEM_gc_repair_key)                   \
+       x(ENOMEM,                       ENOMEM_fsck_extent_ends_at)             \
+       x(ENOMEM,                       ENOMEM_fsck_add_nlink)                  \
+       x(ENOMEM,                       ENOMEM_journal_key_insert)              \
+       x(ENOMEM,                       ENOMEM_journal_keys_sort)               \
+       x(ENOMEM,                       ENOMEM_journal_replay)                  \
+       x(ENOMEM,                       ENOMEM_read_superblock_clean)           \
+       x(ENOMEM,                       ENOMEM_fs_alloc)                        \
+       x(ENOMEM,                       ENOMEM_fs_name_alloc)                   \
+       x(ENOMEM,                       ENOMEM_fs_other_alloc)                  \
+       x(ENOMEM,                       ENOMEM_dev_alloc)                       \
        x(ENOSPC,                       ENOSPC_disk_reservation)                \
        x(ENOSPC,                       ENOSPC_bucket_alloc)                    \
        x(ENOSPC,                       ENOSPC_disk_label_add)                  \
        x(ENOSPC,                       ENOSPC_subvolume_create)                \
        x(ENOSPC,                       ENOSPC_sb)                              \
        x(ENOSPC,                       ENOSPC_sb_journal)                      \
+       x(ENOSPC,                       ENOSPC_sb_journal_seq_blacklist)        \
        x(ENOSPC,                       ENOSPC_sb_quota)                        \
        x(ENOSPC,                       ENOSPC_sb_replicas)                     \
        x(ENOSPC,                       ENOSPC_sb_members)                      \
+       x(ENOSPC,                       ENOSPC_sb_crypt)                        \
        x(0,                            open_buckets_empty)                     \
        x(0,                            freelist_empty)                         \
        x(BCH_ERR_freelist_empty,       no_buckets_found)                       \
index e2c09ea4a3e013bd9bb55c2518da017e59dd69c7..b35b584176ee494f41087e95a6b7f29f8fc63c94 100644 (file)
@@ -690,7 +690,21 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
        unsigned durability = 0;
 
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-               durability += bch2_extent_ptr_durability(c,& p);
+               durability += bch2_extent_ptr_durability(c, &p);
+
+       return durability;
+}
+
+static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
+{
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       const union bch_extent_entry *entry;
+       struct extent_ptr_decoded p;
+       unsigned durability = 0;
+
+       bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+               if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
+                       durability += bch2_extent_ptr_durability(c, &p);
 
        return durability;
 }
@@ -990,7 +1004,7 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
        bool first = true;
 
        if (c)
-               prt_printf(out, "durability: %u ", bch2_bkey_durability(c, k));
+               prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
 
        bkey_extent_entry_for_each(ptrs, entry) {
                if (!first)
index b511735b377204f42cfd5bfa2ae50231fc593700..df2f317f544347c2fc9d3ca3ccf06ba98cc2d2af 100644 (file)
@@ -3706,16 +3706,22 @@ int bch2_fs_fsio_init(struct bch_fs *c)
 
        if (bioset_init(&c->writepage_bioset,
                        4, offsetof(struct bch_writepage_io, op.wbio.bio),
-                       BIOSET_NEED_BVECS) ||
-           bioset_init(&c->dio_read_bioset,
+                       BIOSET_NEED_BVECS))
+               return -BCH_ERR_ENOMEM_writepage_bioset_init;
+
+       if (bioset_init(&c->dio_read_bioset,
                        4, offsetof(struct dio_read, rbio.bio),
-                       BIOSET_NEED_BVECS) ||
-           bioset_init(&c->dio_write_bioset,
+                       BIOSET_NEED_BVECS))
+               return -BCH_ERR_ENOMEM_dio_read_bioset_init;
+
+       if (bioset_init(&c->dio_write_bioset,
                        4, offsetof(struct dio_write, op.wbio.bio),
-                       BIOSET_NEED_BVECS) ||
-           bioset_init(&c->nocow_flush_bioset,
+                       BIOSET_NEED_BVECS))
+               return -BCH_ERR_ENOMEM_dio_write_bioset_init;
+
+       if (bioset_init(&c->nocow_flush_bioset,
                        1, offsetof(struct nocow_flush, bio), 0))
-               ret = -ENOMEM;
+               return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
 
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
index 5e6dc6c316d12052d0bdaf85d8d7bab2a130efbe..ed2523ac2249cae71605e9b996b459ad544e4fa6 100644 (file)
@@ -1237,7 +1237,7 @@ static int extent_ends_at(extent_ends *extent_ends,
                              sizeof(seen->ids.data[0]) * seen->ids.size,
                              GFP_KERNEL);
        if (!n.seen.ids.data)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
 
        darray_for_each(*extent_ends, i) {
                if (i->snapshot == k.k->p.snapshot) {
@@ -2141,7 +2141,7 @@ static int add_nlink(struct bch_fs *c, struct nlink_table *t,
                if (!d) {
                        bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
                                new_size);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_fsck_add_nlink;
                }
 
                if (t->d)
index 76856bfd6dc5d17b9ee37477112e0d9ab080b510..d11feb100d7ec928e5a4bc3eac60c7693531620f 100644 (file)
@@ -3024,18 +3024,26 @@ void bch2_fs_io_exit(struct bch_fs *c)
 int bch2_fs_io_init(struct bch_fs *c)
 {
        if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
-                       BIOSET_NEED_BVECS) ||
-           bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
-                       BIOSET_NEED_BVECS) ||
-           bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
-                       BIOSET_NEED_BVECS) ||
-           mempool_init_page_pool(&c->bio_bounce_pages,
+                       BIOSET_NEED_BVECS))
+               return -BCH_ERR_ENOMEM_bio_read_init;
+
+       if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
+                       BIOSET_NEED_BVECS))
+               return -BCH_ERR_ENOMEM_bio_read_split_init;
+
+       if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
+                       BIOSET_NEED_BVECS))
+               return -BCH_ERR_ENOMEM_bio_write_init;
+
+       if (mempool_init_page_pool(&c->bio_bounce_pages,
                                   max_t(unsigned,
                                         c->opts.btree_node_size,
                                         c->opts.encoded_extent_max) /
-                                  PAGE_SIZE, 0) ||
-           rhashtable_init(&c->promote_table, &bch_promote_params))
-               return -ENOMEM;
+                                  PAGE_SIZE, 0))
+               return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
+
+       if (rhashtable_init(&c->promote_table, &bch_promote_params))
+               return -BCH_ERR_ENOMEM_promote_table_init;
 
        return 0;
 }
index 5699a9d8d9f957aa747765fc38e28f6dc37a0352..c9c2ee9c67f633ac838ee94c52de42b63da993fa 100644 (file)
@@ -769,7 +769,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
        new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        if (!bu || !ob || !new_buckets || !new_bucket_seq) {
-               ret = -ENOMEM;
+               ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
                goto err_free;
        }
 
@@ -942,7 +942,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
        unsigned nr;
 
        if (dynamic_fault("bcachefs:add:journal_alloc"))
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_set_nr_journal_buckets;
 
        /* 1/128th of the device by default: */
        nr = ca->mi.nbuckets >> 7;
@@ -1034,7 +1034,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_journal_pin_fifo;
                }
        }
 
@@ -1128,19 +1128,19 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
 
        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_dev_journal_init;
 
        nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
 
        ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
        if (!ca->journal.bio)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_dev_journal_init;
 
        bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
 
        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_dev_journal_init;
 
        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
@@ -1194,7 +1194,7 @@ int bch2_fs_journal_init(struct journal *j)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
 
        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
-               ret = -ENOMEM;
+               ret = -BCH_ERR_ENOMEM_journal_pin_fifo;
                goto out;
        }
 
@@ -1202,7 +1202,7 @@ int bch2_fs_journal_init(struct journal *j)
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
                j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
                if (!j->buf[i].data) {
-                       ret = -ENOMEM;
+                       ret = -BCH_ERR_ENOMEM_journal_buf;
                        goto out;
                }
        }
index cfd92d8b44382d224296df1404c77eab1a1cd031..45b1b839783de66f712aaac1b337cf522e365d84 100644 (file)
@@ -119,7 +119,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
                                journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
                                GFP_KERNEL);
        if (!_i)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_journal_entry_add;
 
        /*
         * Duplicate journal entries? If so we want the one that didn't have a
@@ -149,7 +149,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
 replace:
        i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
        if (!i)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_journal_entry_add;
 
        i->nr_ptrs      = 0;
        i->csum_good    = entry_ptr.csum_good;
@@ -836,12 +836,12 @@ static int journal_read_buf_realloc(struct journal_read_buf *b,
 
        /* the bios are sized for this many pages, max: */
        if (new_size > JOURNAL_ENTRY_SIZE_MAX)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
        new_size = roundup_pow_of_two(new_size);
        n = kvpmalloc(new_size, GFP_KERNEL);
        if (!n)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
        kvpfree(b->data, b->size);
        b->data = n;
index 5be7882342e0f1d2b26e0ab80c9c08d4d77e9a39..fcefbbe7eda8de8fce336380fb7e97cc4eefd50b 100644 (file)
@@ -33,7 +33,7 @@ static int bch2_sb_journal_validate(struct bch_sb *sb,
 
        b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
        if (!b)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_sb_journal_validate;
 
        for (i = 0; i < nr; i++)
                b[i] = le64_to_cpu(journal->buckets[i]);
@@ -116,7 +116,7 @@ static int bch2_sb_journal_v2_validate(struct bch_sb *sb,
 
        b = kmalloc_array(nr, sizeof(*b), GFP_KERNEL);
        if (!b)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_sb_journal_v2_validate;
 
        for (i = 0; i < nr; i++) {
                b[i].start = le64_to_cpu(journal->d[i].start);
index 012c870acce043d2c71159c43c913ba556510a9a..d6b9f2cdf8e7df2664abd4f30df9d67559e0d926 100644 (file)
@@ -103,7 +103,7 @@ int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
        bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
                                        sb_blacklist_u64s(nr + 1));
        if (!bl) {
-               ret = -ENOMEM;
+               ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
                goto out;
        }
 
@@ -168,7 +168,7 @@ int bch2_blacklist_table_initialize(struct bch_fs *c)
        t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
                    GFP_KERNEL);
        if (!t)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_blacklist_table_init;
 
        t->nr = nr;
 
index bb5061bc24d738c2156abe62358d36633e12b640..6f5851ea81b5d5cd713f0e081d6764a57f278f7b 100644 (file)
@@ -60,7 +60,6 @@ struct moving_io {
 static void move_free(struct moving_io *io)
 {
        struct moving_context *ctxt = io->write.ctxt;
-       struct bch_fs *c = ctxt->c;
 
        if (io->b)
                atomic_dec(&io->b->count);
@@ -296,6 +295,7 @@ static int bch2_move_extent(struct btree_trans *trans,
        if (!io)
                goto err;
 
+       INIT_LIST_HEAD(&io->io_list);
        io->write.ctxt          = ctxt;
        io->read_sectors        = k.k->size;
        io->write_sectors       = k.k->size;
index aafe4054d25def18426fa42126bc13c902d8ebd8..73f7663cbd3f4bad5d6bfe8c9de303e65913ca34 100644 (file)
@@ -228,7 +228,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
                if (!new_keys.d) {
                        bch_err(c, "%s: error allocating new key array (size %zu)",
                                __func__, new_keys.size);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_journal_key_insert;
                }
 
                /* Since @keys was full, there was no gap: */
@@ -266,7 +266,7 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
 
        n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
        if (!n)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_journal_key_insert;
 
        bkey_copy(n, k);
        ret = bch2_journal_key_insert_take(c, id, level, n);
@@ -502,8 +502,11 @@ static int journal_keys_sort(struct bch_fs *c)
        keys->size = roundup_pow_of_two(nr_keys);
 
        keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
-       if (!keys->d)
-               return -ENOMEM;
+       if (!keys->d) {
+               bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys)",
+                       nr_keys);
+               return -BCH_ERR_ENOMEM_journal_keys_sort;
+       }
 
        genradix_for_each(&c->journal_entries, iter, _i) {
                i = *_i;
@@ -601,7 +604,7 @@ static int bch2_journal_replay(struct bch_fs *c, u64 start_seq, u64 end_seq)
 
        keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
        if (!keys_sorted)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_journal_replay;
 
        for (i = 0; i < keys->nr; i++)
                keys_sorted[i] = &keys->d[i];
@@ -905,7 +908,7 @@ static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
        }
 
        ret = bch2_sb_clean_validate_late(c, clean, READ);
index 3bff21959d986d388043e978d1525888702b2be8..8935ff5899c99debc21643781896dee8079eeeb6 100644 (file)
@@ -336,7 +336,7 @@ out:
        return ret;
 err:
        bch_err(c, "error updating replicas table: memory allocation failure");
-       ret = -ENOMEM;
+       ret = -BCH_ERR_ENOMEM_replicas_table;
        goto out;
 }
 
@@ -383,14 +383,18 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
        if (c->replicas_gc.entries &&
            !__replicas_has_entry(&c->replicas_gc, new_entry)) {
                new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
-               if (!new_gc.entries)
+               if (!new_gc.entries) {
+                       ret = -BCH_ERR_ENOMEM_cpu_replicas;
                        goto err;
+               }
        }
 
        if (!__replicas_has_entry(&c->replicas, new_entry)) {
                new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
-               if (!new_r.entries)
+               if (!new_r.entries) {
+                       ret = -BCH_ERR_ENOMEM_cpu_replicas;
                        goto err;
+               }
 
                ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
                if (ret)
@@ -425,8 +429,7 @@ out:
 
        return ret;
 err:
-       bch_err(c, "error adding replicas entry: memory allocation failure");
-       ret = -ENOMEM;
+       bch_err(c, "error adding replicas entry: %s", bch2_err_str(ret));
        goto out;
 }
 
@@ -478,7 +481,7 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
                    bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
                        n = cpu_replicas_add_entry(&c->replicas_gc, e);
                        if (!n.entries) {
-                               ret = -ENOMEM;
+                               ret = -BCH_ERR_ENOMEM_cpu_replicas;
                                goto err;
                        }
 
@@ -533,7 +536,7 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
                bch_err(c, "error allocating c->replicas_gc");
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_replicas_gc;
        }
 
        for_each_cpu_replicas_entry(&c->replicas, e)
@@ -562,7 +565,7 @@ retry:
        new.entries     = kcalloc(nr, new.entry_size, GFP_KERNEL);
        if (!new.entries) {
                bch_err(c, "error allocating c->replicas_gc");
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_replicas_gc;
        }
 
        mutex_lock(&c->sb_lock);
@@ -621,7 +624,7 @@ int bch2_replicas_set_usage(struct bch_fs *c,
 
                n = cpu_replicas_add_entry(&c->replicas, r);
                if (!n.entries)
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_cpu_replicas;
 
                ret = replicas_table_update(c, &n);
                if (ret)
@@ -655,7 +658,7 @@ __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
 
        cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_cpu_replicas;
 
        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;
@@ -687,7 +690,7 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 
        cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_cpu_replicas;
 
        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;
@@ -717,9 +720,8 @@ int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
                ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
        else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
-
        if (ret)
-               return -ENOMEM;
+               return ret;
 
        bch2_cpu_replicas_sort(&new_r);
 
@@ -881,8 +883,9 @@ static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
        struct bch_replicas_cpu cpu_r;
        int ret;
 
-       if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
-               return -ENOMEM;
+       ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
+       if (ret)
+               return ret;
 
        ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
        kfree(cpu_r.entries);
@@ -919,8 +922,9 @@ static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *
        struct bch_replicas_cpu cpu_r;
        int ret;
 
-       if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
-               return -ENOMEM;
+       ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
+       if (ret)
+               return ret;
 
        ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
        kfree(cpu_r.entries);
index bcc67c0f5dfc95992c05827ade2b759fd20e913a..43d83705a7aee6ce209ca3c3023b058ac3eda1f0 100644 (file)
@@ -87,7 +87,7 @@ int bch2_mark_snapshot(struct btree_trans *trans,
                               U32_MAX - new.k->p.offset,
                               GFP_KERNEL);
        if (!t)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_mark_snapshot;
 
        if (new.k->type == KEY_TYPE_snapshot) {
                struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
index b9af78203fb895fbec60da9d6d8435e02e271cd8..519df09917e3cbaed9472647968f1c4a556695f9 100644 (file)
@@ -136,14 +136,14 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
                return 0;
 
        if (dynamic_fault("bcachefs:add:super_realloc"))
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_sb_realloc_injected;
 
        if (sb->have_bio) {
                unsigned nr_bvecs = DIV_ROUND_UP(new_buffer_size, PAGE_SIZE);
 
                bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
                if (!bio)
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_sb_bio_realloc;
 
                bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);
 
@@ -153,7 +153,7 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
 
        new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
        if (!new_sb)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_sb_buf_realloc;
 
        sb->sb = new_sb;
        sb->buffer_size = new_buffer_size;
@@ -559,8 +559,9 @@ reread:
        }
 
        if (bytes > sb->buffer_size) {
-               if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s)))
-                       return -ENOMEM;
+               ret = bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s));
+               if (ret)
+                       return ret;
                goto reread;
        }
 
index 3a7f4e295cbd2840ca27fddcf9f8ccb983f15778..8a269b680295f4ae7da7de1a81713a49db797bbb 100644 (file)
@@ -647,7 +647,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 
        c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
        if (!c) {
-               c = ERR_PTR(-ENOMEM);
+               c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
                goto out;
        }
 
@@ -737,7 +737,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        strscpy(c->name, name.buf, sizeof(c->name));
        printbuf_exit(&name);
 
-       ret = name.allocation_failure ? -ENOMEM : 0;
+       ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
        if (ret)
                goto err;
 
@@ -801,7 +801,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
            mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
            !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
                                              sizeof(u64), GFP_KERNEL))) {
-               ret = -ENOMEM;
+               ret = -BCH_ERR_ENOMEM_fs_other_alloc;
                goto err;
        }
 
@@ -1182,7 +1182,7 @@ out:
 err:
        if (ca)
                bch2_dev_free(ca);
-       ret = -ENOMEM;
+       ret = -BCH_ERR_ENOMEM_dev_alloc;
        goto out;
 }