git.sesse.net Git - bcachefs-tools-debian/commitdiff
Update bcachefs sources to 400f275d46 bcachefs: Fix check_overlapping_extents()
author Kent Overstreet <kent.overstreet@linux.dev>
Sat, 13 May 2023 04:13:57 +0000 (00:13 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sat, 13 May 2023 04:13:57 +0000 (00:13 -0400)
.bcachefs_revision
include/linux/six.h
libbcachefs/fsck.c
libbcachefs/journal_io.c
libbcachefs/quota.c
libbcachefs/replicas.c
libbcachefs/subvolume.c

index 084fc8af9e8298925be6d10b1b46bd6bd0a3234f..711614af9e03b58e10c3846b043399c2c2cd774d 100644 (file)
@@ -1 +1 @@
-5074caad6a72d6e38c21a7e02f5e62048f2046d7
+400f275d46228e0ca08d8c931a674a534db0f4fb
index 16ad2073f71c551004eb2327bd28040b57c38676..83023f64a30e2a20108c79d495487fac6f916728 100644 (file)
@@ -49,7 +49,7 @@
  *   six_trylock_type(lock, type)
  *   six_trylock_convert(lock, from, to)
  *
- * A lock may be held multiple types by the same thread (for read or intent,
+ * A lock may be held multiple times by the same thread (for read or intent,
  * not write). However, the six locks code does _not_ implement the actual
  * recursive checks itself though - rather, if your code (e.g. btree iterator
  * code) knows that the current thread already has a lock held, and for the
index eb3609aa45933afcca1d37b3e3554b15540ba99e..1b3ee66265c95b454c395598c6f390c248cec632 100644 (file)
@@ -1176,7 +1176,7 @@ static int check_overlapping_extents(struct btree_trans *trans,
                                goto err;
                        bkey_reassemble(update, k);
                        ret = bch2_trans_update_extent(trans, iter, update, 0);
-                       if (!ret)
+                       if (ret)
                                goto err;
                }
        }
index ede9d198bb85186d60c68dbc0406d5ae568d892f..b455ef041dfefd6b6ab1c7bae0608406d2decba9 100644 (file)
@@ -1743,7 +1743,16 @@ void bch2_journal_write(struct closure *cl)
        BUG_ON(u64s > j->entry_u64s_reserved);
 
        le32_add_cpu(&jset->u64s, u64s);
-       BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
+
+       sectors = vstruct_sectors(jset, c->block_bits);
+       bytes   = vstruct_bytes(jset);
+
+       if (sectors > w->sectors) {
+               bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
+                                   vstruct_bytes(jset), w->sectors << 9,
+                                   u64s, w->u64s_reserved, j->entry_u64s_reserved);
+               goto err;
+       }
 
        jset->magic             = cpu_to_le64(jset_magic(c));
        jset->version           = c->sb.version < bcachefs_metadata_version_bkey_renumber
@@ -1780,10 +1789,6 @@ void bch2_journal_write(struct closure *cl)
            jset_validate(c, NULL, jset, 0, WRITE))
                goto err;
 
-       sectors = vstruct_sectors(jset, c->block_bits);
-       BUG_ON(sectors > w->sectors);
-
-       bytes = vstruct_bytes(jset);
        memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
 
 retry_alloc:
index cc0db72ce1df8163aa01f3f317051e9248eccaab..310eb9d26571477af0d99ac2ac9b14dc17f61ce8 100644 (file)
@@ -2,6 +2,7 @@
 #include "bcachefs.h"
 #include "btree_update.h"
 #include "errcode.h"
+#include "error.h"
 #include "inode.h"
 #include "quota.h"
 #include "subvolume.h"
@@ -561,6 +562,9 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
 
        ret = bch2_snapshot_tree_lookup(trans,
                        snapshot_t(c, k.k->p.snapshot)->tree, &s_t);
+       bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+                       "%s: snapshot tree %u not found", __func__,
+                       snapshot_t(c, k.k->p.snapshot)->tree);
        if (ret)
                return ret;
 
index 8ae50dfd8c8c0f14e4ee6b764a9faa886c909b5f..76efbfce7683bca0c30dc53accd0049ba170810b 100644 (file)
@@ -460,36 +460,11 @@ int bch2_replicas_delta_list_mark(struct bch_fs *c,
 
 int bch2_replicas_gc_end(struct bch_fs *c, int ret)
 {
-       unsigned i;
-
        lockdep_assert_held(&c->replicas_gc_lock);
 
        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);
 
-       /*
-        * this is kind of crappy; the replicas gc mechanism needs to be ripped
-        * out
-        */
-
-       for (i = 0; i < c->replicas.nr; i++) {
-               struct bch_replicas_entry *e =
-                       cpu_replicas_entry(&c->replicas, i);
-               struct bch_replicas_cpu n;
-
-               if (!__replicas_has_entry(&c->replicas_gc, e) &&
-                   bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
-                       n = cpu_replicas_add_entry(&c->replicas_gc, e);
-                       if (!n.entries) {
-                               ret = -BCH_ERR_ENOMEM_cpu_replicas;
-                               goto err;
-                       }
-
-                       swap(n, c->replicas_gc);
-                       kfree(n.entries);
-               }
-       }
-
        ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
        if (ret)
                goto err;
index 5b145d850b24083a0b1a8af201d7f1d4099da05e..388fa12bbd8b497e9eac733461553c487547978e 100644 (file)
@@ -37,14 +37,8 @@ int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
                              struct bch_snapshot_tree *s)
 {
-       int ret;
-
-       ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
-                                     BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
-
-       if (bch2_err_matches(ret, ENOENT))
-               bch_err(trans->c, "snapshot tree %u not found", id);
-       return ret;
+       return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
+                                      BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
 }
 
 static struct bkey_i_snapshot_tree *
@@ -434,6 +428,8 @@ static int snapshot_tree_ptr_good(struct btree_trans *trans,
        struct bch_snapshot_tree s_t;
        int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
 
+       if (bch2_err_matches(ret, ENOENT))
+               return 0;
        if (ret)
                return ret;
 
@@ -467,10 +463,10 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,
        tree_id = le32_to_cpu(root.v->tree);
 
        ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
-       if (ret)
+       if (ret && !bch2_err_matches(ret, ENOENT))
                return ret;
 
-       if (le32_to_cpu(s_t.root_snapshot) != root_id) {
+       if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
                u = bch2_bkey_make_mut_typed(trans, &root_iter, root.s_c, 0, snapshot);
                ret =   PTR_ERR_OR_ZERO(u) ?:
                        snapshot_tree_create(trans, root_id,
@@ -664,6 +660,10 @@ static int check_subvol(struct btree_trans *trans,
                struct bch_snapshot_tree st;
 
                ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
+
+               bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+                               "%s: snapshot tree %u not found", __func__, snapshot_tree);
+
                if (ret)
                        return ret;