git.sesse.net Git - bcachefs-tools-debian/commitdiff
Update bcachefs sources to d3422f9b18 bcachefs: Journal initialization fixes
author    Kent Overstreet <kent.overstreet@gmail.com>
          Wed, 22 Dec 2021 02:53:07 +0000 (21:53 -0500)
committer Kent Overstreet <kent.overstreet@gmail.com>
          Wed, 22 Dec 2021 02:53:07 +0000 (21:53 -0500)
12 files changed:
.bcachefs_revision
libbcachefs/alloc_foreground.c
libbcachefs/bcachefs.h
libbcachefs/btree_cache.c
libbcachefs/btree_iter.c
libbcachefs/btree_iter.h
libbcachefs/btree_key_cache.c
libbcachefs/btree_types.h
libbcachefs/btree_update_interior.c
libbcachefs/btree_update_leaf.c
libbcachefs/journal.c
libbcachefs/journal_io.c

diff --git a/.bcachefs_revision b/.bcachefs_revision
index 4bae87bd4cc2cacc4fb9f5b780c31bbcc83896f6..09903fec6d291d0e0268c0d4f3a57599088b5b29 100644 (file)
@@ -1 +1 @@
-ff3a76e1af04f51506f45e0f71d53f7e6dd51a75
+d3422f9b18ea3154abe19d859f1a61c4fae9ccdc
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index 2bb107b8b0b9f2aa6bbd66915cd26f8bc12b0d48..dce77cc27cbe9c911bdc7ab638015cac270988c6 100644 (file)
@@ -152,6 +152,7 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 
        for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
                if (is_available_bucket(buckets->b[b].mark) &&
+                   (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)) &&
                    !buckets->b[b].mark.owned_by_allocator)
                        goto success;
        b = -1;
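
The new condition makes bch2_bucket_alloc_new_fs() honor the buckets_nouse bitmap, which is NULL when no buckets are excluded, hence the pointer test before test_bit(). A minimal userspace sketch of the same guard, assuming a plain unsigned long bitmap and a bit_is_set() helper standing in for the kernel's test_bit():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Userspace stand-in for the kernel's test_bit(). */
static bool bit_is_set(size_t nr, const unsigned long *bitmap)
{
        return bitmap[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG) & 1;
}

/* First available bucket not marked nouse, or -1; a NULL bitmap excludes nothing. */
static long first_usable(const bool *available, size_t nbuckets,
                         const unsigned long *nouse)
{
        for (size_t b = 0; b < nbuckets; b++)
                if (available[b] &&
                    (!nouse || !bit_is_set(b, nouse)))
                        return (long) b;
        return -1;
}

int main(void)
{
        bool avail[4]          = { false, true, true, false };
        unsigned long nouse[1] = { 1UL << 1 };  /* bucket 1 excluded */

        printf("%ld\n", first_usable(avail, 4, nouse)); /* 2 */
        printf("%ld\n", first_usable(avail, 4, NULL));  /* 1 */
        return 0;
}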
diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h
index 5c01f0564752b7219cdd0e092f4c815674dcf239..540492b04457c68abd6307b1098e154b3f9e8189 100644 (file)
@@ -705,6 +705,7 @@ struct bch_fs {
        struct btree_path_buf  __percpu *btree_paths_bufs;
 
        struct srcu_struct      btree_trans_barrier;
+       bool                    btree_trans_barrier_initialized;
 
        struct btree_key_cache  btree_key_cache;
 
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 5ae61e5d39232b39924282d4eb282aefeffd9529..3411d5a02203efc675c42ff16e47e075d600865b 100644 (file)
@@ -768,16 +768,17 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 
        EBUG_ON(level >= BTREE_MAX_DEPTH);
 
-       if (c->opts.btree_node_mem_ptr_optimization) {
-               b = btree_node_mem_ptr(k);
-               /*
-                * Check b->hash_val _before_ calling btree_node_lock() - this
-                * might not be the node we want anymore, and trying to lock the
-                * wrong node could cause an unnecessary transaction restart:
-                */
-               if (b && b->hash_val == btree_ptr_hash_val(k))
+       b = btree_node_mem_ptr(k);
+
+       /*
+        * Check b->hash_val _before_ calling btree_node_lock() - this might not
+        * be the node we want anymore, and trying to lock the wrong node could
+        * cause an unnecessary transaction restart:
+        */
+       if (likely(c->opts.btree_node_mem_ptr_optimization &&
+                  b &&
+                  b->hash_val == btree_ptr_hash_val(k)))
                        goto lock_node;
-       }
 retry:
        b = btree_cache_find(bc, k);
        if (unlikely(!b)) {
diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index bdbb90014b5c0a38197bee08e49145e707ba6468..e9091f8a153e265271cb04ca9ca203dbfed58948 100644 (file)
@@ -1818,12 +1818,14 @@ static struct btree_path *btree_path_alloc(struct btree_trans *trans,
        return path;
 }
 
-struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached,
+struct btree_path *bch2_path_get(struct btree_trans *trans,
                                 enum btree_id btree_id, struct bpos pos,
                                 unsigned locks_want, unsigned level,
-                                bool intent, unsigned long ip)
+                                unsigned flags, unsigned long ip)
 {
        struct btree_path *path, *path_pos = NULL;
+       bool cached = flags & BTREE_ITER_CACHED;
+       bool intent = flags & BTREE_ITER_INTENT;
        int i;
 
        BUG_ON(trans->restarted);
@@ -1845,7 +1847,6 @@ struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached,
            path_pos->level     == level) {
                __btree_path_get(path_pos, intent);
                path = btree_path_set_pos(trans, path_pos, pos, intent, ip);
-               path->preserve = true;
        } else {
                path = btree_path_alloc(trans, path_pos);
                path_pos = NULL;
@@ -1854,7 +1855,6 @@ struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached,
                path->pos                       = pos;
                path->btree_id                  = btree_id;
                path->cached                    = cached;
-               path->preserve                  = true;
                path->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
                path->should_be_locked          = false;
                path->level                     = level;
@@ -1869,6 +1869,9 @@ struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached,
                btree_trans_verify_sorted(trans);
        }
 
+       if (!(flags & BTREE_ITER_NOPRESERVE))
+               path->preserve = true;
+
        if (path->intent_ref)
                locks_want = max(locks_want, level + 1);
 
@@ -2625,13 +2628,8 @@ static void __bch2_trans_iter_init(struct btree_trans *trans,
        iter->ip_allocated = ip;
 #endif
 
-       iter->path = bch2_path_get(trans,
-                                  flags & BTREE_ITER_CACHED,
-                                  btree_id,
-                                  iter->pos,
-                                  locks_want,
-                                  depth,
-                                  flags & BTREE_ITER_INTENT, ip);
+       iter->path = bch2_path_get(trans, btree_id, iter->pos,
+                                  locks_want, depth, flags, ip);
 }
 
 void bch2_trans_iter_init(struct btree_trans *trans,
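
The two hunks above serve one refactor: bch2_path_get() now takes the iterator's whole flags word instead of separate cached and intent bools, decoding them internally, so call sites like __bch2_trans_iter_init() forward flags untouched; path->preserve is likewise now derived from a flag, set unless BTREE_ITER_NOPRESERVE is passed. A minimal sketch of the bools-to-flags refactor, with hypothetical FOO_* names:

#include <stdbool.h>
#include <stdio.h>

#define FOO_CACHED      (1 << 0)
#define FOO_INTENT      (1 << 1)

/* Before: every boolean option is its own parameter. */
static void path_get_old(bool cached, bool intent)
{
        printf("cached=%d intent=%d\n", cached, intent);
}

/* After: one flags word; callers forward it and the callee decodes it. */
static void path_get_new(unsigned flags)
{
        bool cached = flags & FOO_CACHED;
        bool intent = flags & FOO_INTENT;

        printf("cached=%d intent=%d\n", cached, intent);
}

int main(void)
{
        unsigned flags = FOO_CACHED | FOO_INTENT;

        path_get_old(flags & FOO_CACHED, flags & FOO_INTENT);
        path_get_new(flags);    /* same result; new flags need no new parameters */
        return 0;
}

The payoff shows up elsewhere in this commit: the new BTREE_ITER_NOPRESERVE bit reaches bch2_path_get() without another signature change.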
@@ -2958,22 +2956,27 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 
 void bch2_fs_btree_iter_exit(struct bch_fs *c)
 {
+       if (c->btree_trans_barrier_initialized)
+               cleanup_srcu_struct(&c->btree_trans_barrier);
        mempool_exit(&c->btree_trans_mem_pool);
        mempool_exit(&c->btree_paths_pool);
-       cleanup_srcu_struct(&c->btree_trans_barrier);
 }
 
 int bch2_fs_btree_iter_init(struct bch_fs *c)
 {
        unsigned nr = BTREE_ITER_MAX;
+       int ret;
 
        INIT_LIST_HEAD(&c->btree_trans_list);
        mutex_init(&c->btree_trans_lock);
 
-       return  init_srcu_struct(&c->btree_trans_barrier) ?:
-               mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
+       ret   = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
                        sizeof(struct btree_path) * nr +
                        sizeof(struct btree_insert_entry) * nr) ?:
                mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
-                                         BTREE_TRANS_MEM_MAX);
+                                         BTREE_TRANS_MEM_MAX) ?:
+               init_srcu_struct(&c->btree_trans_barrier);
+       if (!ret)
+               c->btree_trans_barrier_initialized = true;
+       return ret;
 }
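
Previously init_srcu_struct() ran first but could be followed by a mempool_init failure, while bch2_fs_btree_iter_exit() called cleanup_srcu_struct() unconditionally, so teardown could clean up a barrier that was never set up. The fix initializes the SRCU struct last and lets exit consult btree_trans_barrier_initialized. A minimal userspace sketch of that record-success-then-guard-teardown pattern, with invented names:

#include <stdbool.h>
#include <stdlib.h>

struct fs {
        void *pool_a, *pool_b;
        bool  barrier_initialized;      /* true only once barrier_init() succeeded */
};

static int  barrier_init(struct fs *c)    { (void) c; return 0; }
static void barrier_cleanup(struct fs *c) { (void) c; }

static int fs_init(struct fs *c)
{
        int ret;

        c->pool_a = malloc(64);
        c->pool_b = malloc(64);
        /* The barrier is set up last, only after the allocations succeeded. */
        ret = (!c->pool_a || !c->pool_b) ? -1 : barrier_init(c);

        /* Record success so teardown knows what actually exists. */
        if (!ret)
                c->barrier_initialized = true;
        return ret;
}

static void fs_exit(struct fs *c)
{
        /* Never clean up what was never initialized. */
        if (c->barrier_initialized)
                barrier_cleanup(c);
        free(c->pool_b);
        free(c->pool_a);
}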
diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h
index 26eb90a7eab8e4eab2b66a16b399843bab0de46e..4c903b9dd7160774297c8b1e14df8936faa516d2 100644 (file)
@@ -134,9 +134,8 @@ bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
                         bool, unsigned long);
 int __must_check bch2_btree_path_traverse(struct btree_trans *,
                                          struct btree_path *, unsigned);
-struct btree_path *bch2_path_get(struct btree_trans *, bool, enum btree_id,
-                                struct bpos, unsigned, unsigned, bool,
-                                unsigned long);
+struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
+                                unsigned, unsigned, unsigned, unsigned long);
 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
 
 #ifdef CONFIG_BCACHEFS_DEBUG
diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c
index 4f1bc1d165aa6331fe140c507987b9786b6f3f11..230a920ae32ac4dd4f80ff7dced275360c35a074 100644 (file)
@@ -662,11 +662,12 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 
        rcu_read_lock();
        tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
-       for (i = 0; i < tbl->size; i++)
-               rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
-                       bkey_cached_evict(bc, ck);
-                       list_add(&ck->list, &bc->freed);
-               }
+       if (tbl)
+               for (i = 0; i < tbl->size; i++)
+                       rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+                               bkey_cached_evict(bc, ck);
+                               list_add(&ck->list, &bc->freed);
+                       }
        rcu_read_unlock();
 
        list_for_each_entry_safe(ck, n, &bc->freed, list) {
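
bch2_fs_btree_key_cache_exit() can run on a cache whose rhashtable never got a table (an early error path), so the eviction walk is now skipped when tbl is NULL. A small standalone sketch of guarding teardown against a never-allocated table, with invented types:

#include <stddef.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
};

struct table {
        size_t        size;
        struct entry *buckets[];        /* flexible array of hash chains */
};

/*
 * Teardown may run on a cache whose table was never allocated (an early
 * error path), so check the pointer before walking, as the patch does.
 */
static void cache_exit(struct table *tbl)
{
        if (tbl)
                for (size_t i = 0; i < tbl->size; i++)
                        while (tbl->buckets[i]) {
                                struct entry *e = tbl->buckets[i];

                                tbl->buckets[i] = e->next;
                                free(e);
                        }
        free(tbl);      /* free(NULL) is a no-op */
}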
diff --git a/libbcachefs/btree_types.h b/libbcachefs/btree_types.h
index 22dbbe365bbe9722de2340da53b9e5f80e98af00..c84bba7bcda5bf227d5d0a01251f2f77386d2362 100644 (file)
@@ -210,6 +210,7 @@ struct btree_node_iter {
 #define __BTREE_ITER_ALL_SNAPSHOTS     (1 << 11)
 #define BTREE_ITER_ALL_SNAPSHOTS       (1 << 12)
 #define BTREE_ITER_FILTER_SNAPSHOTS    (1 << 13)
+#define BTREE_ITER_NOPRESERVE          (1 << 14)
 
 enum btree_path_uptodate {
        BTREE_ITER_UPTODATE             = 0,
diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c
index d895d4eff0a924bc5c989234d172cb738ce95bc6..f5d879dee423c8b700e7879ac35e004275e3372d 100644 (file)
@@ -1609,8 +1609,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
                ? bpos_predecessor(b->data->min_key)
                : bpos_successor(b->data->max_key);
 
-       sib_path = bch2_path_get(trans, false, path->btree_id, sib_pos,
-                                U8_MAX, level, true, _THIS_IP_);
+       sib_path = bch2_path_get(trans, path->btree_id, sib_pos,
+                                U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
        ret = bch2_btree_path_traverse(trans, sib_path, false);
        if (ret)
                goto err;
diff --git a/libbcachefs/btree_update_leaf.c b/libbcachefs/btree_update_leaf.c
index 95d19887bd40b3ffed5aaf5dff5427480af67ced..1966441b1a620e3d8875cf6eb670a25de546e005 100644 (file)
@@ -1271,24 +1271,23 @@ err:
  * When deleting, check if we need to emit a whiteout (because we're overwriting
  * something in an ancestor snapshot)
  */
-static int need_whiteout_for_snapshot(struct btree_trans *trans, struct btree_iter *orig)
+static int need_whiteout_for_snapshot(struct btree_trans *trans,
+                                     enum btree_id btree_id, struct bpos pos)
 {
        struct btree_iter iter;
        struct bkey_s_c k;
-       u32 snapshot = orig->pos.snapshot;
+       u32 snapshot = pos.snapshot;
        int ret;
 
-       if (!bch2_snapshot_parent(trans->c, snapshot))
+       if (!bch2_snapshot_parent(trans->c, pos.snapshot))
                return 0;
 
-       bch2_trans_copy_iter(&iter, orig);
-       iter.flags &= BTREE_ITER_FILTER_SNAPSHOTS;
-       iter.flags |= BTREE_ITER_ALL_SNAPSHOTS;
+       pos.snapshot++;
 
-       bch2_btree_iter_advance(&iter);
-
-       for_each_btree_key_continue_norestart(iter, 0, k, ret) {
-               if (bkey_cmp(k.k->p, orig->pos))
+       for_each_btree_key_norestart(trans, iter, btree_id, pos,
+                          BTREE_ITER_ALL_SNAPSHOTS|
+                          BTREE_ITER_NOPRESERVE, k, ret) {
+               if (bkey_cmp(k.k->p, pos))
                        break;
 
                if (bch2_snapshot_is_ancestor(trans->c, snapshot,
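
The reworked need_whiteout_for_snapshot() drops the iterator copy: it starts a fresh lookup at pos with the snapshot field bumped by one, walks all snapshots at that position until the key position changes, and asks for each key whether its snapshot relates to the one being deleted; BTREE_ITER_NOPRESERVE keeps this throwaway lookup from pinning a btree path. A toy sketch of the ancestor test driving such a scan, assuming a parent-array snapshot tree in which parents happen to have larger ids (an assumption of the sketch, not a claim about bcachefs id allocation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy snapshot tree, parent[id] is id's parent, 0 = root/none:
 *
 *        5
 *       / \
 *      3   4
 *      |
 *      1
 */
static const uint32_t parent[] = { 0, 3, 0, 5, 5, 0 };

/* True if anc lies on id's parent chain (or equals it). */
static bool is_ancestor(uint32_t id, uint32_t anc)
{
        while (id && id < anc)
                id = parent[id];
        return id == anc;
}

int main(void)
{
        printf("%d\n", is_ancestor(1, 5));      /* 1: 1 -> 3 -> 5 */
        printf("%d\n", is_ancestor(1, 4));      /* 0: sibling branch */
        return 0;
}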
@@ -1314,7 +1313,6 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 
        BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
        BUG_ON(bpos_cmp(k->k.p, iter->path->pos));
-       BUG_ON(bpos_cmp(k->k.p, iter->pos));
 
        n = (struct btree_insert_entry) {
                .flags          = flags,
@@ -1335,7 +1333,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 
        if (bkey_deleted(&n.k->k) &&
            (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
-               int ret = need_whiteout_for_snapshot(trans, iter);
+               int ret = need_whiteout_for_snapshot(trans, n.btree_id, n.k->k.p);
                if (unlikely(ret < 0))
                        return ret;
 
diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c
index ff8b81fa677213e643d164e9031d635421f3581d..40e7cb62f179cca612b55e77e3298d8a010e3070 100644 (file)
@@ -642,6 +642,7 @@ int bch2_journal_flush_seq(struct journal *j, u64 seq)
 
 int bch2_journal_meta(struct journal *j)
 {
+       struct journal_buf *buf;
        struct journal_res res;
        int ret;
 
@@ -651,6 +652,10 @@ int bch2_journal_meta(struct journal *j)
        if (ret)
                return ret;
 
+       buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
+       buf->must_flush = true;
+       set_bit(JOURNAL_NEED_WRITE, &j->flags);
+
        bch2_journal_res_put(j, &res);
 
        return bch2_journal_flush_seq(j, res.seq);
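
bch2_journal_meta() now marks its reserved buffer must_flush and sets JOURNAL_NEED_WRITE while the reservation is still held, so the following bch2_journal_flush_seq() is guaranteed a real flush write rather than an entry that could be skipped as empty. A minimal sketch of that mark-before-release ordering, with a mutex standing in for the journal reservation and all names invented:

#include <pthread.h>
#include <stdbool.h>

struct buf {
        bool must_flush;        /* this entry must reach disk as a flush write */
};

struct journal {
        pthread_mutex_t res_lock;       /* stands in for the journal reservation */
        struct buf      cur;
        bool            need_write;
};

static void journal_meta(struct journal *j)
{
        /*
         * Flag the buffer while the reservation is still held: once it is
         * released, the buffer may be sealed and submitted at any moment,
         * and a flag set afterwards could miss the write entirely.
         */
        pthread_mutex_lock(&j->res_lock);       /* "journal_res_get" */
        j->cur.must_flush = true;
        j->need_write     = true;
        pthread_mutex_unlock(&j->res_lock);     /* "journal_res_put" */

        /* ... then wait for that sequence to be flushed to disk ... */
}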
@@ -995,6 +1000,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
        j->replay_journal_seq   = last_seq;
        j->replay_journal_seq_end = cur_seq;
        j->last_seq_ondisk      = last_seq;
+       j->flushed_seq_ondisk   = last_seq;
        j->pin.front            = last_seq;
        j->pin.back             = cur_seq;
        atomic64_set(&j->seq, cur_seq - 1);
@@ -1011,6 +1017,9 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
                if (seq < last_seq)
                        continue;
 
+               if (journal_entry_empty(&i->j))
+                       j->last_empty_seq = le64_to_cpu(i->j.seq);
+
                p = journal_seq_pin(j, seq);
 
                p->devs.nr = 0;
@@ -1018,6 +1027,9 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
                        bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
        }
 
+       if (list_empty(journal_entries))
+               j->last_empty_seq = cur_seq;
+
        spin_lock(&j->lock);
 
        set_bit(JOURNAL_STARTED, &j->flags);
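
These journal-start fixes seed the sequence bookkeeping from what replay actually found: flushed_seq_ondisk starts at last_seq, every empty replayed entry records its sequence in last_empty_seq, and a journal with no entries at all treats cur_seq as the last empty sequence, so comparisons against real sequence numbers behave sensibly before the first write. A minimal sketch of seeding such counters at startup, with invented names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct replay_entry {
        uint64_t seq;
        bool     empty;         /* entry carried no keys */
};

struct journal_state {
        uint64_t flushed_seq_ondisk;
        uint64_t last_empty_seq;
};

/* Seed the counters from replay instead of leaving them at zero. */
static void journal_start(struct journal_state *j,
                          const struct replay_entry *entries, size_t n,
                          uint64_t last_seq, uint64_t cur_seq)
{
        j->flushed_seq_ondisk = last_seq;       /* everything below last_seq hit disk */

        for (size_t i = 0; i < n; i++) {
                if (entries[i].seq < last_seq)
                        continue;
                if (entries[i].empty)
                        j->last_empty_seq = entries[i].seq;
        }

        if (!n)                                 /* brand-new journal */
                j->last_empty_seq = cur_seq;
}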
diff --git a/libbcachefs/journal_io.c b/libbcachefs/journal_io.c
index 80e0dd311ffd6c764d199ca56d340a19e6554187..564de51df7ef5eb657b125959a6265ee4aec362b 100644 (file)
@@ -1448,7 +1448,7 @@ void bch2_journal_write(struct closure *cl)
        SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
        SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
 
-       if (journal_entry_empty(jset))
+       if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
                j->last_empty_seq = le64_to_cpu(jset->seq);
 
        if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))