Update bcachefs sources to 99175e5712 bcachefs: Fix bch2_check_discard_freespace_key()
author     Kent Overstreet <kent.overstreet@linux.dev>
Sun, 18 Jun 2023 17:50:30 +0000 (13:50 -0400)
committer  Kent Overstreet <kent.overstreet@linux.dev>
Sun, 18 Jun 2023 18:34:05 +0000 (14:34 -0400)
.bcachefs_revision
libbcachefs/alloc_background.c
libbcachefs/btree_cache.c
libbcachefs/btree_iter.c
libbcachefs/btree_locking.c
libbcachefs/btree_locking.h
libbcachefs/btree_update_interior.c
libbcachefs/data_update.c
libbcachefs/extents.c
libbcachefs/extents.h
linux/six.c

diff --git a/.bcachefs_revision b/.bcachefs_revision
index b9058b5e1bc2fed661fb02ce93226f8e9895bc2a..6ec4d5d73f29a2f0ca9dc02f58235b3cef36d90a 100644
@@ -1 +1 @@
-01d7ad6d95c85cf5434a891e6dd7971797e0e1fa
+99175e5712ecc323930db29565b43d5d0e55a276
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index f774a660a68122e561082399e0f84d306ddb8eeb..00cdfcb838eaf991ca84fe3af367bff98d7c6251 100644
@@ -788,10 +788,12 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
        if (ca->mi.freespace_initialized &&
            test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags) &&
            bch2_trans_inconsistent_on(old.k->type != old_type, trans,
-                       "incorrect key when %s %s btree (got %s should be %s)\n"
+                       "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
                        "  for %s",
                        set ? "setting" : "clearing",
                        bch2_btree_ids[btree],
+                       iter.pos.inode,
+                       iter.pos.offset,
                        bch2_bkey_types[old.k->type],
                        bch2_bkey_types[old_type],
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
@@ -1278,8 +1280,8 @@ fsck_err:
        return ret;
 }
 
-static int bch2_check_discard_freespace_key(struct btree_trans *trans,
-                                           struct btree_iter *iter)
+static int __bch2_check_discard_freespace_key(struct btree_trans *trans,
+                                             struct btree_iter *iter)
 {
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter;
@@ -1321,6 +1323,7 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
                goto delete;
 out:
 fsck_err:
+       set_btree_iter_dontneed(&alloc_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
@@ -1330,6 +1333,24 @@ delete:
        goto out;
 }
 
+static int bch2_check_discard_freespace_key(struct btree_trans *trans,
+                                           struct btree_iter *iter,
+                                           struct bpos end)
+{
+       if (!btree_node_type_is_extents(iter->btree_id)) {
+               return __bch2_check_discard_freespace_key(trans, iter);
+       } else {
+               int ret;
+
+               while (!bkey_eq(iter->pos, end) &&
+                      !(ret = btree_trans_too_many_iters(trans) ?:
+                              __bch2_check_discard_freespace_key(trans, iter)))
+                       bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+
+               return ret;
+       }
+}
+
 /*
  * We've already checked that generation numbers in the bucket_gens btree are
  * valid for buckets that exist; this just checks for keys for nonexistent
@@ -1485,12 +1506,12 @@ bkey_err:
                        BTREE_ID_need_discard, POS_MIN,
                        BTREE_ITER_PREFETCH, k,
                        NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
-               bch2_check_discard_freespace_key(&trans, &iter)) ?:
+               bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
              for_each_btree_key_commit(&trans, iter,
                        BTREE_ID_freespace, POS_MIN,
                        BTREE_ITER_PREFETCH, k,
                        NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
-               bch2_check_discard_freespace_key(&trans, &iter)) ?:
+               bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
              for_each_btree_key_commit(&trans, iter,
                        BTREE_ID_bucket_gens, POS_MIN,
                        BTREE_ITER_PREFETCH, k,
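
Of the btrees iterated above, the freespace btree stores extent-style keys, where a single key covers a contiguous range of bucket positions; the need_discard btree does not, which is presumably why the new bch2_check_discard_freespace_key() wrapper branches on btree_node_type_is_extents() and, for the extents case, checks every position from iter->pos up to (but not including) end == k.k->p. A minimal standalone sketch of that "check each position covered by an extent key" loop follows; the struct and helpers are simplified stand-ins, not bcachefs code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct bpos: just inode:offset. */
struct pos { unsigned long long inode, offset; };

static bool pos_eq(struct pos a, struct pos b)
{
        return a.inode == b.inode && a.offset == b.offset;
}

/* Stand-in for bpos_nosnap_successor(): advance to the next position. */
static struct pos pos_successor(struct pos p)
{
        p.offset++;
        return p;
}

/* Hypothetical per-position check, standing in for
 * __bch2_check_discard_freespace_key(). */
static int check_one_pos(struct pos p)
{
        printf("checking %llu:%llu\n", p.inode, p.offset);
        return 0;
}

int main(void)
{
        /* An extent-style key covering bucket positions [4..8) on device 1. */
        struct pos iter_pos = { 1, 4 }, end = { 1, 8 };
        int ret = 0;

        /* Same shape as the loop in bch2_check_discard_freespace_key(). */
        while (!pos_eq(iter_pos, end) && !(ret = check_one_pos(iter_pos)))
                iter_pos = pos_successor(iter_pos);

        return ret;
}
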
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 661b76674a18540440d26d6730992a5b7b1d7ee5..e5816efecf2659c71aa6a4082994bf25b04cc00a 100644
@@ -793,7 +793,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 
        /* Unlock before doing IO: */
        if (trans && sync)
-               bch2_trans_unlock(trans);
+               bch2_trans_unlock_noassert(trans);
 
        bch2_btree_node_read(c, b, sync);
 
diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index 0e32247fe1966cfe7061908e93ba0e8062367d1d..b78bae436794fa877da57ea8f168dada81bc306a 100644
@@ -2859,6 +2859,7 @@ static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
 u32 bch2_trans_begin(struct btree_trans *trans)
 {
        struct btree_path *path;
+       u64 now;
 
        bch2_trans_reset_updates(trans);
 
@@ -2887,10 +2888,14 @@ u32 bch2_trans_begin(struct btree_trans *trans)
                        path->preserve = false;
        }
 
+       now = local_clock();
        if (!trans->restarted &&
            (need_resched() ||
-            local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))
+            now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
                drop_locks_do(trans, (cond_resched(), 0));
+               now = local_clock();
+       }
+       trans->last_begin_time = now;
 
        if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
                bch2_trans_reset_srcu_lock(trans);
@@ -2901,7 +2906,6 @@ u32 bch2_trans_begin(struct btree_trans *trans)
                trans->notrace_relock_fail = false;
        }
 
-       trans->last_begin_time = local_clock();
        return trans->restart_count;
 }
 
diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c
index 5b290324b967eac1a93231f7d3ede9d6295dab3f..7d9e01c1d307a723a3b64b5d652022df9289cc6a 100644
@@ -714,6 +714,14 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
        return 0;
 }
 
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
+{
+       struct btree_path *path;
+
+       trans_for_each_path(trans, path)
+               __bch2_btree_path_unlock(trans, path);
+}
+
 void bch2_trans_unlock(struct btree_trans *trans)
 {
        struct btree_path *path;
diff --git a/libbcachefs/btree_locking.h b/libbcachefs/btree_locking.h
index b341cc894c61c06dc0c689945e6b0f943a5f2b12..0ad8fd44aba7a79985f002b5bf6b3ca3c502cb52 100644
@@ -22,6 +22,8 @@ void bch2_assert_btree_nodes_not_locked(void);
 static inline void bch2_assert_btree_nodes_not_locked(void) {}
 #endif
 
+void bch2_trans_unlock_noassert(struct btree_trans *);
+
 static inline bool is_btree_node(struct btree_path *path, unsigned l)
 {
        return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c
index d2811c4756b7d7be5f753d26ad08c2c16778a856..e95e48857bb8b7a77d59219691801c4ca401a46c 100644
@@ -1077,7 +1077,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
                                        BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
                        break;
 
-               split = true;
+               split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
        }
 
        if (flags & BTREE_INSERT_GC_LOCK_HELD)
diff --git a/libbcachefs/data_update.c b/libbcachefs/data_update.c
index c89ee14f8b6b8b5ce7fcbd5f5e963df65abdcfaf..9f7a30c7ad36195b327f2a6d50cc85e5aa4c51cb 100644
@@ -474,7 +474,7 @@ int bch2_data_update_init(struct btree_trans *trans,
                        if (crc_is_compressed(p.crc))
                                reserve_sectors += k.k->size;
 
-                       m->op.nr_replicas += bch2_extent_ptr_durability(c, &p);
+                       m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
                } else if (!p.ptr.cached) {
                        bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
                }
diff --git a/libbcachefs/extents.c b/libbcachefs/extents.c
index e2b126ad2babdf5a3b8fdb8730cffd6453e12d25..7e00550980de3f4c9a8c1eef3079677f8177c502 100644
@@ -641,9 +641,8 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
        return replicas;
 }
 
-unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
 {
-       unsigned durability = 0;
        struct bch_dev *ca;
 
        if (p->ptr.cached)
@@ -651,13 +650,28 @@ unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded
 
        ca = bch_dev_bkey_exists(c, p->ptr.dev);
 
-       if (ca->mi.state != BCH_MEMBER_STATE_failed)
-               durability = max_t(unsigned, durability, ca->mi.durability);
+       return ca->mi.durability +
+               (p->has_ec
+                ? p->ec.redundancy
+                : 0);
+}
 
-       if (p->has_ec)
-               durability += p->ec.redundancy;
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+{
+       struct bch_dev *ca;
 
-       return durability;
+       if (p->ptr.cached)
+               return 0;
+
+       ca = bch_dev_bkey_exists(c, p->ptr.dev);
+
+       if (ca->mi.state == BCH_MEMBER_STATE_failed)
+               return 0;
+
+       return ca->mi.durability +
+               (p->has_ec
+                ? p->ec.redundancy
+                : 0);
 }
 
 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
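
In the hunk above, bch2_extent_ptr_desired_durability() returns the durability a pointer is supposed to provide regardless of device state, while bch2_extent_ptr_durability() now returns 0 for a pointer on a failed device; the data_update.c hunk earlier in this commit switches m->op.nr_replicas to the desired value, presumably so that moving data off a failed device still reserves enough new replicas. A minimal standalone sketch of the two calculations with a worked example follows; the types are simplified stand-ins, not the bcachefs structs.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the member/pointer state used above. */
enum member_state { MEMBER_STATE_rw, MEMBER_STATE_failed };

struct dev_mi   { unsigned durability; enum member_state state; };
struct ptr_info { bool cached; bool has_ec; unsigned ec_redundancy; };

/* Durability the pointer should provide, ignoring device health. */
static unsigned desired_durability(const struct dev_mi *ca, const struct ptr_info *p)
{
        if (p->cached)
                return 0;
        return ca->durability + (p->has_ec ? p->ec_redundancy : 0);
}

/* Durability the pointer actually provides right now. */
static unsigned actual_durability(const struct dev_mi *ca, const struct ptr_info *p)
{
        if (p->cached)
                return 0;
        if (ca->state == MEMBER_STATE_failed)
                return 0;
        return ca->durability + (p->has_ec ? p->ec_redundancy : 0);
}

int main(void)
{
        /* Worked example: a durability-1 device that has failed, with the
         * pointer in an erasure coded stripe of redundancy 1. */
        struct dev_mi   ca = { .durability = 1, .state = MEMBER_STATE_failed };
        struct ptr_info p  = { .cached = false, .has_ec = true, .ec_redundancy = 1 };

        printf("desired = %u, actual = %u\n",
               desired_durability(&ca, &p),   /* 2 */
               actual_durability(&ca, &p));   /* 0 */
        return 0;
}
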
diff --git a/libbcachefs/extents.h b/libbcachefs/extents.h
index 31c8140950e0073d615c52bb39a4146e7815c2e4..3ba41e37d8649610e94c56e26477baec8a53c7e6 100644
@@ -610,6 +610,7 @@ bool bch2_bkey_is_incompressible(struct bkey_s_c);
 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
 
 unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
 unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
 unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
 
diff --git a/linux/six.c b/linux/six.c
index 5b81c3fc18be1df6302325ddd288550496b82d34..0b9c4bb7c9a71d944e8ff65005942a6c4a9b4fdf 100644
@@ -47,26 +47,26 @@ struct six_lock_vals {
        enum six_lock_type      unlock_wakeup;
 };
 
-#define LOCK_VALS {                                                    \
-       [SIX_LOCK_read] = {                                             \
-               .lock_val       = 1U << SIX_LOCK_HELD_read_OFFSET,      \
-               .lock_fail      = SIX_LOCK_HELD_write,                  \
-               .held_mask      = SIX_LOCK_HELD_read,                   \
-               .unlock_wakeup  = SIX_LOCK_write,                       \
-       },                                                              \
-       [SIX_LOCK_intent] = {                                           \
-               .lock_val       = SIX_LOCK_HELD_intent,                 \
-               .lock_fail      = SIX_LOCK_HELD_intent,                 \
-               .held_mask      = SIX_LOCK_HELD_intent,                 \
-               .unlock_wakeup  = SIX_LOCK_intent,                      \
-       },                                                              \
-       [SIX_LOCK_write] = {                                            \
-               .lock_val       = SIX_LOCK_HELD_write,                  \
-               .lock_fail      = SIX_LOCK_HELD_read,                   \
-               .held_mask      = SIX_LOCK_HELD_write,                  \
-               .unlock_wakeup  = SIX_LOCK_read,                        \
-       },                                                              \
-}
+static const struct six_lock_vals l[] = {
+       [SIX_LOCK_read] = {
+               .lock_val       = 1U << SIX_LOCK_HELD_read_OFFSET,
+               .lock_fail      = SIX_LOCK_HELD_write,
+               .held_mask      = SIX_LOCK_HELD_read,
+               .unlock_wakeup  = SIX_LOCK_write,
+       },
+       [SIX_LOCK_intent] = {
+               .lock_val       = SIX_LOCK_HELD_intent,
+               .lock_fail      = SIX_LOCK_HELD_intent,
+               .held_mask      = SIX_LOCK_HELD_intent,
+               .unlock_wakeup  = SIX_LOCK_intent,
+       },
+       [SIX_LOCK_write] = {
+               .lock_val       = SIX_LOCK_HELD_write,
+               .lock_fail      = SIX_LOCK_HELD_read,
+               .held_mask      = SIX_LOCK_HELD_write,
+               .unlock_wakeup  = SIX_LOCK_read,
+       },
+};
 
 static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
 {
@@ -116,7 +116,6 @@ static inline unsigned pcpu_read_count(struct six_lock *lock)
 static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
                            struct task_struct *task, bool try)
 {
-       const struct six_lock_vals l[] = LOCK_VALS;
        int ret;
        u32 old;
 
@@ -301,10 +300,10 @@ EXPORT_SYMBOL_GPL(six_trylock_ip);
 bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
                   unsigned seq, unsigned long ip)
 {
-       if (lock->seq != seq || !six_trylock_ip(lock, type, ip))
+       if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))
                return false;
 
-       if (lock->seq != seq) {
+       if (six_lock_seq(lock) != seq) {
                six_unlock_ip(lock, type, ip);
                return false;
        }
@@ -588,7 +587,6 @@ EXPORT_SYMBOL_GPL(six_lock_ip_waiter);
 __always_inline
 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
 {
-       const struct six_lock_vals l[] = LOCK_VALS;
        u32 state;
 
        if (type == SIX_LOCK_intent)
@@ -638,6 +636,8 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long
 
        if (type != SIX_LOCK_write)
                six_release(&lock->dep_map, ip);
+       else
+               lock->seq++;
 
        if (type == SIX_LOCK_intent &&
            lock->intent_lock_recurse) {
@@ -645,8 +645,6 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long
                return;
        }
 
-       lock->seq += type == SIX_LOCK_write;
-
        do_six_unlock_type(lock, type);
 }
 EXPORT_SYMBOL_GPL(six_unlock_ip);
@@ -675,7 +673,6 @@ EXPORT_SYMBOL_GPL(six_lock_downgrade);
  */
 bool six_lock_tryupgrade(struct six_lock *lock)
 {
-       const struct six_lock_vals l[] = LOCK_VALS;
        u32 old = atomic_read(&lock->state), new;
 
        do {
@@ -743,8 +740,6 @@ EXPORT_SYMBOL_GPL(six_trylock_convert);
  */
 void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
 {
-       const struct six_lock_vals l[] = LOCK_VALS;
-
        six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);
 
        /* XXX: assert already locked, and that we don't overflow: */
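
In the six.c changes above, lock->seq is still bumped only when a write lock is released (now directly in six_unlock_ip()), and six_relock_ip() reads it through the six_lock_seq() helper, so a relock by sequence number fails if any writer got in after the sequence was sampled. A minimal single-threaded sketch of that "remember the sequence, drop the lock, try to relock" pattern follows; the toy struct and helpers are not the real six lock API.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a six lock: just a writer flag and a sequence number. */
struct toy_lock { bool write_locked; unsigned seq; };

static unsigned toy_lock_seq(const struct toy_lock *l) { return l->seq; }

static void toy_write_lock(struct toy_lock *l)   { l->write_locked = true; }
static void toy_write_unlock(struct toy_lock *l) { l->write_locked = false; l->seq++; }

/* Relock only succeeds if no write happened since `seq` was sampled. */
static bool toy_relock(struct toy_lock *l, unsigned seq)
{
        return !l->write_locked && toy_lock_seq(l) == seq;
}

int main(void)
{
        struct toy_lock l = { 0 };

        /* Sample the sequence while the protected state is known-good ... */
        unsigned seq = toy_lock_seq(&l);

        /* ... relock succeeds while nothing has been modified ... */
        printf("relock before write: %s\n", toy_relock(&l, seq) ? "ok" : "failed");

        /* ... but fails after a write lock/unlock cycle bumped the sequence. */
        toy_write_lock(&l);
        toy_write_unlock(&l);
        printf("relock after write:  %s\n", toy_relock(&l, seq) ? "ok" : "failed");

        return 0;
}
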