Update bcachefs sources to fd381c355c bcachefs: Fix a null ptr deref in fsck check_ex...
diff --git a/libbcachefs/btree_update_leaf.c b/libbcachefs/btree_update_leaf.c
index 20ad79891bfdae58cdf408699ca99a4fa07020d8..c17d048b1c267c146dba61a78a4d2814572bb035 100644
--- a/libbcachefs/btree_update_leaf.c
+++ b/libbcachefs/btree_update_leaf.c
@@ -227,12 +227,12 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
        return 0;
 }
 
-static int btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
+int bch2_btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
 {
        return __btree_node_flush(j, pin, 0, seq);
 }
 
-static int btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
+int bch2_btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
 {
        return __btree_node_flush(j, pin, 1, seq);
 }
@@ -244,8 +244,8 @@ inline void bch2_btree_add_journal_pin(struct bch_fs *c,
 
        bch2_journal_pin_add(&c->journal, seq, &w->journal,
                             btree_node_write_idx(b) == 0
-                            ? btree_node_flush0
-                            : btree_node_flush1);
+                            ? bch2_btree_node_flush0
+                            : bch2_btree_node_flush1);
 }
 
 /**
@@ -401,7 +401,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
        if (!new_k) {
                bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
                        bch2_btree_ids[path->btree_id], new_u64s);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_key_cache_insert;
        }
 
        trans_for_each_update(trans, i)
@@ -434,7 +434,8 @@ static int run_one_mem_trigger(struct btree_trans *trans,
        if (bch2_bkey_ops[old.k->type].atomic_trigger ==
            bch2_bkey_ops[i->k->k.type].atomic_trigger &&
            ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
-               ret   = bch2_mark_key(trans, old, bkey_i_to_s_c(new),
+               ret   = bch2_mark_key(trans, i->btree_id, i->level,
+                               old, bkey_i_to_s_c(new),
                                BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
        } else {
                struct bkey             _deleted = KEY(0, 0, 0);
@@ -442,9 +443,11 @@ static int run_one_mem_trigger(struct btree_trans *trans,
 
                _deleted.p = i->path->pos;
 
-               ret   = bch2_mark_key(trans, deleted, bkey_i_to_s_c(new),
+               ret   = bch2_mark_key(trans, i->btree_id, i->level,
+                               deleted, bkey_i_to_s_c(new),
                                BTREE_TRIGGER_INSERT|flags) ?:
-                       bch2_mark_key(trans, old, deleted,
+                       bch2_mark_key(trans, i->btree_id, i->level,
+                               old, deleted,
                                BTREE_TRIGGER_OVERWRITE|flags);
        }
 
@@ -619,14 +622,6 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
 
        prefetch(&trans->c->journal.flags);
 
-       h = trans->hooks;
-       while (h) {
-               ret = h->fn(trans, h);
-               if (ret)
-                       return ret;
-               h = h->next;
-       }
-
        trans_for_each_update(trans, i) {
                /* Multiple inserts might go to same leaf: */
                if (!same_leaf_as_prev(trans, i))
@@ -693,6 +688,14 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
                        goto revert_fs_usage;
        }
 
+       h = trans->hooks;
+       while (h) {
+               ret = h->fn(trans, h);
+               if (ret)
+                       goto revert_fs_usage;
+               h = h->next;
+       }
+
        trans_for_each_update(trans, i)
                if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
                        ret = run_one_mem_trigger(trans, i, i->flags);
@@ -762,7 +765,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
                if (!i->cached)
                        btree_insert_key_leaf(trans, i);
                else if (!i->key_cache_already_flushed)
-                       bch2_btree_insert_key_cached(trans, flags, i->path, i->k);
+                       bch2_btree_insert_key_cached(trans, flags, i);
                else {
                        bch2_btree_key_cache_drop(trans, i->path);
                        btree_path_set_dirty(i->path, BTREE_ITER_NEED_TRAVERSE);
@@ -1423,10 +1426,15 @@ int bch2_trans_update_extent(struct btree_trans *trans,
                        update->k.p = k.k->p;
                        update->k.p.snapshot = insert->k.p.snapshot;
 
-                       if (insert->k.p.snapshot != k.k->p.snapshot ||
-                           (btree_type_has_snapshots(btree_id) &&
-                            need_whiteout_for_snapshot(trans, btree_id, update->k.p)))
+                       if (insert->k.p.snapshot != k.k->p.snapshot) {
                                update->k.type = KEY_TYPE_whiteout;
+                       } else if (btree_type_has_snapshots(btree_id)) {
+                               ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
+                               if (ret < 0)
+                                       goto err;
+                               if (ret)
+                                       update->k.type = KEY_TYPE_whiteout;
+                       }
 
                        ret = bch2_btree_insert_nonextent(trans, btree_id, update,
                                                  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
@@ -1749,6 +1757,7 @@ int __bch2_btree_insert(struct btree_trans *trans, enum btree_id id,
        int ret;
 
        bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
+                            BTREE_ITER_CACHED|
                             BTREE_ITER_INTENT);
        ret   = bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(trans, &iter, k, flags);
@@ -1793,6 +1802,20 @@ int bch2_btree_delete_at(struct btree_trans *trans,
        return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
 }
 
+int bch2_btree_delete_at_buffered(struct btree_trans *trans,
+                                 enum btree_id btree, struct bpos pos)
+{
+       struct bkey_i *k;
+
+       k = bch2_trans_kmalloc(trans, sizeof(*k));
+       if (IS_ERR(k))
+               return PTR_ERR(k);
+
+       bkey_init(&k->k);
+       k->k.p = pos;
+       return bch2_trans_update_buffered(trans, btree, k);
+}
+
 int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
                                  struct bpos start, struct bpos end,
                                  unsigned update_flags,
@@ -1887,7 +1910,7 @@ static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list ar
        int ret;
 
        prt_vprintf(&buf, fmt, args);
-       ret = buf.allocation_failure ? -ENOMEM : 0;
+       ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
        if (ret)
                goto err;
 
@@ -1915,14 +1938,19 @@ err:
        return ret;
 }
 
-int bch2_trans_log_msg(struct btree_trans *trans, const char *fmt, ...)
+static int
+__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
+                 va_list args)
 {
-       va_list args;
        int ret;
 
-       va_start(args, fmt);
-       ret = __bch2_trans_log_msg(&trans->extra_journal_entries, fmt, args);
-       va_end(args);
+       if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
+               ret = __bch2_trans_log_msg(&c->journal.early_journal_entries, fmt, args);
+       } else {
+               ret = bch2_trans_do(c, NULL, NULL,
+                       BTREE_INSERT_LAZY_RW|commit_flags,
+                       __bch2_trans_log_msg(&trans.extra_journal_entries, fmt, args));
+       }
 
        return ret;
 }
@@ -1933,16 +1961,22 @@ int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
        int ret;
 
        va_start(args, fmt);
+       ret = __bch2_fs_log_msg(c, 0, fmt, args);
+       va_end(args);
+       return ret;
+}
 
-       if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
-               ret = __bch2_trans_log_msg(&c->journal.early_journal_entries, fmt, args);
-       } else {
-               ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
-                       __bch2_trans_log_msg(&trans.extra_journal_entries, fmt, args));
-       }
+/*
+ * Use for logging messages during recovery to enable reserved space and avoid
+ * blocking.
+ */
+int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
+{
+       va_list args;
+       int ret;
 
+       va_start(args, fmt);
+       ret = __bch2_fs_log_msg(c, JOURNAL_WATERMARK_reserved, fmt, args);
        va_end(args);
-
        return ret;
-
 }