// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "extents.h"
#include "keylist.h"
#include "snapshot.h"
#include "trace.h"

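/*
 * Sort order for a transaction's pending updates: by btree, then cached vs.
 * uncached, then level (descending), then key position.
 * bch2_trans_update_by_path() keeps trans->updates sorted in this order.
 */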
static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
					 const struct btree_insert_entry *r)
{
	return   cmp_int(l->btree_id,	r->btree_id) ?:
		 cmp_int(l->cached,	r->cached) ?:
		 -cmp_int(l->level,	r->level) ?:
		 bpos_cmp(l->k->k.p,	r->k->k.p);
}

static int __must_check
bch2_trans_update_by_path(struct btree_trans *, struct btree_path *,
			  struct bkey_i *, enum btree_update_flags,
			  unsigned long ip);

static noinline int __check_pos_snapshot_overwritten(struct btree_trans *trans,
						     enum btree_id id,
						     struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		if (!k.k)
			break;

		if (!bkey_eq(pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
			ret = 1;
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

static inline int check_pos_snapshot_overwritten(struct btree_trans *trans,
						 enum btree_id id,
						 struct bpos pos)
{
	if (!btree_type_has_snapshots(id) ||
	    bch2_snapshot_is_leaf(trans->c, pos.snapshot))
		return 0;

	return __check_pos_snapshot_overwritten(trans, id, pos);
}

static noinline int extent_front_merge(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bkey_s_c k,
				       struct bkey_i **insert,
				       enum btree_update_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *update;
	int ret;

	update = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(update);
	if (ret)
		return ret;

	if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
		return 0;

	ret =   check_pos_snapshot_overwritten(trans, iter->btree_id, k.k->p) ?:
		check_pos_snapshot_overwritten(trans, iter->btree_id, (*insert)->k.p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	ret = bch2_btree_delete_at(trans, iter, flags);
	if (ret)
		return ret;

	*insert = update;
	return 0;
}

static noinline int extent_back_merge(struct btree_trans *trans,
				      struct btree_iter *iter,
				      struct bkey_i *insert,
				      struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	int ret;

	ret =   check_pos_snapshot_overwritten(trans, iter->btree_id, insert->k.p) ?:
		check_pos_snapshot_overwritten(trans, iter->btree_id, k.k->p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	bch2_bkey_merge(c, bkey_i_to_s(insert), k);
	return 0;
}

/*
 * When deleting, check if we need to emit a whiteout (because we're overwriting
 * something in an ancestor snapshot):
 */
static int need_whiteout_for_snapshot(struct btree_trans *trans,
				      enum btree_id btree_id, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot = pos.snapshot;
	int ret;

	if (!bch2_snapshot_parent(trans->c, pos.snapshot))
		return 0;

	pos.snapshot++;

	for_each_btree_key_norestart(trans, iter, btree_id, pos,
			   BTREE_ITER_ALL_SNAPSHOTS|
			   BTREE_ITER_NOPRESERVE, k, ret) {
		if (!bkey_eq(k.k->p, pos))
			break;

		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
					      k.k->p.snapshot)) {
			ret = !bkey_whiteout(k.k);
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

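/*
 * A key is being moved to new_pos within one snapshot: for each ancestor
 * snapshot that has its own version of the key at old_pos (and isn't already
 * covered by one we've handled), insert a whiteout at new_pos in that
 * snapshot, unless a key already exists there - so the move isn't visible in
 * snapshots that shouldn't see it.
 */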
int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
				     enum btree_id id,
				     struct bpos old_pos,
				     struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter old_iter, new_iter = { NULL };
	struct bkey_s_c old_k, new_k;
	snapshot_id_list s;
	struct bkey_i *update;
	int ret = 0;

	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
		return 0;

	darray_init(&s);

	bch2_trans_iter_init(trans, &old_iter, id, old_pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
	       !(ret = bkey_err(old_k)) &&
	       bkey_eq(old_pos, old_k.k->p)) {
		struct bpos whiteout_pos =
			SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);

		if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
		    snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
			continue;

		new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
					   BTREE_ITER_NOT_EXTENTS|
					   BTREE_ITER_INTENT);
		ret = bkey_err(new_k);
		if (ret)
			break;

		if (new_k.k->type == KEY_TYPE_deleted) {
			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p		= whiteout_pos;
			update->k.type		= KEY_TYPE_whiteout;

			ret = bch2_trans_update(trans, &new_iter, update,
						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		}
		bch2_trans_iter_exit(trans, &new_iter);
		if (ret)
			break;

		ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &new_iter);
	bch2_trans_iter_exit(trans, &old_iter);
	darray_exit(&s);

	return ret;
}

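/*
 * Overwrite the portion of the existing extent @old covered by @new,
 * generating up to four component updates: a front split, a middle split
 * (when overwriting in a different snapshot), an update at @old's end
 * position (a whiteout when needed), and a back split.
 */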
int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_update_flags flags,
				       struct bkey_s_c old,
				       struct bkey_s_c new)
{
	enum btree_id btree_id = iter->btree_id;
	struct bkey_i *update;
	struct bpos new_start = bkey_start_pos(new.k);
	bool front_split = bkey_lt(bkey_start_pos(old.k), new_start);
	bool back_split  = bkey_gt(old.k->p, new.k->p);
	int ret = 0, compressed_sectors;

	/*
	 * If we're going to be splitting a compressed extent, note it
	 * so that __bch2_trans_commit() can increase our disk
	 * reservation:
	 */
	if (((front_split && back_split) ||
	     ((front_split || back_split) && old.k->p.snapshot != new.k->p.snapshot)) &&
	    (compressed_sectors = bch2_bkey_sectors_compressed(old)))
		trans->extra_journal_res += compressed_sectors;

	if (front_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_back(new_start, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	/* If we're overwriting in a different snapshot - middle split: */
	if (old.k->p.snapshot != new.k->p.snapshot &&
	    (front_split || back_split)) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new_start, update);
		bch2_cut_back(new.k->p, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (bkey_le(old.k->p, new.k->p)) {
		update = bch2_trans_kmalloc(trans, sizeof(*update));
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bkey_init(&update->k);
		update->k.p = old.k->p;
		update->k.p.snapshot = new.k->p.snapshot;

		if (new.k->p.snapshot != old.k->p.snapshot) {
			update->k.type = KEY_TYPE_whiteout;
		} else if (btree_type_has_snapshots(btree_id)) {
			ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
			if (ret < 0)
				return ret;
			if (ret)
				update->k.type = KEY_TYPE_whiteout;
		}

		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (back_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new.k->p, update);

		ret = bch2_trans_update_by_path(trans, iter->path, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
					flags, _RET_IP_);
		if (ret)
			return ret;
	}

	return 0;
}

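/*
 * Extent updates don't overwrite in a single step: iterate over all existing
 * extents that overlap with @insert, merging with the first and last where
 * possible and splitting/trimming the rest via
 * bch2_trans_update_extent_overwrite().
 */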
static int bch2_trans_update_extent(struct btree_trans *trans,
				    struct btree_iter *orig_iter,
				    struct bkey_i *insert,
				    enum btree_update_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id btree_id = orig_iter->btree_id;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES|
			     BTREE_ITER_NOT_EXTENTS);
	k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
	if ((ret = bkey_err(k)))
		goto err;
	if (!k.k)
		goto out;

	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
			ret = extent_front_merge(trans, &iter, k, &insert, flags);
			if (ret)
				goto err;
		}

		goto next;
	}

	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
		bool done = bkey_lt(insert->k.p, k.k->p);

		ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
		if (ret)
			goto err;

		if (done)
			goto out;
next:
		bch2_btree_iter_advance(&iter);
		k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
		if ((ret = bkey_err(k)))
			goto err;
		if (!k.k)
			goto out;
	}

	if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
		ret = extent_back_merge(trans, &iter, insert, k);
		if (ret)
			goto err;
	}
out:
	if (!bkey_deleted(&insert->k))
		ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

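/*
 * The key cache requires that every key also exist in the btree (see the
 * comment in bch2_trans_update_by_path()): when adding a new key through the
 * key cache, also insert it into the btree directly, with the key cache
 * entry flagged so triggers don't run twice.
 */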
static noinline int flush_new_cached_update(struct btree_trans *trans,
					    struct btree_path *path,
					    struct btree_insert_entry *i,
					    enum btree_update_flags flags,
					    unsigned long ip)
{
	struct btree_path *btree_path;
	struct bkey k;
	int ret;

	btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
				   BTREE_ITER_INTENT, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, btree_path, 0);
	if (ret)
		goto out;

	/*
	 * The old key in the insert entry might actually refer to an existing
	 * key in the btree that has been deleted from cache and not yet
	 * flushed. Check for this and skip the flush so we don't run triggers
	 * against a stale key.
	 */
	bch2_btree_path_peek_slot_exact(btree_path, &k);
	if (!bkey_deleted(&k))
		goto out;

	i->key_cache_already_flushed = true;
	i->flags |= BTREE_TRIGGER_NORUN;

	btree_path_set_should_be_locked(btree_path);
	ret = bch2_trans_update_by_path(trans, btree_path, i->k, flags, ip);
out:
	bch2_path_put(trans, btree_path, true);
	return ret;
}

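/*
 * Add an update to the transaction's sorted update list, replacing a pending
 * update at the same position if there is one; for new entries, the key
 * currently in the btree is recorded so triggers can see the old value.
 */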
static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
			  struct bkey_i *k, enum btree_update_flags flags,
			  unsigned long ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i, n;
	u64 seq = 0;
	int cmp;

	EBUG_ON(!path->should_be_locked);
	EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
	EBUG_ON(!bpos_eq(k->k.p, path->pos));

	/*
	 * The transaction journal res hasn't been allocated at this point.
	 * That occurs at commit time. Reuse the seq field to pass in the seq
	 * of a prejournaled key.
	 */
	if (flags & BTREE_UPDATE_PREJOURNAL)
		seq = trans->journal_res.seq;

	n = (struct btree_insert_entry) {
		.flags		= flags,
		.bkey_type	= __btree_node_type(path->level, path->btree_id),
		.btree_id	= path->btree_id,
		.level		= path->level,
		.cached		= path->cached,
		.path		= path,
		.k		= k,
		.seq		= seq,
		.ip_allocated	= ip,
	};

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(i != trans->updates &&
		       btree_insert_entry_cmp(i - 1, i) >= 0);
#endif

	/*
	 * Pending updates are kept sorted: first, find position of new update,
	 * then delete/trim any updates the new update overwrites:
	 */
	trans_for_each_update(trans, i) {
		cmp = btree_insert_entry_cmp(&n, i);
		if (cmp <= 0)
			break;
	}

	if (!cmp && i < trans->updates + trans->nr_updates) {
		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

		bch2_path_put(trans, i->path, true);
		i->flags	= n.flags;
		i->cached	= n.cached;
		i->k		= n.k;
		i->path		= n.path;
		i->seq		= n.seq;
		i->ip_allocated	= n.ip_allocated;
	} else {
		array_insert_item(trans->updates, trans->nr_updates,
				  i - trans->updates, n);

		i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
		i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;

		if (unlikely(trans->journal_replay_not_finished)) {
			struct bkey_i *j_k =
				bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);

			if (j_k) {
				i->old_k = j_k->k;
				i->old_v = &j_k->v;
			}
		}
	}

	__btree_path_get(i->path, true);

	/*
	 * If a key is present in the key cache, it must also exist in the
	 * btree - this is necessary for cache coherency. When iterating over
	 * a btree that's cached in the key cache, the btree iter code checks
	 * the key cache - but the key has to exist in the btree for that to
	 * work:
	 */
	if (path->cached && bkey_deleted(&i->old_k))
		return flush_new_cached_update(trans, path, i, flags, ip);

	return 0;
}

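/*
 * bch2_trans_update() - main entry point for queueing an update within a
 * transaction: dispatches extent updates to bch2_trans_update_extent(),
 * converts deletions to whiteouts when required by snapshots, and redirects
 * updates to cached btrees through the key cache.
 */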
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_i *k, enum btree_update_flags flags)
{
	struct btree_path *path = iter->update_path ?: iter->path;
	struct bkey_cached *ck;
	int ret;

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return bch2_trans_update_extent(trans, iter, k, flags);

	if (bkey_deleted(&k->k) &&
	    !(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
	    (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
		ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
		if (unlikely(ret < 0))
			return ret;

		if (ret)
			k->k.type = KEY_TYPE_whiteout;
	}

	/*
	 * Ensure that updates to cached btrees go to the key cache:
	 */
	if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
	    !path->cached &&
	    !path->level &&
	    btree_id_cached(trans->c, path->btree_id)) {
		if (!iter->key_cache_path ||
		    !iter->key_cache_path->should_be_locked ||
		    !bpos_eq(iter->key_cache_path->pos, k->k.p)) {
			if (!iter->key_cache_path)
				iter->key_cache_path =
					bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
						      BTREE_ITER_INTENT|
						      BTREE_ITER_CACHED, _THIS_IP_);

			iter->key_cache_path =
				bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
							iter->flags & BTREE_ITER_INTENT,
							_THIS_IP_);

			ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
						       BTREE_ITER_CACHED);
			if (unlikely(ret))
				return ret;

			ck = (void *) iter->key_cache_path->l[0].b;

			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
				trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
				return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
			}

			btree_path_set_should_be_locked(iter->key_cache_path);
		}

		path = iter->key_cache_path;
	}

	return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
}

/*
 * Add a transaction update for a key that has already been journaled.
 */
int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
				       struct btree_iter *iter, struct bkey_i *k,
				       enum btree_update_flags flags)
{
	trans->journal_res.seq = seq;
	return bch2_trans_update(trans, iter, k, flags|BTREE_UPDATE_NOJOURNAL|
						 BTREE_UPDATE_PREJOURNAL);
}

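/*
 * Queue an update through the btree write buffer: multiple updates to the
 * same position are coalesced in place, and the wb_updates array is grown
 * (doubling, up to U8_MAX entries) when full.
 */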
int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
					    enum btree_id btree,
					    struct bkey_i *k)
{
	struct btree_write_buffered_key *i;
	int ret;

	EBUG_ON(trans->nr_wb_updates > trans->wb_updates_size);
	EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

	trans_for_each_wb_update(trans, i) {
		if (i->btree == btree && bpos_eq(i->k.k.p, k->k.p)) {
			bkey_copy(&i->k, k);
			return 0;
		}
	}

	if (!trans->wb_updates ||
	    trans->nr_wb_updates == trans->wb_updates_size) {
		struct btree_write_buffered_key *u;

		if (trans->nr_wb_updates == trans->wb_updates_size) {
			struct btree_transaction_stats *s = btree_trans_stats(trans);

			BUG_ON(trans->wb_updates_size > U8_MAX / 2);
			trans->wb_updates_size = max(1, trans->wb_updates_size * 2);
			if (s)
				s->wb_updates_size = trans->wb_updates_size;
		}

		u = bch2_trans_kmalloc_nomemzero(trans,
					trans->wb_updates_size *
					sizeof(struct btree_write_buffered_key));
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			return ret;

		if (trans->nr_wb_updates)
			memcpy(u, trans->wb_updates, trans->nr_wb_updates *
			       sizeof(struct btree_write_buffered_key));
		trans->wb_updates = u;
	}

	trans->wb_updates[trans->nr_wb_updates] = (struct btree_write_buffered_key) {
		.btree	= btree,
	};

	bkey_copy(&trans->wb_updates[trans->nr_wb_updates].k, k);
	trans->nr_wb_updates++;

	return 0;
}

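/*
 * Find an empty slot following the last key in @btree: advances past the
 * last key, verifies the slot is unused, and returns
 * -BCH_ERR_ENOSPC_btree_slot if that position lies beyond @end.
 */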
int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
			     enum btree_id btree, struct bpos end)
{
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_INTENT);
	k = bch2_btree_iter_prev(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bch2_btree_iter_advance(iter);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	BUG_ON(k.k->type != KEY_TYPE_deleted);

	if (bkey_gt(k.k->p, end)) {
		ret = -BCH_ERR_ENOSPC_btree_slot;
		goto err;
	}

	return 0;
err:
	bch2_trans_iter_exit(trans, iter);
	return ret;
}

void bch2_trans_commit_hook(struct btree_trans *trans,
			    struct btree_trans_commit_hook *h)
{
	h->next = trans->hooks;
	trans->hooks = h;
}

int bch2_btree_insert_nonextent(struct btree_trans *trans,
				enum btree_id btree, struct bkey_i *k,
				enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int __bch2_btree_insert(struct btree_trans *trans, enum btree_id id,
			struct bkey_i *k, enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/**
 * bch2_btree_insert - insert a key into a given btree
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @disk_res:		disk reservation to use, may be NULL
 * @journal_seq:	if non-NULL, the journal sequence number of the commit
 *			is returned here
 * @flags:		transaction commit flags
 *
 * Runs __bch2_btree_insert() in a single-update transaction.
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
		      struct bkey_i *k,
		      struct disk_reservation *disk_res,
		      u64 *journal_seq, int flags)
{
	return bch2_trans_do(c, disk_res, journal_seq, flags,
			     __bch2_btree_insert(&trans, id, k, 0));
}

int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter,
				unsigned len, unsigned update_flags)
{
	struct bkey_i *k;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.p = iter->pos;
	bch2_key_resize(&k->k, len);
	return bch2_trans_update(trans, iter, k, update_flags);
}

int bch2_btree_delete_at(struct btree_trans *trans,
			 struct btree_iter *iter, unsigned update_flags)
{
	return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
}

int bch2_btree_delete_at_buffered(struct btree_trans *trans,
				  enum btree_id btree, struct bpos pos)
{
	struct bkey_i *k;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.p = pos;
	return bch2_trans_update_buffered(trans, btree, k);
}

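/*
 * Delete every key in [start, end), committing after each deleted key so a
 * single transaction never grows unboundedly; returns
 * -BCH_ERR_transaction_restart_nested if any commit had to restart.
 */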
int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
				  struct bpos start, struct bpos end,
				  unsigned update_flags,
				  u64 *journal_seq)
{
	u32 restart_count = trans->restart_count;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(trans->c, 0);
		struct bkey_i delete;

		ret = bkey_err(k);
		if (ret)
			goto err;

		bkey_init(&delete.k);

		/*
		 * This could probably be more efficient for extents:
		 */

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_IS_EXTENTS)
			bch2_key_resize(&delete.k,
					bpos_min(end, k.k->p).offset -
					iter.pos.offset);

		ret   = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
			bch2_trans_commit(trans, &disk_res, journal_seq,
					  BTREE_INSERT_NOFAIL);
		bch2_disk_reservation_put(trans->c, &disk_res);
err:
		/*
		 * the bch2_trans_begin() call is in a weird place because we
		 * need to call it after every transaction commit, to avoid path
		 * overflow, but don't want to call it if the delete operation
		 * is a no-op and we have no work to do:
		 */
		bch2_trans_begin(trans);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	if (!ret && trans_was_restarted(trans, restart_count))
		ret = -BCH_ERR_transaction_restart_nested;

	return ret;
}

/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    unsigned update_flags,
			    u64 *journal_seq)
{
	int ret = bch2_trans_run(c,
			bch2_btree_delete_range_trans(&trans, id, start, end,
						      update_flags, journal_seq));
	if (ret == -BCH_ERR_transaction_restart_nested)
		ret = 0;
	return ret;
}

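/*
 * Set or clear a single key in a bitset btree: writes a KEY_TYPE_set or
 * KEY_TYPE_deleted key at @pos through the btree write buffer.
 */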
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
		       struct bpos pos, bool set)
{
	struct bkey_i *k;
	int ret = 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	ret = PTR_ERR_OR_ZERO(k);
	if (unlikely(ret))
		return ret;

	bkey_init(&k->k);
	k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k->k.p = pos;

	return bch2_trans_update_buffered(trans, btree, k);
}

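/*
 * Format a message into a BCH_JSET_ENTRY_log journal entry and append it to
 * @entries; the payload is padded with NULs to a multiple of 8 bytes (one
 * u64).
 */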
static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list args)
{
	struct printbuf buf = PRINTBUF;
	struct jset_entry_log *l;
	unsigned u64s;
	int ret;

	prt_vprintf(&buf, fmt, args);
	ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
	if (ret)
		goto err;

	u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));

	ret = darray_make_room(entries, jset_u64s(u64s));
	if (ret)
		goto err;

	l = (void *) &darray_top(*entries);
	l->entry.u64s		= cpu_to_le16(u64s);
	l->entry.btree_id	= 0;
	l->entry.level		= 1;
	l->entry.type		= BCH_JSET_ENTRY_log;
	l->entry.pad[0]		= 0;
	l->entry.pad[1]		= 0;
	l->entry.pad[2]		= 0;
	memcpy(l->d, buf.buf, buf.pos);
	while (buf.pos & 7)
		l->d[buf.pos++] = '\0';

	entries->nr += jset_u64s(u64s);
err:
	printbuf_exit(&buf);
	return ret;
}

static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
		  va_list args)
{
	int ret;

	if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
		ret = __bch2_trans_log_msg(&c->journal.early_journal_entries, fmt, args);
	} else {
		ret = bch2_trans_do(c, NULL, NULL,
				    BTREE_INSERT_LAZY_RW|commit_flags,
				    __bch2_trans_log_msg(&trans.extra_journal_entries, fmt, args));
	}

	return ret;
}

int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, 0, fmt, args);
	va_end(args);
	return ret;
}

/*
 * Use for logging messages during recovery to enable reserved space and avoid
 * blocking:
 */
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
	va_end(args);
	return ret;
}