// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "extents.h"
#include "keylist.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/darray.h>

static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
					 const struct btree_insert_entry *r)
{
	return   cmp_int(l->btree_id,	r->btree_id) ?:
		 cmp_int(l->cached,	r->cached) ?:
		 -cmp_int(l->level,	r->level) ?:
		 bpos_cmp(l->k->k.p,	r->k->k.p);
}

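/*
 * Ordering note (illustrative): pending updates sort by btree id, then key
 * cache vs. btree updates, then by descending level, then by key position.
 * Two updates to the same btree, cached-ness, level and position compare
 * equal, which is what lets bch2_trans_update_by_path() below overwrite an
 * earlier queued update in place instead of adding a duplicate.
 */
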
static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
			  struct bkey_i *, enum btree_update_flags,
			  unsigned long ip);

static noinline int extent_front_merge(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bkey_s_c k,
				       struct bkey_i **insert,
				       enum btree_update_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *update;
	int ret;

	update = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(update);
	if (ret)
		return ret;

	if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
		return 0;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	ret = bch2_btree_delete_at(trans, iter, flags);
	if (ret)
		return ret;

	*insert = update;
	return 0;
}

static noinline int extent_back_merge(struct btree_trans *trans,
				      struct btree_iter *iter,
				      struct bkey_i *insert,
				      struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	int ret;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	bch2_bkey_merge(c, bkey_i_to_s(insert), k);
	return 0;
}

static struct bkey_s_c peek_slot_including_whiteouts(struct btree_trans *trans, struct btree_iter *iter,
						     enum btree_id btree, struct bpos pos)
{
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key_norestart(trans, *iter, btree, pos,
			   BTREE_ITER_ALL_SNAPSHOTS|
			   BTREE_ITER_NOPRESERVE, k, ret) {
		if (!bkey_eq(k.k->p, pos))
			break;
		if (bch2_snapshot_is_ancestor(trans->c, pos.snapshot, k.k->p.snapshot))
			return k;
	}
	bch2_trans_iter_exit(trans, iter);

	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}

/*
 * When deleting, check if we need to emit a whiteout (because we're overwriting
 * something in an ancestor snapshot)
 */
static int need_whiteout_for_snapshot(struct btree_trans *trans, enum btree_id btree, struct bpos pos)
{
	pos.snapshot = bch2_snapshot_parent(trans->c, pos.snapshot);
	if (!pos.snapshot)
		return 0;

	struct btree_iter iter;
	struct bkey_s_c k = peek_slot_including_whiteouts(trans, &iter, btree, pos);
	int ret = bkey_err(k) ?: k.k && !bkey_whiteout(k.k);
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

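/*
 * Worked example (illustrative): say snapshot 10 is the parent of snapshot 7
 * and the btree contains a key at SPOS(42, 100, 10). Deleting that key as
 * seen from snapshot 7 can't simply drop it - lookups in snapshot 7 would
 * fall through to the ancestor and see it again - so the deletion has to be
 * recorded as a whiteout at SPOS(42, 100, 7). need_whiteout_for_snapshot()
 * returns nonzero in exactly that case: a non-whiteout key at the same
 * position exists in some ancestor snapshot.
 */
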
/*
 * We're overwriting a key at @pos in snapshot @snapshot, so we need to insert a
 * whiteout: that might be in @snapshot, or if there are overwrites in sibling
 * snapshots, find the common ancestor where @pos is overwritten in every
 * descendant and insert the whiteout there - which might be at @pos.
 */
static int delete_interior_snapshot_key(struct btree_trans *trans,
					enum btree_id btree,
					struct bpos whiteout, bool deleting,
					struct bpos overwrite, bool old_is_whiteout)
{
	struct bch_fs *c = trans->c;
	struct bpos orig_whiteout = whiteout, sib = whiteout;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	sib.snapshot = bch2_snapshot_sibling(c, sib.snapshot);

	for_each_btree_key_norestart(trans, iter, btree, sib,
				     BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_INTENT, k, ret) {
		BUG_ON(bpos_gt(k.k->p, overwrite));

		if (bpos_lt(k.k->p, sib)) /* unrelated branch - skip */
			continue;
		if (bpos_gt(k.k->p, sib)) /* did not find @sib */
			break;

		/* @overwrite is also written in @sib, now check parent */
		whiteout.snapshot = bch2_snapshot_parent(c, whiteout.snapshot);
		if (bpos_eq(whiteout, overwrite))
			break;

		sib = whiteout;
		sib.snapshot = bch2_snapshot_sibling(c, sib.snapshot);
	}
	if (ret)
		goto err;

	/* Nothing to do: the key we're inserting already overwrites here: */
	if (!deleting && bpos_eq(whiteout, orig_whiteout))
		goto err;

	if (!bpos_eq(iter.pos, whiteout)) {
		bch2_trans_iter_exit(trans, &iter);
		bch2_trans_iter_init(trans, &iter, btree, whiteout, BTREE_ITER_INTENT);
		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;
	}

	iter.flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
	iter.flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	struct bkey_i *delete = bch2_trans_kmalloc(trans, sizeof(*delete));
	ret = PTR_ERR_OR_ZERO(delete);
	if (ret)
		goto err;

	bkey_init(&delete->k);
	delete->k.p = whiteout;

	ret = !bpos_eq(whiteout, overwrite)
		? !old_is_whiteout
		: need_whiteout_for_snapshot(trans, btree, whiteout);
	if (ret < 0)
		goto err;
	if (ret)
		delete->k.type = KEY_TYPE_whiteout;

	ret = bch2_trans_update(trans, &iter, delete,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
				BTREE_UPDATE_SNAPSHOT_WHITEOUT_CHECKS_DONE);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * We're overwriting a key in a snapshot that has ancestors: if we're
 * overwriting a key in a different snapshot, we need to check if it is now
 * fully overwritten and can be deleted, and if we're deleting a key in the
 * current snapshot we need to check if we need to leave a whiteout.
 */
static int
overwrite_interior_snapshot_key(struct btree_trans *trans,
				struct btree_iter *iter,
				struct bkey_i *k)
{
	struct bkey_s_c old = bch2_btree_iter_peek_slot(iter);

	int ret = bkey_err(old);
	if (ret)
		return ret;

	if (!bkey_deleted(old.k)) {
		if (old.k->p.snapshot != k->k.p.snapshot) {
			/*
			 * We're overwriting a key in a different snapshot:
			 * check if it's also been overwritten in siblings
			 */
			ret = delete_interior_snapshot_key(trans, iter->btree_id,
							   k->k.p, bkey_deleted(&k->k),
							   old.k->p, bkey_whiteout(old.k));
			if (ret)
				return ret;
			if (bkey_deleted(&k->k))
				return 1;
		}
	} else if (bkey_deleted(&k->k)) {
		/*
		 * We're deleting a key in the current snapshot:
		 * check if we need to leave a whiteout
		 */
		ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
		if (unlikely(ret < 0))
			return ret;
		if (ret)
			k->k.type = KEY_TYPE_whiteout;
	}

	return 0;
}

int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
				     enum btree_id id,
				     struct bpos old_pos,
				     struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter old_iter, new_iter = { NULL };
	struct bkey_s_c old_k, new_k;
	snapshot_id_list s;
	struct bkey_i *update;
	int ret = 0;

	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
		return 0;

	darray_init(&s);

	bch2_trans_iter_init(trans, &old_iter, id, old_pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
	       !(ret = bkey_err(old_k)) &&
	       bkey_eq(old_pos, old_k.k->p)) {
		struct bpos whiteout_pos =
			SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);

		if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
		    snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
			continue;

		new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
					   BTREE_ITER_NOT_EXTENTS|
					   BTREE_ITER_INTENT);
		ret = bkey_err(new_k);
		if (ret)
			break;

		if (new_k.k->type == KEY_TYPE_deleted) {
			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p = whiteout_pos;
			update->k.type = KEY_TYPE_whiteout;

			ret = bch2_trans_update(trans, &new_iter, update,
						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		}
		bch2_trans_iter_exit(trans, &new_iter);

		ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &new_iter);
	bch2_trans_iter_exit(trans, &old_iter);
	darray_exit(&s);

	return ret;
}

int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_update_flags flags,
				       struct bkey_s_c old,
				       struct bkey_s_c new)
{
	enum btree_id btree_id = iter->btree_id;
	struct bkey_i *update;
	struct bpos new_start = bkey_start_pos(new.k);
	unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
	unsigned back_split  = bkey_gt(old.k->p, new.k->p);
	unsigned middle_split = (front_split || back_split) &&
		old.k->p.snapshot != new.k->p.snapshot;
	unsigned nr_splits = front_split + back_split + middle_split;
	int ret = 0, compressed_sectors;

	/*
	 * If we're going to be splitting a compressed extent, note it
	 * so that __bch2_trans_commit() can increase our disk
	 * reservation:
	 */
	if (nr_splits > 1 &&
	    (compressed_sectors = bch2_bkey_sectors_compressed(old)))
		trans->extra_disk_res += compressed_sectors * (nr_splits - 1);

	if (front_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_back(new_start, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	/* If we're overwriting in a different snapshot - middle split: */
	if (middle_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new_start, update);
		bch2_cut_back(new.k->p, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (bkey_le(old.k->p, new.k->p)) {
		update = bch2_trans_kmalloc(trans, sizeof(*update));
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bkey_init(&update->k);
		update->k.p = old.k->p;
		update->k.p.snapshot = new.k->p.snapshot;

		if (new.k->p.snapshot != old.k->p.snapshot) {
			update->k.type = KEY_TYPE_whiteout;
		} else if (btree_type_has_snapshots(btree_id)) {
			ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
			if (ret < 0)
				return ret;
			if (ret)
				update->k.type = KEY_TYPE_whiteout;
		}

		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (back_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new.k->p, update);

		ret = bch2_trans_update_by_path(trans, iter->path, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
					flags, _RET_IP_);
		if (ret)
			return ret;
	}

	return 0;
}

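/*
 * Overwrite sketch (illustrative):
 *
 *	old:      |---------------------------|
 *	new:               |=========|
 *	splits:   |-front-|           |-back--|
 *
 * front_split and back_split emit trimmed copies of the old extent for the
 * parts the new key doesn't cover. middle_split only happens when old and new
 * are in different snapshots: the range covered by the new key must then stay
 * visible in the old key's snapshot, so a trimmed copy of old covering exactly
 * that range is emitted as well.
 */
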
static int bch2_trans_update_extent(struct btree_trans *trans,
				    struct btree_iter *orig_iter,
				    struct bkey_i *insert,
				    enum btree_update_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id btree_id = orig_iter->btree_id;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES|
			     BTREE_ITER_NOT_EXTENTS);
	k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
	if ((ret = bkey_err(k)))
		goto err;
	if (!k.k)
		goto out;

	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
			ret = extent_front_merge(trans, &iter, k, &insert, flags);
			if (ret)
				goto err;
		}

		goto next;
	}

	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
		bool done = bkey_lt(insert->k.p, k.k->p);

		ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
		if (ret)
			goto err;

		if (done)
			goto out;
next:
		bch2_btree_iter_advance(&iter);
		k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
		if ((ret = bkey_err(k)))
			goto err;
		if (!k.k)
			goto out;
	}

	if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
		ret = extent_back_merge(trans, &iter, insert, k);
		if (ret)
			goto err;
	}
out:
	if (!bkey_deleted(&insert->k))
		ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

static noinline int flush_new_cached_update(struct btree_trans *trans,
					    struct btree_insert_entry *i,
					    enum btree_update_flags flags,
					    unsigned long ip)
{
	struct bkey k;
	int ret;

	btree_path_idx_t path_idx =
		bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
			      BTREE_ITER_INTENT, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, path_idx, 0);
	if (ret)
		goto out;

	struct btree_path *btree_path = trans->paths + path_idx;

	/*
	 * The old key in the insert entry might actually refer to an existing
	 * key in the btree that has been deleted from cache and not yet
	 * flushed. Check for this and skip the flush so we don't run triggers
	 * against a stale key.
	 */
	bch2_btree_path_peek_slot_exact(btree_path, &k);
	if (!bkey_deleted(&k))
		goto out;

	i->key_cache_already_flushed = true;
	i->flags |= BTREE_TRIGGER_NORUN;

	btree_path_set_should_be_locked(btree_path);
	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
	bch2_path_put(trans, path_idx, true);
	return ret;
}

static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
			  struct bkey_i *k, enum btree_update_flags flags,
			  unsigned long ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i, n;
	int cmp;

	struct btree_path *path = trans->paths + path_idx;
	EBUG_ON(!path->should_be_locked);
	EBUG_ON(trans->nr_updates >= trans->nr_paths);
	EBUG_ON(!bpos_eq(k->k.p, path->pos));

	n = (struct btree_insert_entry) {
		.flags		= flags,
		.bkey_type	= __btree_node_type(path->level, path->btree_id),
		.btree_id	= path->btree_id,
		.level		= path->level,
		.cached		= path->cached,
		.path		= path_idx,
		.k		= k,
		.ip_allocated	= ip,
	};

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(i != trans->updates &&
		       btree_insert_entry_cmp(i - 1, i) >= 0);
#endif

	/*
	 * Pending updates are kept sorted: first, find position of new update,
	 * then delete/trim any updates the new update overwrites:
	 */
	for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
		cmp = btree_insert_entry_cmp(&n, i);
		if (cmp <= 0)
			break;
	}

	if (!cmp && i < trans->updates + trans->nr_updates) {
		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

		bch2_path_put(trans, i->path, true);
		i->flags	= n.flags;
		i->cached	= n.cached;
		i->k		= n.k;
		i->path		= n.path;
		i->ip_allocated	= n.ip_allocated;
	} else {
		array_insert_item(trans->updates, trans->nr_updates,
				  i - trans->updates, n);

		i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
		i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;

		if (unlikely(trans->journal_replay_not_finished)) {
			struct bkey_i *j_k =
				bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);

			if (j_k) {
				i->old_k = j_k->k;
				i->old_v = &j_k->v;
			}
		}
	}

	__btree_path_get(trans->paths + i->path, true);

	/*
	 * If a key is present in the key cache, it must also exist in the
	 * btree - this is necessary for cache coherency. When iterating over
	 * a btree that's cached in the key cache, the btree iter code checks
	 * the key cache - but the key has to exist in the btree for that to
	 * work:
	 */
	if (path->cached && bkey_deleted(&i->old_k))
		return flush_new_cached_update(trans, i, flags, ip);

	return 0;
}

static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
						    struct btree_iter *iter,
						    struct btree_path *path)
{
	struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);

	if (!key_cache_path ||
	    !key_cache_path->should_be_locked ||
	    !bpos_eq(key_cache_path->pos, iter->pos)) {
		struct bkey_cached *ck;
		int ret;

		if (!iter->key_cache_path)
			iter->key_cache_path =
				bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
					      BTREE_ITER_INTENT|
					      BTREE_ITER_CACHED, _THIS_IP_);

		iter->key_cache_path =
			bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
						iter->flags & BTREE_ITER_INTENT,
						_THIS_IP_);

		ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
		if (unlikely(ret))
			return ret;

		ck = (void *) trans->paths[iter->key_cache_path].l[0].b;

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
		}

		btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
	}

	return 0;
}

int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_i *k, enum btree_update_flags flags)
{
	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return bch2_trans_update_extent(trans, iter, k, flags);

	if (!(flags & (BTREE_UPDATE_SNAPSHOT_WHITEOUT_CHECKS_DONE|
		       BTREE_UPDATE_KEY_CACHE_RECLAIM)) &&
	    (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
	    bch2_snapshot_parent(trans->c, k->k.p.snapshot)) {
		int ret = overwrite_interior_snapshot_key(trans, iter, k);
		if (ret)
			return ret < 0 ? ret : 0;
	}

	/*
	 * Ensure that updates to cached btrees go to the key cache:
	 */
	btree_path_idx_t path_idx = iter->update_path ?: iter->path;
	struct btree_path *path = trans->paths + path_idx;

	if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
	    !path->cached &&
	    !path->level &&
	    btree_id_cached(trans->c, path->btree_id)) {
		int ret = bch2_trans_update_get_key_cache(trans, iter, path);
		if (ret)
			return ret;

		path_idx = iter->key_cache_path;
	}

	return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
}

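/*
 * Minimal usage sketch (illustrative; mirrors bch2_btree_insert_nonextent()
 * below): position an iterator on the key, traverse it, queue the update,
 * then commit:
 *
 *	bch2_trans_iter_init(trans, &iter, btree, k->k.p, BTREE_ITER_INTENT);
 *	ret =   bch2_btree_iter_traverse(&iter) ?:
 *		bch2_trans_update(trans, &iter, k, 0) ?:
 *		bch2_trans_commit(trans, NULL, NULL, 0);
 *	bch2_trans_iter_exit(trans, &iter);
 */
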
int bch2_btree_insert_clone_trans(struct btree_trans *trans,
				  enum btree_id btree,
				  struct bkey_i *k)
{
	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_copy(n, k);
	return bch2_btree_insert_trans(trans, btree, n, 0);
}

struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
	unsigned new_top = trans->journal_entries_u64s + u64s;
	unsigned old_size = trans->journal_entries_size;

	if (new_top > trans->journal_entries_size) {
		trans->journal_entries_size = roundup_pow_of_two(new_top);

		btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
	}

	struct jset_entry *n =
		bch2_trans_kmalloc_nomemzero(trans,
				trans->journal_entries_size * sizeof(u64));
	if (IS_ERR(n))
		return ERR_CAST(n);

	if (trans->journal_entries)
		memcpy(n, trans->journal_entries, old_size * sizeof(u64));
	trans->journal_entries = n;

	struct jset_entry *e = btree_trans_journal_entries_top(trans);
	trans->journal_entries_u64s = new_top;
	return e;
}

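/*
 * Usage sketch (illustrative; __bch2_trans_log_msg() below is a real caller):
 * reserve space for a journal entry in this transaction, then initialize it
 * before the transaction commits:
 *
 *	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
 *	if (!IS_ERR(e))
 *		journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
 */
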
int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
			     enum btree_id btree, struct bpos end)
{
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_INTENT);
	k = bch2_btree_iter_prev(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bch2_btree_iter_advance(iter);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	BUG_ON(k.k->type != KEY_TYPE_deleted);

	if (bkey_gt(k.k->p, end)) {
		ret = -BCH_ERR_ENOSPC_btree_slot;
		goto err;
	}

	return 0;
err:
	bch2_trans_iter_exit(trans, iter);
	return ret;
}

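/*
 * Usage sketch (illustrative, hypothetical caller): grab the first free slot
 * past the last key in a btree, bounded by @end, and insert there:
 *
 *	struct btree_iter iter;
 *	int ret = bch2_bkey_get_empty_slot(trans, &iter, btree, POS(0, U64_MAX));
 *	if (!ret) {
 *		new_k->k.p = iter.pos;
 *		ret = bch2_trans_update(trans, &iter, new_k, 0);
 *		bch2_trans_iter_exit(trans, &iter);
 *	}
 */
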
void bch2_trans_commit_hook(struct btree_trans *trans,
			    struct btree_trans_commit_hook *h)
{
	h->next = trans->hooks;
	trans->hooks = h;
}

int bch2_btree_insert_nonextent(struct btree_trans *trans,
				enum btree_id btree, struct bkey_i *k,
				enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
			    struct bkey_i *k, enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/**
 * bch2_btree_insert - insert keys into the extent btree
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @disk_res:		must be non-NULL whenever inserting or potentially
 *			splitting data extents
 * @flags:		transaction commit flags
 *
 * Returns:		0 on success, error code on failure
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
		      struct disk_reservation *disk_res, int flags)
{
	return bch2_trans_do(c, disk_res, NULL, flags,
			     bch2_btree_insert_trans(trans, id, k, 0));
}

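/*
 * Usage sketch (illustrative; mirrors what bch2_btree_bit_mod() below does,
 * but as a standalone insert outside any wider transaction):
 *
 *	struct bkey_i k;
 *
 *	bkey_init(&k.k);
 *	k.k.type = KEY_TYPE_set;
 *	k.k.p    = pos;
 *
 *	ret = bch2_btree_insert(c, btree, &k, NULL, 0);
 *
 * disk_res is NULL here because an insert into a non-extents btree can't
 * split compressed data extents.
 */
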
int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter,
				unsigned len, unsigned update_flags)
{
	struct bkey_i *k;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.p = iter->pos;
	bch2_key_resize(&k->k, len);
	return bch2_trans_update(trans, iter, k, update_flags);
}

int bch2_btree_delete_at(struct btree_trans *trans,
			 struct btree_iter *iter, unsigned update_flags)
{
	return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
}

int bch2_btree_delete(struct btree_trans *trans,
		      enum btree_id btree, struct bpos pos,
		      unsigned update_flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, pos,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, update_flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
				  struct bpos start, struct bpos end,
				  unsigned update_flags,
				  u64 *journal_seq)
{
	u32 restart_count = trans->restart_count;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(trans->c, 0);
		struct bkey_i delete;

		ret = bkey_err(k);
		if (ret)
			goto err;

		bkey_init(&delete.k);

		/*
		 * This could probably be more efficient for extents:
		 */

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_IS_EXTENTS)
			bch2_key_resize(&delete.k,
					bpos_min(end, k.k->p).offset -
					iter.pos.offset);

		ret   = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
			bch2_trans_commit(trans, &disk_res, journal_seq,
					  BCH_TRANS_COMMIT_no_enospc);
		bch2_disk_reservation_put(trans->c, &disk_res);
err:
		/*
		 * the bch2_trans_begin() call is in a weird place because we
		 * need to call it after every transaction commit, to avoid path
		 * overflow, but don't want to call it if the delete operation
		 * is a no-op and we have no work to do:
		 */
		bch2_trans_begin(trans);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret ?: trans_was_restarted(trans, restart_count);
}

/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    unsigned update_flags,
			    u64 *journal_seq)
{
	int ret = bch2_trans_run(c,
			bch2_btree_delete_range_trans(trans, id, start, end,
						      update_flags, journal_seq));
	if (ret == -BCH_ERR_transaction_restart_nested)
		ret = 0;
	return ret;
}

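/*
 * Usage sketch (illustrative): delete every key for one inode in one snapshot;
 * the interval is half open, so the end position itself is not deleted:
 *
 *	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
 *				      SPOS(inum, 0, snapshot),
 *				      SPOS(inum, U64_MAX, snapshot),
 *				      0, NULL);
 */
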
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
		       struct bpos pos, bool set)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
	int ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		return ret;

	bkey_init(&k->k);
	k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k->k.p = pos;

	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
				struct bpos pos, bool set)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k.k.p = pos;

	return bch2_trans_update_buffered(trans, btree, &k);
}

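/*
 * Usage sketch (illustrative): these helpers treat a btree as a bitmap - a
 * KEY_TYPE_set key at @pos means the bit is set, its absence means clear.
 * E.g. flagging an inode number in a hypothetical tracking btree:
 *
 *	ret = bch2_btree_bit_mod_buffered(trans, btree, POS(0, inum), true);
 */
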
static int __bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf, unsigned u64s)
{
	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
	int ret = PTR_ERR_OR_ZERO(e);
	if (ret)
		return ret;

	struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
	journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
	memcpy(l->d, buf->buf, buf->pos);
	return 0;
}

static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
		  va_list args)
{
	struct printbuf buf = PRINTBUF;
	prt_vprintf(&buf, fmt, args);

	unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
	prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);

	int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
	if (ret)
		goto err;

	if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
		ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
		if (ret)
			goto err;

		struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
		journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
		memcpy(l->d, buf.buf, buf.pos);
		c->journal.early_journal_entries.nr += jset_u64s(u64s);
	} else {
		ret = bch2_trans_do(c, NULL, NULL,
				    BCH_TRANS_COMMIT_lazy_rw|commit_flags,
				    __bch2_trans_log_msg(trans, &buf, u64s));
	}
err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, 0, fmt, args);
	va_end(args);
	return ret;
}

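/*
 * Usage sketch (illustrative): bch2_fs_log_msg() takes printf-style arguments
 * and appends a human-readable log entry to the journal:
 *
 *	bch2_fs_log_msg(c, "running %s", __func__);
 */
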
/*
 * Use for logging messages during recovery to enable reserved space and avoid
 * blocking.
 */
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
	va_end(args);
	return ret;
}