// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "replicas.h"

#include <linux/sort.h>
#include <trace/events/bcachefs.h>

static inline bool same_leaf_as_prev(struct btree_trans *trans,
				     unsigned sorted_idx)
{
	struct btree_insert_entry *i = trans->updates +
		trans->updates_sorted[sorted_idx];
	struct btree_insert_entry *prev = sorted_idx
		? trans->updates + trans->updates_sorted[sorted_idx - 1]
		: NULL;

	return prev &&
		i->iter->l[0].b == prev->iter->l[0].b;
}

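/* Iterate over updates in btree_iter_cmp order, via the sorted index array: */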
#define trans_for_each_update_sorted(_trans, _i, _iter)			\
	for (_iter = 0;							\
	     _iter < _trans->nr_updates &&				\
	     (_i = _trans->updates + _trans->updates_sorted[_iter], 1);	\
	     _iter++)

inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
					    struct btree_iter *iter)
{
	bch2_btree_node_lock_write(b, iter);

	if (unlikely(btree_node_just_written(b)) &&
	    bch2_btree_post_write_cleanup(c, b))
		bch2_btree_iter_reinit_node(iter, b);

	/*
	 * If the last bset has been written, or if it's gotten too big - start
	 * a new bset to insert into:
	 */
	if (want_new_bset(c, b))
		bch2_btree_init_next(c, b, iter);
}

static void btree_trans_lock_write(struct btree_trans *trans, bool lock)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	unsigned iter;

	trans_for_each_update_sorted(trans, i, iter) {
		if (same_leaf_as_prev(trans, iter))
			continue;

		if (lock)
			bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
		else
			bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
	}
}

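/*
 * trans->updates_sorted is an array of indices into trans->updates, kept in
 * btree_iter_cmp order by the insertion sort below: taking per-node write
 * locks in a single consistent order is what prevents lock ordering deadlocks
 * between racing transaction commits.
 */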
static inline void btree_trans_sort_updates(struct btree_trans *trans)
{
	struct btree_insert_entry *l, *r;
	unsigned nr = 0, pos;

	trans_for_each_update(trans, l) {
		for (pos = 0; pos < nr; pos++) {
			r = trans->updates + trans->updates_sorted[pos];

			if (btree_iter_cmp(l->iter, r->iter) <= 0)
				break;
		}

		memmove(&trans->updates_sorted[pos + 1],
			&trans->updates_sorted[pos],
			(nr - pos) * sizeof(trans->updates_sorted[0]));

		trans->updates_sorted[pos] = l - trans->updates;
		nr++;
	}

	BUG_ON(nr != trans->nr_updates);
}

/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non extents: */
bool bch2_btree_bset_insert_key(struct btree_iter *iter,
				struct btree *b,
				struct btree_node_iter *node_iter,
				struct bkey_i *insert)
{
	const struct bkey_format *f = &b->format;
	struct bkey_packed *k;
	unsigned clobber_u64s;

	EBUG_ON(btree_node_just_written(b));
	EBUG_ON(bset_written(b, btree_bset_last(b)));
	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
	EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
		bkey_cmp(insert->k.p, b->data->max_key) > 0);

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && !bkey_cmp_packed(b, k, &insert->k)) {
		BUG_ON(bkey_whiteout(k));

		if (!bkey_written(b, k) &&
		    bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
		    !bkey_whiteout(&insert->k)) {
			k->type = insert->k.type;
			memcpy_u64s(bkeyp_val(f, k), &insert->v,
				    bkey_val_u64s(&insert->k));
			return true;
		}

		insert->k.needs_whiteout = k->needs_whiteout;

		btree_account_key_drop(b, k);

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;

			/*
			 * If we're deleting, and the key we're deleting doesn't
			 * need a whiteout (it wasn't overwriting a key that had
			 * been written to disk) - just delete it:
			 */
			if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
				bch2_bset_delete(b, k, clobber_u64s);
				bch2_btree_node_iter_fix(iter, b, node_iter,
							 k, clobber_u64s, 0);
				return true;
			}

			goto overwrite;
		}

		k->type = KEY_TYPE_deleted;
		bch2_btree_node_iter_fix(iter, b, node_iter, k,
					 k->u64s, k->u64s);

		if (bkey_whiteout(&insert->k)) {
			reserve_whiteout(b, k);
			return true;
		} else {
			k->needs_whiteout = false;
		}
	} else {
		/*
		 * Deleting, but the key to delete wasn't found - nothing to do:
		 */
		if (bkey_whiteout(&insert->k))
			return false;

		insert->k.needs_whiteout = false;
	}

	k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
	clobber_u64s = 0;
overwrite:
	bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
	bch2_btree_node_iter_fix(iter, b, node_iter, k,
				 clobber_u64s, k->u64s);
	return true;
}

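/*
 * In brief, the cases bch2_btree_bset_insert_key() handles above: an exact
 * match in the still-unwritten last bset may be overwritten in place (or
 * deleted outright when no whiteout is needed); a match in an older bset is
 * marked KEY_TYPE_deleted in place, reserving a whiteout if the deletion has
 * to survive being written to disk; otherwise the key is inserted into the
 * last bset, and deleting a nonexistent key is a no-op.
 */
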
static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
			       unsigned i, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);

	btree_node_lock_type(c, b, SIX_LOCK_read);
	bch2_btree_node_write_cond(c, b,
		(btree_current_write(b) == w && w->journal.seq == seq));
	six_unlock_read(&b->lock);
}

static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 0, seq);
}

static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 1, seq);
}

static inline void __btree_journal_key(struct btree_trans *trans,
				       enum btree_id btree_id,
				       struct bkey_i *insert)
{
	struct journal *j = &trans->c->journal;
	u64 seq = trans->journal_res.seq;
	bool needs_whiteout = insert->k.needs_whiteout;

	/* needs_whiteout is an in-memory flag - don't journal it: */
	insert->k.needs_whiteout = false;
	bch2_journal_add_keys(j, &trans->journal_res,
			      btree_id, insert);
	insert->k.needs_whiteout = needs_whiteout;

	bch2_journal_set_has_inode(j, &trans->journal_res,
				   insert->k.p.inode);

	if (trans->journal_seq)
		*trans->journal_seq = seq;
}

void bch2_btree_journal_key(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct bkey_i *insert)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree *b = iter->l[0].b;
	struct btree_write *w = btree_current_write(b);

	EBUG_ON(iter->level || b->level);
	EBUG_ON(trans->journal_res.ref !=
		!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));

	if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		__btree_journal_key(trans, iter->btree_id, insert);
		btree_bset_last(b)->journal_seq =
			cpu_to_le64(trans->journal_res.seq);
	}

	if (unlikely(!journal_pin_active(&w->journal))) {
		u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
			? trans->journal_res.seq
			: j->replay_journal_seq;

		bch2_journal_pin_add(j, seq, &w->journal,
				     btree_node_write_idx(b) == 0
				     ? btree_node_flush0
				     : btree_node_flush1);
	}

	if (unlikely(!btree_node_dirty(b)))
		set_btree_node_dirty(b);
}

static void bch2_insert_fixup_key(struct btree_trans *trans,
				  struct btree_insert_entry *insert)
{
	struct btree_iter *iter = insert->iter;
	struct btree_iter_level *l = &iter->l[0];

	EBUG_ON(iter->level);
	EBUG_ON(insert->k->k.u64s >
		bch_btree_keys_u64s_remaining(trans->c, l->b));

	if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
				       insert->k))
		bch2_btree_journal_key(trans, iter, insert->k);
}

/**
 * btree_insert_key_leaf - insert one key into a leaf node
 */
static void btree_insert_key_leaf(struct btree_trans *trans,
				  struct btree_insert_entry *insert)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter = insert->iter;
	struct btree *b = iter->l[0].b;
	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	if (!btree_node_is_extents(b))
		bch2_insert_fixup_key(trans, insert);
	else
		bch2_insert_fixup_extent(trans, insert);

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_btree_iter_reinit_node(iter, b);

	trace_btree_insert_key(c, b, insert->k);
}

/* Normal update interface: */

static inline void btree_insert_entry_checks(struct btree_trans *trans,
					     struct btree_insert_entry *i)
{
	struct bch_fs *c = trans->c;

	BUG_ON(i->iter->level);
	BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
	EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
		bkey_cmp(i->k->k.p, i->iter->l[0].b->key.k.p) > 0);
	EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
		!(trans->flags & BTREE_INSERT_ATOMIC));

	BUG_ON(debug_check_bkeys(c) &&
	       !bkey_deleted(&i->k->k) &&
	       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->iter->btree_id));
}

static int bch2_trans_journal_preres_get(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	unsigned u64s = 0;
	int ret;

	/* XXX: compiled out - u64s stays 0 and we return immediately below: */
	trans_for_each_update(trans, i)
		if (0)
			u64s += jset_u64s(i->k->k.u64s);

	if (!u64s)
		return 0;

	ret = bch2_journal_preres_get(&c->journal,
			&trans->journal_preres, u64s,
			JOURNAL_RES_GET_NONBLOCK);
	if (ret != -EAGAIN)
		return ret;

	bch2_trans_unlock(trans);

	ret = bch2_journal_preres_get(&c->journal,
			&trans->journal_preres, u64s, 0);
	if (ret)
		return ret;

	if (!bch2_trans_relock(trans)) {
		trace_trans_restart_journal_preres_get(trans->ip);
		return -EINTR;
	}

	return 0;
}

static int bch2_trans_journal_res_get(struct btree_trans *trans,
				      unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (trans->flags & BTREE_INSERT_JOURNAL_RESERVED)
		flags |= JOURNAL_RES_GET_RESERVED;

	ret = bch2_journal_res_get(&c->journal, &trans->journal_res,
				   trans->journal_u64s, flags);

	return ret == -EAGAIN ? BTREE_INSERT_NEED_JOURNAL_RES : ret;
}

static enum btree_insert_ret
btree_key_can_insert(struct btree_trans *trans,
		     struct btree_insert_entry *insert,
		     unsigned *u64s)
{
	struct bch_fs *c = trans->c;
	struct btree *b = insert->iter->l[0].b;
	enum btree_insert_ret ret;

	if (unlikely(btree_node_fake(b)))
		return BTREE_INSERT_BTREE_NODE_FULL;

	ret = !btree_node_is_extents(b)
		? BTREE_INSERT_OK
		: bch2_extent_can_insert(trans, insert, u64s);
	if (ret)
		return ret;

	if (*u64s > bch_btree_keys_u64s_remaining(c, b))
		return BTREE_INSERT_BTREE_NODE_FULL;

	return BTREE_INSERT_OK;
}

static int btree_trans_check_can_insert(struct btree_trans *trans,
					struct btree_insert_entry **stopped_at)
{
	struct btree_insert_entry *i;
	unsigned iter, u64s = 0;
	int ret;

	trans_for_each_update_sorted(trans, i, iter) {
		/* Multiple inserts might go to same leaf: */
		if (!same_leaf_as_prev(trans, iter))
			u64s = 0;

		u64s += i->k->k.u64s;
		ret = btree_key_can_insert(trans, i, &u64s);
		if (ret) {
			*stopped_at = i;
			return ret;
		}
	}

	return 0;
}

static inline void do_btree_insert_one(struct btree_trans *trans,
				       struct btree_insert_entry *insert)
{
	btree_insert_key_leaf(trans, insert);
}

static inline bool update_triggers_transactional(struct btree_trans *trans,
						 struct btree_insert_entry *i)
{
	return likely(!(trans->flags & BTREE_INSERT_MARK_INMEM)) &&
		(i->iter->btree_id == BTREE_ID_EXTENTS ||
		 i->iter->btree_id == BTREE_ID_INODES ||
		 i->iter->btree_id == BTREE_ID_REFLINK);
}

static inline bool update_has_triggers(struct btree_trans *trans,
				       struct btree_insert_entry *i)
{
	return likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
		btree_node_type_needs_gc(i->iter->btree_id);
}

/*
 * Get journal reservation, take write locks, and attempt to do btree update(s):
 */
static inline int do_btree_insert_at(struct btree_trans *trans,
				     struct btree_insert_entry **stopped_at)
{
	struct bch_fs *c = trans->c;
	struct bch_fs_usage *fs_usage = NULL;
	struct btree_insert_entry *i;
	struct btree_iter *iter;
	unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
		? BCH_BUCKET_MARK_BUCKET_INVALIDATE
		: 0;
	int ret;

	trans_for_each_update(trans, i)
		BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);

	/*
	 * note: running triggers will append more updates to the list of
	 * updates as we're walking it:
	 */
	trans_for_each_update(trans, i)
		if (update_has_triggers(trans, i) &&
		    update_triggers_transactional(trans, i)) {
			ret = bch2_trans_mark_update(trans, i->iter, i->k);
			if (ret == -EINTR)
				trace_trans_restart_mark(trans->ip);
			if (ret)
				goto out_clear_replicas;
		}

	trans_for_each_iter(trans, iter) {
		if (iter->nodes_locked != iter->nodes_intent_locked) {
			BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
			BUG_ON(trans->iters_live & (1ULL << iter->idx));
			__bch2_btree_iter_unlock(iter);
		}
	}

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		trans_for_each_update(trans, i)
			btree_insert_entry_checks(trans, i);
	bch2_btree_trans_verify_locks(trans);

	/*
	 * No more updates can be added - sort updates so we can take write
	 * locks in the correct order:
	 */
	btree_trans_sort_updates(trans);

	btree_trans_lock_write(trans, true);

	if (race_fault()) {
		ret = -EINTR;
		trace_trans_restart_fault_inject(trans->ip);
		goto out;
	}

	/*
	 * Check if the insert will fit in the leaf node with the write lock
	 * held, otherwise another thread could write the node changing the
	 * amount of space available:
	 */
	ret = btree_trans_check_can_insert(trans, stopped_at);
	if (ret)
		goto out;

	trans_for_each_update(trans, i) {
		if (!btree_node_type_needs_gc(i->iter->btree_id))
			continue;

		if (!fs_usage) {
			percpu_down_read(&c->mark_lock);
			fs_usage = bch2_fs_usage_scratch_get(c);
		}

		if (!bch2_bkey_replicas_marked_locked(c,
				bkey_i_to_s_c(i->k), true)) {
			ret = BTREE_INSERT_NEED_MARK_REPLICAS;
			goto out;
		}
	}

	/*
	 * Don't get journal reservation until after we know insert will
	 * succeed:
	 */
	if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		trans->journal_u64s = 0;

		trans_for_each_update(trans, i)
			trans->journal_u64s += jset_u64s(i->k->k.u64s);

		ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_NONBLOCK);
		if (ret)
			goto out;
	}

	if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
		if (journal_seq_verify(c))
			trans_for_each_update(trans, i)
				i->k->k.version.lo = trans->journal_res.seq;
		else if (inject_invalid_keys(c))
			trans_for_each_update(trans, i)
				i->k->k.version = MAX_VERSION;
	}

	trans_for_each_update(trans, i)
		if (update_has_triggers(trans, i) &&
		    !update_triggers_transactional(trans, i))
			bch2_mark_update(trans, i, fs_usage, mark_flags);

	if (fs_usage && trans->fs_usage_deltas)
		bch2_replicas_delta_list_apply(c, fs_usage,
					       trans->fs_usage_deltas);

	if (fs_usage)
		bch2_trans_fs_usage_apply(trans, fs_usage);

	if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
	    unlikely(c->gc_pos.phase))
		trans_for_each_update(trans, i)
			if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
				bch2_mark_update(trans, i, NULL,
						 mark_flags|
						 BCH_BUCKET_MARK_GC);

	trans_for_each_update(trans, i)
		do_btree_insert_one(trans, i);
out:
	BUG_ON(ret &&
	       (trans->flags & BTREE_INSERT_JOURNAL_RESERVED) &&
	       trans->journal_res.ref);

	btree_trans_lock_write(trans, false);

	if (fs_usage) {
		bch2_fs_usage_scratch_put(c, fs_usage);
		percpu_up_read(&c->mark_lock);
	}

	bch2_journal_res_put(&c->journal, &trans->journal_res);
out_clear_replicas:
	if (trans->fs_usage_deltas) {
		memset(&trans->fs_usage_deltas->fs_usage, 0,
		       sizeof(trans->fs_usage_deltas->fs_usage));
		trans->fs_usage_deltas->used = 0;
	}

	return ret;
}

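/*
 * Note the ordering above: transactional triggers run first (and may append
 * more updates), then updates are sorted and write locks taken, and only
 * after we know everything fits and the journal reservation is held are keys
 * marked and the actual btree inserts done.
 */
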
int bch2_trans_commit_error(struct btree_trans *trans,
			    struct btree_insert_entry *i,
			    int ret)
{
	struct bch_fs *c = trans->c;
	unsigned flags = trans->flags;

	/*
	 * BTREE_INSERT_NOUNLOCK means don't unlock _after_ successful btree
	 * update; if we haven't done anything yet it doesn't apply
	 */
	flags &= ~BTREE_INSERT_NOUNLOCK;

	switch (ret) {
	case BTREE_INSERT_BTREE_NODE_FULL:
		ret = bch2_btree_split_leaf(c, i->iter, flags);

		/*
		 * if the split succeeded without dropping locks the insert will
		 * still be atomic (in the BTREE_INSERT_ATOMIC sense, what the
		 * caller peeked() and is overwriting won't have changed)
		 */
#if 0
		/*
		 * XXX:
		 * split -> btree node merging (of parent node) might still drop
		 * locks when we're not passing it BTREE_INSERT_NOUNLOCK
		 *
		 * we don't want to pass BTREE_INSERT_NOUNLOCK to split as that
		 * will inhibit merging - but we don't have a reliable way yet
		 * (do we?) of checking if we dropped locks in this path
		 */
		if (!ret)
			goto retry;
#endif
		/*
		 * don't care if we got ENOSPC because we told split it
		 * couldn't block:
		 */
		if (!ret ||
		    ret == -EINTR ||
		    (flags & BTREE_INSERT_NOUNLOCK)) {
			trace_trans_restart_btree_node_split(trans->ip);
			ret = -EINTR;
		}
		break;
	case BTREE_INSERT_ENOSPC:
		ret = -ENOSPC;
		break;
	case BTREE_INSERT_NEED_MARK_REPLICAS:
		bch2_trans_unlock(trans);

		trans_for_each_update(trans, i) {
			ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(i->k));
			if (ret)
				return ret;
		}

		if (bch2_trans_relock(trans))
			return 0;

		trace_trans_restart_mark_replicas(trans->ip);
		ret = -EINTR;
		break;
	case BTREE_INSERT_NEED_JOURNAL_RES:
		bch2_trans_unlock(trans);

		ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_CHECK);
		if (ret)
			return ret;

		if (bch2_trans_relock(trans))
			return 0;

		trace_trans_restart_journal_res_get(trans->ip);
		ret = -EINTR;
		break;
	default:
		BUG_ON(ret >= 0);
		break;
	}

	if (ret == -EINTR) {
		int ret2 = bch2_btree_iter_traverse_all(trans);

		if (ret2) {
			trace_trans_restart_traverse(trans->ip);
			return ret2;
		}

		/*
		 * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
		 * dropped locks:
		 */
		if (!(flags & BTREE_INSERT_ATOMIC))
			return 0;

		trace_trans_restart_atomic(trans->ip);
	}

	return ret;
}

/**
 * __bch2_trans_commit - insert keys at given iterator positions
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
 * -EINTR: locking changed, this function should be called again. Only returned
 *  if passed BTREE_INSERT_ATOMIC.
 * -EROFS: filesystem read only
 * -EIO: journal or btree node IO error
 */
static int __bch2_trans_commit(struct btree_trans *trans,
			       struct btree_insert_entry **stopped_at)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	unsigned iter;
	int ret;

	trans_for_each_update(trans, i) {
		if (!bch2_btree_iter_upgrade(i->iter, 1)) {
			trace_trans_restart_upgrade(trans->ip);
			ret = -EINTR;
			goto err;
		}

		ret = btree_iter_err(i->iter);
		if (ret)
			goto err;
	}

	ret = do_btree_insert_at(trans, stopped_at);
	if (unlikely(ret))
		goto err;

	if (trans->flags & BTREE_INSERT_NOUNLOCK)
		trans->nounlock = true;

	trans_for_each_update_sorted(trans, i, iter)
		if (!same_leaf_as_prev(trans, iter))
			bch2_foreground_maybe_merge(c, i->iter,
						    0, trans->flags);

	trans->nounlock = false;

	trans_for_each_update(trans, i)
		bch2_btree_iter_downgrade(i->iter);
err:
	/* make sure we didn't drop or screw up locks: */
	bch2_btree_trans_verify_locks(trans);

	return ret;
}

int bch2_trans_commit(struct btree_trans *trans,
		      struct disk_reservation *disk_res,
		      u64 *journal_seq,
		      unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i = NULL;
	struct btree_iter *iter;
	unsigned orig_nr_updates = trans->nr_updates;
	unsigned orig_mem_top = trans->mem_top;
	int ret = 0;

	if (!trans->nr_updates)
		goto out_noupdates;

	/* for the sake of sanity: */
	BUG_ON(trans->nr_updates > 1 && !(flags & BTREE_INSERT_ATOMIC));

	if (flags & BTREE_INSERT_GC_LOCK_HELD)
		lockdep_assert_held(&c->gc_lock);

	if (!trans->commit_start)
		trans->commit_start = local_clock();

	memset(&trans->journal_res, 0, sizeof(trans->journal_res));
	memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
	trans->disk_res		= disk_res;
	trans->journal_seq	= journal_seq;
	trans->flags		= flags;

	if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
		     !percpu_ref_tryget(&c->writes))) {
		if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
			return -EROFS;

		bch2_trans_unlock(trans);

		ret = bch2_fs_read_write_early(c);
		if (ret)
			return ret;

		percpu_ref_get(&c->writes);

		if (!bch2_trans_relock(trans)) {
			ret = -EINTR;
			goto err;
		}
	}
retry:
	ret = bch2_trans_journal_preres_get(trans);
	if (ret)
		goto err;

	ret = __bch2_trans_commit(trans, &i);
	if (ret)
		goto err;
out:
	bch2_journal_preres_put(&c->journal, &trans->journal_preres);

	if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
		percpu_ref_put(&c->writes);
out_noupdates:
	if (!ret && trans->commit_start) {
		bch2_time_stats_update(&c->times[BCH_TIME_btree_update],
				       trans->commit_start);
		trans->commit_start = 0;
	}

	BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);

	trans_for_each_iter(trans, iter)
		iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;

	if (!ret) {
		bch2_trans_unlink_iters(trans);
		trans->iters_touched = 0;
	}
	trans->nr_updates	= 0;
	trans->mem_top		= 0;

	return ret;
err:
	ret = bch2_trans_commit_error(trans, i, ret);

	/* free updates and memory used by triggers, they'll be reexecuted: */
	trans->nr_updates	= orig_nr_updates;
	trans->mem_top		= orig_mem_top;

	/* can't loop if it was passed in and we changed it: */
	if (unlikely(trans->flags & BTREE_INSERT_NO_CLEAR_REPLICAS) && !ret)
		ret = -EINTR;

	if (!ret)
		goto retry;

	goto out;
}

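/*
 * Example usage (illustrative sketch, not part of the original file): a
 * typical caller queues updates with bch2_trans_update() and, when passing
 * BTREE_INSERT_ATOMIC, retries the whole transaction on -EINTR:
 *
 *	struct btree_trans trans;
 *	struct btree_iter *iter;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *
 *		iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, pos,
 *					   BTREE_ITER_INTENT);
 *		bch2_trans_update(&trans, iter, &new_key);
 *
 *		ret = bch2_trans_commit(&trans, NULL, NULL,
 *					BTREE_INSERT_ATOMIC|
 *					BTREE_INSERT_NOFAIL);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 *
 * where `pos' and `new_key' (a caller-initialized struct bkey_i) are assumed
 * to be set up by the caller.
 */
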
/**
 * bch2_btree_insert - insert a key into a given btree
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @disk_res:		disk reservation, or NULL
 * @journal_seq:	if non NULL, filled in with the journal sequence number
 * @flags:		BTREE_INSERT_* flags
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
		      struct bkey_i *k,
		      struct disk_reservation *disk_res,
		      u64 *journal_seq, int flags)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
				   BTREE_ITER_INTENT);

	bch2_trans_update(&trans, iter, k);

	ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
	if (ret == -EINTR)
		goto retry;
	bch2_trans_exit(&trans);

	return ret;
}

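/*
 * Example (illustrative only): insert one caller-constructed key with no disk
 * reservation and without reporting the journal sequence number, where `k' is
 * assumed to point to a fully initialized struct bkey_i:
 *
 *	ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, k, NULL, NULL, 0);
 */
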
int bch2_btree_delete_at_range(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bpos end,
			       u64 *journal_seq)
{
	struct bkey_s_c k;
	int ret = 0;

	while ((k = bch2_btree_iter_peek(iter)).k &&
	       !(ret = bkey_err(k)) &&
	       bkey_cmp(iter->pos, end) < 0) {
		struct bkey_i delete;

		bkey_init(&delete.k);

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter->pos;

		if (iter->flags & BTREE_ITER_IS_EXTENTS) {
			unsigned max_sectors =
				KEY_SIZE_MAX & (~0 << trans->c->block_bits);

			/* create the biggest key we can */
			bch2_key_resize(&delete.k, max_sectors);
			bch2_cut_back(end, &delete.k);

			ret = bch2_extent_trim_atomic(&delete, iter);
			if (ret)
				break;
		}

		bch2_trans_update(trans, iter, &delete);
		ret = bch2_trans_commit(trans, NULL, journal_seq,
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_NOFAIL);
		if (ret)
			break;

		bch2_trans_cond_resched(trans);
	}

	return ret;
}

int bch2_btree_delete_at(struct btree_trans *trans,
			 struct btree_iter *iter, unsigned flags)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.p = iter->pos;

	bch2_trans_update(trans, iter, &k);
	return bch2_trans_commit(trans, NULL, NULL,
				 BTREE_INSERT_NOFAIL|
				 BTREE_INSERT_USE_RESERVE|flags);
}

/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    u64 *journal_seq)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret = 0;

	/*
	 * XXX: whether we need mem/more iters depends on whether this btree id
	 * has triggers
	 */
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);

	iter = bch2_trans_get_iter(&trans, id, start, BTREE_ITER_INTENT);

	ret = bch2_btree_delete_at_range(&trans, iter, end, journal_seq);
	ret = bch2_trans_exit(&trans) ?: ret;

	BUG_ON(ret == -EINTR);
	return ret;
}

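/*
 * Example (illustrative only): delete every key belonging to inode number
 * `inum' in the extents btree - recall that the end position is exclusive:
 *
 *	ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
 *				      POS(inum, 0), POS(inum + 1, 0),
 *				      &journal_seq);
 */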