#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "journal_reclaim.h"

#include <linux/sort.h>
#include <trace/events/bcachefs.h>
/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non extents: */
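/*
 * A rough map of the cases handled below:
 *
 *  - the new key overwrites an existing key of the same value size in a not
 *    yet written bset: overwrite the value in place
 *  - the overwritten key lives in the last (still mutable) bset: delete it or
 *    clobber its slot with the new key
 *  - the overwritten key lives in an older, already written bset: mark it
 *    deleted in memory and, if we're inserting a deletion, reserve space for
 *    the whiteout it will need
 *  - nothing is overwritten: plain insert into the last bset; deleting a key
 *    that doesn't exist is a no-op
 */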
bool bch2_btree_bset_insert_key(struct btree_iter *iter,
				struct btree *b,
				struct btree_node_iter *node_iter,
				struct bkey_i *insert)
{
	const struct bkey_format *f = &b->format;
	struct bkey_packed *k;
	unsigned clobber_u64s;

	EBUG_ON(btree_node_just_written(b));
	EBUG_ON(bset_written(b, btree_bset_last(b)));
	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
	EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
		bkey_cmp(insert->k.p, b->data->max_key) > 0);

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && !bkey_cmp_packed(b, k, &insert->k)) {
		BUG_ON(bkey_whiteout(k));

		if (!bkey_written(b, k) &&
		    bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
		    !bkey_whiteout(&insert->k)) {
			k->type = insert->k.type;
			memcpy_u64s(bkeyp_val(f, k), &insert->v,
				    bkey_val_u64s(&insert->k));
			return true;
		}

		insert->k.needs_whiteout = k->needs_whiteout;

		btree_account_key_drop(b, k);

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;

			/*
			 * If we're deleting, and the key we're deleting doesn't
			 * need a whiteout (it wasn't overwriting a key that had
			 * been written to disk) - just delete it:
			 */
			if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
				bch2_bset_delete(b, k, clobber_u64s);
				bch2_btree_node_iter_fix(iter, b, node_iter,
							 k, clobber_u64s, 0);
				bch2_btree_iter_verify(iter, b);
				return true;
			}

			goto overwrite;
		}

		k->type = KEY_TYPE_deleted;
		bch2_btree_node_iter_fix(iter, b, node_iter, k,
					 k->u64s, k->u64s);
		bch2_btree_iter_verify(iter, b);

		if (bkey_whiteout(&insert->k)) {
			reserve_whiteout(b, k);
			return true;
		} else {
			k->needs_whiteout = false;
		}
	} else {
		/*
		 * Deleting, but the key to delete wasn't found - nothing to do:
		 */
		if (bkey_whiteout(&insert->k))
			return false;

		insert->k.needs_whiteout = false;
	}

	k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
	clobber_u64s = 0;
overwrite:
	bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
	if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
		bch2_btree_node_iter_fix(iter, b, node_iter, k,
					 clobber_u64s, k->u64s);
	bch2_btree_iter_verify(iter, b);
	return true;
}
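/*
 * A short note on whiteouts, as they're used above: once a key has been
 * written to disk as part of an earlier bset, dropping it from the in-memory
 * node isn't enough - a whiteout must be kept (and eventually written) so the
 * deletion shadows the copy in the older, on-disk bset. That's what
 * needs_whiteout and reserve_whiteout() track. Keys that only ever existed in
 * the still-unwritten bset can simply be deleted outright.
 */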
static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
			       unsigned i, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);

	btree_node_lock_type(c, b, SIX_LOCK_read);
	bch2_btree_node_write_cond(c, b,
		(btree_current_write(b) == w && w->journal.seq == seq));
	six_unlock_read(&b->lock);
}

static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 0, seq);
}

static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 1, seq);
}
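/*
 * Each btree node has two btree_write structures, writes[0] and writes[1],
 * hence the two flush callbacks above. The journal pin taken in
 * bch2_btree_journal_key() below points at one of them: when journal reclaim
 * needs to free the pinned journal entry, the callback forces a write of the
 * btree node (provided it's still the same pending write and sequence
 * number), after which the pin can be dropped. That, at least, is how the
 * code in this file uses them.
 */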
static inline void __btree_journal_key(struct btree_insert *trans,
				       enum btree_id btree_id,
				       struct bkey_i *insert)
{
	struct journal *j = &trans->c->journal;
	u64 seq = trans->journal_res.seq;
	bool needs_whiteout = insert->k.needs_whiteout;

	/* needs_whiteout is in-memory state; don't journal it: */
	insert->k.needs_whiteout = false;
	bch2_journal_add_keys(j, &trans->journal_res,
			      btree_id, insert);
	insert->k.needs_whiteout = needs_whiteout;

	bch2_journal_set_has_inode(j, &trans->journal_res,
				   insert->k.p.inode);

	if (trans->journal_seq)
		*trans->journal_seq = seq;
}
void bch2_btree_journal_key(struct btree_insert *trans,
			    struct btree_iter *iter,
			    struct bkey_i *insert)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree *b = iter->l[0].b;
	struct btree_write *w = btree_current_write(b);

	EBUG_ON(iter->level || b->level);
	EBUG_ON(trans->journal_res.ref !=
		!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));

	if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		__btree_journal_key(trans, iter->btree_id, insert);
		btree_bset_last(b)->journal_seq =
			cpu_to_le64(trans->journal_res.seq);
	}

	if (unlikely(!journal_pin_active(&w->journal))) {
		u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
			? trans->journal_res.seq
			: j->replay_journal_seq;

		bch2_journal_pin_add(j, seq, &w->journal,
				     btree_node_write_idx(b) == 0
				     ? btree_node_flush0
				     : btree_node_flush1);
	}

	if (unlikely(!btree_node_dirty(b)))
		set_btree_node_dirty(b);
}
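/*
 * Note, as read from the function above: during journal replay
 * (BTREE_INSERT_JOURNAL_REPLAY) the key is not added to the journal again -
 * it's already there - but the node's pending write is still pinned, using
 * the sequence number of the entry being replayed, so the replayed update
 * can't be lost if the journal entry is reclaimed before the node is written.
 */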
static enum btree_insert_ret
bch2_insert_fixup_key(struct btree_insert *trans,
		      struct btree_insert_entry *insert)
{
	struct btree_iter *iter = insert->iter;
	struct btree_iter_level *l = &iter->l[0];

	EBUG_ON(iter->level);
	EBUG_ON(insert->k->k.u64s >
		bch_btree_keys_u64s_remaining(trans->c, l->b));

	if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
				       insert->k))
		bch2_btree_journal_key(trans, iter, insert->k);

	return BTREE_INSERT_OK;
}
/**
 * btree_insert_key_leaf - insert one key into a leaf node
 */
static enum btree_insert_ret
btree_insert_key_leaf(struct btree_insert *trans,
		      struct btree_insert_entry *insert)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter = insert->iter;
	struct btree *b = iter->l[0].b;
	enum btree_insert_ret ret;
	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	bch2_mark_update(trans, insert);

	ret = !btree_node_is_extents(b)
		? bch2_insert_fixup_key(trans, insert)
		: bch2_insert_fixup_extent(trans, insert);

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_btree_iter_reinit_node(iter, b);

	trace_btree_insert_key(c, b, insert->k);

	return ret;
}
/* Deferred btree updates: */
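/*
 * The rough idea, as read from the code below: a deferred update keeps the
 * most recent version of a key in memory (struct deferred_update) instead of
 * updating the btree right away. Each update is journalled immediately and
 * re-pins the journal entry it was journalled in; the key is only written
 * into the btree by deferred_update_flush(), when the journal needs the pin
 * released or when the deferred update is freed. For frequently updated keys
 * this trades one btree update per call for one per journal flush.
 */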
static void deferred_update_flush(struct journal *j,
				  struct journal_entry_pin *pin,
				  u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct deferred_update *d =
		container_of(pin, struct deferred_update, journal);
	u64 tmp[32];	/* on-stack buffer for small keys */
	struct bkey_i *k = (void *) tmp;
	int ret;

	if (d->allocated_u64s > ARRAY_SIZE(tmp)) {
		k = kmalloc(d->allocated_u64s * sizeof(u64), GFP_NOFS);

		BUG_ON(!k); /* XXX */
	}

	spin_lock(&d->lock);
	if (journal_pin_active(&d->journal)) {
		BUG_ON(d->k.k.u64s > d->allocated_u64s);

		bkey_copy(k, &d->k);
		spin_unlock(&d->lock);

		ret = bch2_btree_insert(c, d->btree_id, k, NULL, NULL,
					BTREE_INSERT_NOFAIL);
		bch2_fs_fatal_err_on(ret && !bch2_journal_error(j),
			c, "error flushing deferred btree update: %i", ret);

		spin_lock(&d->lock);
	}

	bch2_journal_pin_drop(j, &d->journal);
	spin_unlock(&d->lock);

	if (k != (void *) tmp)
		kfree(k);
}
static enum btree_insert_ret
btree_insert_key_deferred(struct btree_insert *trans,
			  struct btree_insert_entry *insert)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct deferred_update *d = insert->d;

	BUG_ON(trans->flags & BTREE_INSERT_JOURNAL_REPLAY);
	BUG_ON(insert->k->k.u64s > d->allocated_u64s);

	__btree_journal_key(trans, d->btree_id, insert->k);

	spin_lock(&d->lock);
	bkey_copy(&d->k, insert->k);
	spin_unlock(&d->lock);

	bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
				deferred_update_flush);

	return BTREE_INSERT_OK;
}
void bch2_deferred_update_free(struct bch_fs *c,
			       struct deferred_update *d)
{
	deferred_update_flush(&c->journal, &d->journal, 0);

	BUG_ON(journal_pin_active(&d->journal));

	bch2_journal_pin_flush(&c->journal, &d->journal);
	kfree(d);
}
struct deferred_update *
bch2_deferred_update_alloc(struct bch_fs *c,
			   enum btree_id btree_id,
			   unsigned u64s)
{
	struct deferred_update *d;

	BUG_ON(u64s > U8_MAX);

	d = kmalloc(offsetof(struct deferred_update, k) +
		    u64s * sizeof(u64), GFP_NOFS);
	BUG_ON(!d);

	memset(d, 0, offsetof(struct deferred_update, k));

	spin_lock_init(&d->lock);
	d->allocated_u64s	= u64s;
	d->btree_id		= btree_id;

	return d;
}
/* struct btree_insert operations: */

/*
 * We sort transaction entries so that if multiple iterators point to the same
 * leaf node they'll be adjacent:
 */
static bool same_leaf_as_prev(struct btree_insert *trans,
			      struct btree_insert_entry *i)
{
	return i != trans->entries &&
		i[0].iter->l[0].b == i[-1].iter->l[0].b;
}
#define __trans_next_entry(_trans, _i, _filter)				\
({									\
	while ((_i) < (_trans)->entries + (_trans->nr) && !(_filter))	\
		(_i)++;							\
									\
	(_i) < (_trans)->entries + (_trans->nr);			\
})

#define __trans_for_each_entry(_trans, _i, _filter)			\
	for ((_i) = (_trans)->entries;					\
	     __trans_next_entry(_trans, _i, _filter);			\
	     (_i)++)

#define trans_for_each_entry(trans, i)					\
	__trans_for_each_entry(trans, i, true)

#define trans_for_each_iter(trans, i)					\
	__trans_for_each_entry(trans, i, !(i)->deferred)

#define trans_for_each_leaf(trans, i)					\
	__trans_for_each_entry(trans, i, !(i)->deferred &&		\
			       !same_leaf_as_prev(trans, i))
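/*
 * Usage note: trans_for_each_iter() walks only the entries that carry a btree
 * iterator, skipping deferred ones; trans_for_each_leaf() additionally visits
 * just one entry per distinct leaf node, which is why entries are sorted so
 * that same-leaf iterators end up adjacent - e.g. locking each leaf exactly
 * once, as multi_lock_write() below does:
 *
 *	trans_for_each_leaf(trans, i)
 *		bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
 */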
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
					    struct btree_iter *iter)
{
	bch2_btree_node_lock_write(b, iter);

	if (btree_node_just_written(b) &&
	    bch2_btree_post_write_cleanup(c, b))
		bch2_btree_iter_reinit_node(iter, b);

	/*
	 * If the last bset has been written, or if it's gotten too big - start
	 * a new bset to insert into:
	 */
	if (want_new_bset(c, b))
		bch2_btree_init_next(c, b, iter);
}
static void multi_lock_write(struct bch_fs *c, struct btree_insert *trans)
{
	struct btree_insert_entry *i;

	trans_for_each_leaf(trans, i)
		bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
}

static void multi_unlock_write(struct btree_insert *trans)
{
	struct btree_insert_entry *i;

	trans_for_each_leaf(trans, i)
		bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
}
static inline int btree_trans_cmp(struct btree_insert_entry l,
				  struct btree_insert_entry r)
{
	return (l.deferred > r.deferred) - (l.deferred < r.deferred) ?:
		btree_iter_cmp(l.iter, r.iter);
}
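/*
 * Sort order rationale (inferred): entries with iterators sort before
 * deferred entries, and iterator entries are ordered by btree_iter_cmp() -
 * presumably by btree id and position - so that multiple updates to the same
 * leaf end up adjacent (see same_leaf_as_prev()) and node write locks are
 * always taken in a consistent order.
 */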
/* Normal update interface: */
static enum btree_insert_ret
btree_key_can_insert(struct btree_insert *trans,
		     struct btree_insert_entry *insert,
		     unsigned *u64s)
{
	struct bch_fs *c = trans->c;
	struct btree *b = insert->iter->l[0].b;
	enum btree_insert_ret ret;

	if (unlikely(btree_node_fake(b)))
		return BTREE_INSERT_BTREE_NODE_FULL;

	if (!bch2_bkey_replicas_marked(c,
				       bkey_i_to_s_c(insert->k),
				       true))
		return BTREE_INSERT_NEED_MARK_REPLICAS;

	ret = !btree_node_is_extents(b)
		? BTREE_INSERT_OK
		: bch2_extent_can_insert(trans, insert, u64s);
	if (ret)
		return ret;

	if (*u64s > bch_btree_keys_u64s_remaining(c, b))
		return BTREE_INSERT_BTREE_NODE_FULL;

	return BTREE_INSERT_OK;
}
static inline enum btree_insert_ret
do_btree_insert_one(struct btree_insert *trans,
		    struct btree_insert_entry *insert)
{
	return likely(!insert->deferred)
		? btree_insert_key_leaf(trans, insert)
		: btree_insert_key_deferred(trans, insert);
}
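/*
 * Ordering note (a reading of do_btree_insert_at(), below): the journal
 * reservation is taken with JOURNAL_RES_GET_NONBLOCK while we still hold only
 * iterator locks; if it would block, the iterators are unlocked first, the
 * reservation is waited for, and the iterators are relocked. Node write locks
 * are only taken (multi_lock_write()) once the reservation is in hand, so we
 * never sleep on journal space while holding btree node write locks.
 */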
/*
 * Get journal reservation, take write locks, and attempt to do btree update(s):
 */
static inline int do_btree_insert_at(struct btree_insert *trans,
				     struct btree_insert_entry **stopped_at)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct btree_iter *linked;
	unsigned u64s;
	int ret = 0;

	trans_for_each_iter(trans, i)
		BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);

	/* reserve space for deferred updates */
	__trans_for_each_entry(trans, i, i->deferred) {
		/* ... */
	}

	memset(&trans->journal_res, 0, sizeof(trans->journal_res));

	if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		u64s = 0;
		trans_for_each_entry(trans, i)
			u64s += jset_u64s(i->k->k.u64s);

		while ((ret = bch2_journal_res_get(&c->journal,
					&trans->journal_res, u64s,
					JOURNAL_RES_GET_NONBLOCK)) == -EAGAIN) {
			struct btree_iter *iter = NULL;

			trans_for_each_iter(trans, i)
				iter = i->iter;
			if (iter)
				bch2_btree_iter_unlock(iter);

			ret = bch2_journal_res_get(&c->journal,
					&trans->journal_res, u64s,
					JOURNAL_RES_GET_CHECK);
			if (ret)
				return ret;

			if (iter && !bch2_btree_iter_relock(iter)) {
				trans_restart(" (iter relock after journal res get blocked)");
				return -EINTR;
			}
		}
		if (ret)
			return ret;
	}

	multi_lock_write(c, trans);

	if (race_fault()) {
		ret = -EINTR;
		trans_restart(" (race)");
		goto out;
	}

	/*
	 * Check if the insert will fit in the leaf node with the write lock
	 * held, otherwise another thread could write the node changing the
	 * amount of space available:
	 */
	u64s = 0;
	trans_for_each_iter(trans, i) {
		/* Multiple inserts might go to same leaf: */
		if (!same_leaf_as_prev(trans, i))
			u64s = 0;

		u64s += i->k->k.u64s;
		ret = btree_key_can_insert(trans, i, &u64s);
		if (ret) {
			*stopped_at = i;
			goto out;
		}
	}

	if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
		if (journal_seq_verify(c))
			trans_for_each_entry(trans, i)
				i->k->k.version.lo = trans->journal_res.seq;
		else if (inject_invalid_keys(c))
			trans_for_each_entry(trans, i)
				i->k->k.version = MAX_VERSION;
	}

	if (trans->flags & BTREE_INSERT_NOUNLOCK) {
		/*
		 * linked iterators that weren't being updated may or may not
		 * have been traversed/locked, depending on what the caller was
		 * doing:
		 */
		trans_for_each_iter(trans, i) {
			for_each_btree_iter(i->iter, linked)
				if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
					linked->flags |= BTREE_ITER_NOUNLOCK;
		}
	}
	trans->did_work = true;

	trans_for_each_entry(trans, i) {
		switch (do_btree_insert_one(trans, i)) {
		case BTREE_INSERT_OK:
			break;
		case BTREE_INSERT_NEED_TRAVERSE:
			BUG_ON((trans->flags &
				(BTREE_INSERT_ATOMIC|BTREE_INSERT_NOUNLOCK)));
			ret = -EINTR;
			break;
		}
	}
out:
	multi_unlock_write(trans);
	bch2_journal_res_put(&c->journal, &trans->journal_res);

	return ret;
}
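/*
 * Note (inferred from the call site in __bch2_btree_insert_at() below):
 * do_btree_insert_at() can return either a negative errno (-EINTR, -EIO) or
 * one of the positive enum btree_insert_ret codes, e.g.
 * BTREE_INSERT_BTREE_NODE_FULL or BTREE_INSERT_NEED_MARK_REPLICAS from
 * btree_key_can_insert(); the caller translates the latter into a leaf split,
 * marking replicas, etc. before retrying.
 */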
static inline void btree_insert_entry_checks(struct bch_fs *c,
					     struct btree_insert_entry *i)
{
	enum btree_id btree_id = !i->deferred
		? i->iter->btree_id
		: i->d->btree_id;

	if (!i->deferred) {
		BUG_ON(i->iter->level);
		BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));

		bch2_btree_iter_verify_locks(i->iter);
	}

	BUG_ON(debug_check_bkeys(c) &&
	       !bkey_deleted(&i->k->k) &&
	       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), btree_id));
}
/**
 * __bch2_btree_insert_at - insert keys at given iterator positions
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
 * -EINTR: locking changed, this function should be called again. Only returned
 *  if passed BTREE_INSERT_ATOMIC.
 * -EROFS: filesystem read only
 * -EIO: journal or btree node IO error
 */
int __bch2_btree_insert_at(struct btree_insert *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct btree_iter *linked;
	unsigned flags;
	int ret;

	/* for the sake of sanity: */
	BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));

	bubble_sort(trans->entries, trans->nr, btree_trans_cmp);

	trans_for_each_entry(trans, i)
		btree_insert_entry_checks(c, i);

	if (unlikely(!percpu_ref_tryget(&c->writes)))
		return -EROFS;
retry:
	trans_for_each_iter(trans, i) {
		unsigned old_locks_want = i->iter->locks_want;
		unsigned old_uptodate = i->iter->uptodate;

		if (!bch2_btree_iter_upgrade(i->iter, 1, true)) {
			trans_restart(" (failed upgrade, locks_want %u uptodate %u)",
				      old_locks_want, old_uptodate);
			ret = -EINTR;
			goto err;
		}

		if (i->iter->flags & BTREE_ITER_ERROR) {
			ret = -EIO;
			goto err;
		}
	}

	ret = do_btree_insert_at(trans, &i);
	if (unlikely(ret))
		goto err;

	trans_for_each_leaf(trans, i)
		bch2_foreground_maybe_merge(c, i->iter, 0, trans->flags);

	trans_for_each_iter(trans, i)
		bch2_btree_iter_downgrade(i->iter);
out:
	percpu_ref_put(&c->writes);

	/* make sure we didn't drop or screw up locks: */
	trans_for_each_iter(trans, i) {
		bch2_btree_iter_verify_locks(i->iter);
		break;
	}

	trans_for_each_iter(trans, i) {
		for_each_btree_iter(i->iter, linked)
			linked->flags &= ~BTREE_ITER_NOUNLOCK;
	}

	BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
	return ret;
err:
	flags = trans->flags;

	/*
	 * BTREE_INSERT_NOUNLOCK means don't unlock _after_ successful btree
	 * update; if we haven't done anything yet it doesn't apply
	 */
	if (!trans->did_work)
		flags &= ~BTREE_INSERT_NOUNLOCK;

	switch (ret) {
	case BTREE_INSERT_BTREE_NODE_FULL:
		ret = bch2_btree_split_leaf(c, i->iter, flags);
		/*
		 * if the split succeeded without dropping locks the insert will
		 * still be atomic (in the BTREE_INSERT_ATOMIC sense, what the
		 * caller peeked() and is overwriting won't have changed)
		 *
		 * split -> btree node merging (of parent node) might still drop
		 * locks when we're not passing it BTREE_INSERT_NOUNLOCK
		 */
		if (!ret && !trans->did_work)
			goto retry;
		/*
		 * don't care if we got ENOSPC because we told split it
		 * couldn't block:
		 */
		if (!ret || (flags & BTREE_INSERT_NOUNLOCK)) {
			trans_restart(" (split)");
			ret = -EINTR;
		}
		break;
	case BTREE_INSERT_NEED_GC_LOCK:
		ret = -EINTR;
		if (!down_read_trylock(&c->gc_lock)) {
			if (flags & BTREE_INSERT_NOUNLOCK)
				goto out;
			bch2_btree_iter_unlock(trans->entries[0].iter);
			down_read(&c->gc_lock);
		}
		up_read(&c->gc_lock);
		break;
	case BTREE_INSERT_ENOSPC:
		ret = -ENOSPC;
		break;
	case BTREE_INSERT_NEED_MARK_REPLICAS:
		if (flags & BTREE_INSERT_NOUNLOCK) {
			ret = -EINTR;
			goto out;
		}
		bch2_btree_iter_unlock(trans->entries[0].iter);
		ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(i->k))
			?: -EINTR;
		break;
	default:
		BUG_ON(ret >= 0);
		break;
	}

	if (ret == -EINTR) {
		if (flags & BTREE_INSERT_NOUNLOCK) {
			trans_restart(" (can't unlock)");
			goto out;
		}

		trans_for_each_iter(trans, i) {
			int ret2 = bch2_btree_iter_traverse(i->iter);
			if (ret2) {
				trans_restart(" (traverse)");
				ret = ret2;
				goto out;
			}

			BUG_ON(i->iter->uptodate > BTREE_ITER_NEED_PEEK);
		}

		/*
		 * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
		 * dropped locks:
		 */
		if (!(flags & BTREE_INSERT_ATOMIC))
			goto retry;

		trans_restart(" (atomic)");
	}
	goto out;
}
int bch2_trans_commit(struct btree_trans *trans,
		      struct disk_reservation *disk_res,
		      u64 *journal_seq,
		      unsigned flags)
{
	struct btree_insert insert = {
		.c		= trans->c,
		.disk_res	= disk_res,
		.journal_seq	= journal_seq,
		.flags		= flags,
		.nr		= trans->nr_updates,
		.entries	= trans->updates,
	};

	if (!trans->nr_updates)
		return 0;

	trans->nr_updates = 0;

	return __bch2_btree_insert_at(&insert);
}
int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.p = iter->pos;

	return bch2_btree_insert_at(iter->c, NULL, NULL,
				    BTREE_INSERT_NOFAIL|
				    BTREE_INSERT_USE_RESERVE|flags,
				    BTREE_INSERT_ENTRY(iter, &k));
}
int bch2_btree_insert_list_at(struct btree_iter *iter,
			      struct keylist *keys,
			      struct disk_reservation *disk_res,
			      u64 *journal_seq, unsigned flags)
{
	BUG_ON(flags & BTREE_INSERT_ATOMIC);
	BUG_ON(bch2_keylist_empty(keys));
	bch2_verify_keylist_sorted(keys);

	while (!bch2_keylist_empty(keys)) {
		int ret = bch2_btree_insert_at(iter->c, disk_res,
				journal_seq, flags,
				BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
		if (ret)
			return ret;

		bch2_keylist_pop_front(keys);
	}

	return 0;
}
/**
 * bch2_btree_insert - insert a key into the given btree
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @disk_res:		disk reservation, or NULL
 * @journal_seq:	if non-NULL, set to the journal sequence number the
 *			key was journalled at
 * @flags:		BTREE_INSERT_* flags
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
		      struct bkey_i *k,
		      struct disk_reservation *disk_res,
		      u64 *journal_seq, int flags)
{
	struct btree_iter iter;
	int ret;

	bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
			     BTREE_ITER_INTENT);
	ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
				   BTREE_INSERT_ENTRY(&iter, k));
	bch2_btree_iter_unlock(&iter);

	return ret;
}
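/*
 * Example (an illustrative sketch, not taken from this file): a caller that
 * already has an iterator positioned where it wants to insert can skip
 * bch2_btree_insert() and use the multi-entry interface directly; new_key
 * here stands in for whatever bkey_i the caller has filled in:
 *
 *	ret = bch2_btree_insert_at(c, NULL, NULL,
 *				   BTREE_INSERT_NOFAIL,
 *				   BTREE_INSERT_ENTRY(&iter, &new_key));
 *
 * which is exactly the pattern bch2_btree_delete_at() uses above.
 */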
/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    u64 *journal_seq)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_btree_iter_init(&iter, c, id, start,
			     BTREE_ITER_INTENT);

	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(ret = btree_iter_err(k)) &&
	       bkey_cmp(iter.pos, end) < 0) {
		unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
		/* really shouldn't be using a bare, unpadded bkey_i */
		struct bkey_i delete;

		bkey_init(&delete.k);

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_IS_EXTENTS) {
			/* create the biggest key we can */
			bch2_key_resize(&delete.k, max_sectors);
			bch2_cut_back(end, &delete.k);
		}

		ret = bch2_btree_insert_at(c, NULL, journal_seq,
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&iter, &delete));
		if (ret)
			break;

		bch2_btree_iter_cond_resched(&iter);
	}

	bch2_btree_iter_unlock(&iter);
	return ret;
}