+ bch2_trans_copy_iter(&iter, extent_iter);
+
+ for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, old, ret) {
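+ /* sectors of overlap between @new and @old: */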
+ s64 sectors = min(new->k.p.offset, old.k->p.offset) -
+ max(bkey_start_offset(&new->k),
+ bkey_start_offset(old.k));
+
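+ /*
+ * bkey_extent_is_allocation() is 0 or 1, so i_sectors only moves
+ * when allocation status changes across the overwrite:
+ */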
+ *i_sectors_delta += sectors *
+ (bkey_extent_is_allocation(&new->k) -
+ bkey_extent_is_allocation(old.k));
+
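+ /*
+ * On disk usage: @new's pointers are always added; @old's are only
+ * freed if it's in the same snapshot, since an old version in
+ * another snapshot remains referenced:
+ */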
+ *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
+ *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
+ ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
+ : 0;
+
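+ /*
+ * We can't show that usage won't increase if the snapshots differ,
+ * @new has more replicas, or uncompressed data is replacing
+ * compressed data:
+ */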
+ if (!*usage_increasing &&
+ (new->k.p.snapshot != old.k->p.snapshot ||
+ new_replicas > bch2_bkey_replicas(c, old) ||
+ (!new_compressed && bch2_bkey_sectors_compressed(old))))
+ *usage_increasing = true;
+
+ if (bkey_cmp(old.k->p, new->k.p) >= 0)
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_extent_update(struct btree_trans *trans,
+ subvol_inum inum,
+ struct btree_iter *iter,
+ struct bkey_i *k,
+ struct disk_reservation *disk_res,
+ u64 *journal_seq,
+ u64 new_i_size,
+ s64 *i_sectors_delta_total,
+ bool check_enospc)
+{
+ struct btree_iter inode_iter;
+ struct bch_inode_unpacked inode_u;
+ struct bpos next_pos;
+ bool usage_increasing;
+ s64 i_sectors_delta = 0, disk_sectors_delta = 0;
+ int ret;
+
+ /*
+ * This traverses the iterator without changing iter->path->pos to
+ * search_key() (which is pos + 1 for extents): we want there to be a
+ * path already traversed at iter->pos, because
+ * bch2_trans_extent_update() will use it to attempt extent merging
+ */
+ ret = __bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
+ ret = bch2_extent_trim_atomic(trans, iter, k);
+ if (ret)
+ return ret;
+
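+ /*
+ * Offsets are in 512 byte sectors; don't advance i_size past the
+ * end of the extent being inserted:
+ */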
+ new_i_size = min(k->k.p.offset << 9, new_i_size);
+ next_pos = k->k.p;
+
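+ /* Figure out how this update changes i_sectors and on disk usage: */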
+ ret = bch2_sum_sector_overwrites(trans, iter, k,
+ &usage_increasing,
+ &i_sectors_delta,
+ &disk_sectors_delta);
+ if (ret)
+ return ret;
+
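+ /*
+ * Top up the disk reservation if this update consumes more space
+ * than was reserved; only fail with -ENOSPC when the caller asked
+ * for it and usage is actually increasing:
+ */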
+ if (disk_res &&
+ disk_sectors_delta > (s64) disk_res->sectors) {
+ ret = bch2_disk_reservation_add(trans->c, disk_res,
+ disk_sectors_delta - disk_res->sectors,
+ !check_enospc || !usage_increasing
+ ? BCH_DISK_RESERVATION_NOFAIL : 0);
+ if (ret)
+ return ret;
+ }
+
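+ /*
+ * Take an intent lock on the inode, so i_size/i_sectors are
+ * updated in the same transaction as the extent:
+ */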
+ ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inum,
+ BTREE_ITER_INTENT);
+ if (ret)
+ return ret;
+
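+ /*
+ * Only ratchet i_size upwards, and leave it untouched entirely
+ * while I_SIZE_DIRTY is set:
+ */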
+ if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+ new_i_size > inode_u.bi_size)
+ inode_u.bi_size = new_i_size;
+
+ inode_u.bi_sectors += i_sectors_delta;
+
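+ /*
+ * Update the extent and the inode and commit atomically, so the
+ * extent can't go out of sync with i_size/i_sectors:
+ */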
+ ret = bch2_trans_update(trans, iter, k, 0) ?:
+ bch2_inode_write(trans, &inode_iter, &inode_u) ?:
+ bch2_trans_commit(trans, disk_res, journal_seq,
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL);
+ bch2_trans_iter_exit(trans, &inode_iter);
+
+ if (ret)
+ return ret;
+
+ if (i_sectors_delta_total)
+ *i_sectors_delta_total += i_sectors_delta;
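+ /* Advance the iterator past the extent we just updated: */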
+ bch2_btree_iter_set_pos(iter, next_pos);
+
+ return 0;
+}
+
+/*
+ * Returns -EINTR if we had to drop locks:
+ */
+int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
+ subvol_inum inum, u64 end,
+ s64 *i_sectors_delta)
+{
+ struct bch_fs *c = trans->c;
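+ /* KEY_SIZE_MAX rounded down to the filesystem block size: */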
+ unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
+ struct bpos end_pos = POS(inum.inum, end);
+ struct bkey_s_c k;
+ int ret = 0, ret2 = 0;
+ u32 snapshot;
+
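+ /*
+ * Keep retrying on -EINTR (transaction restart); any other error
+ * ends the loop:
+ */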
+ while (!ret || ret == -EINTR) {
+ struct disk_reservation disk_res =
+ bch2_disk_reservation_init(c, 0);
+ struct bkey_i delete;
+
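+ /* ret can only be -EINTR here; note it for the caller: */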
+ if (ret)
+ ret2 = ret;
+
+ bch2_trans_begin(trans);
+
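+ /*
+ * Look the subvolume's snapshot ID up again each iteration, since
+ * bch2_trans_begin() may have dropped locks:
+ */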
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ continue;
+
+ bch2_btree_iter_set_snapshot(iter, snapshot);
+
+ k = bch2_btree_iter_peek(iter);
+ if (bkey_cmp(iter->pos, end_pos) >= 0) {
+ bch2_btree_iter_set_pos(iter, end_pos);
+ break;