1 // SPDX-License-Identifier: GPL-2.0
3 * io_misc.c - fallocate, fpunch, truncate:
7 #include "alloc_foreground.h"
9 #include "btree_update.h"
14 #include "extent_update.h"
18 #include "logged_ops.h"
19 #include "rebalance.h"
20 #include "subvolume.h"
22 /* Overwrites whatever was present with zeroes: */
/*
 * Allocate disk space for the range at iter->pos, replacing whatever is
 * there with either unwritten extents (nocow path, new enough on-disk
 * format) or reservation keys, and write the result via
 * bch2_extent_update().
 *
 * NOTE(review): this view of the file is elided — interior lines are
 * missing between many of the statements below (including the
 * declarations of cl, k, sectors, ret and several error-goto labels).
 * Comments describe only what the visible lines establish.
 */
23 int bch2_extent_fallocate(struct btree_trans *trans,
25 struct btree_iter *iter,
27 struct bch_io_opts opts,
29 struct write_point_specifier write_point)
31 struct bch_fs *c = trans->c;
32 struct disk_reservation disk_res = { 0 };
34 struct open_buckets open_buckets = { 0 };
36 struct bkey_buf old, new;
37 unsigned sectors_allocated = 0;
38 bool have_reservation = false;
/* Unwritten extents need both the nocow option and a format that supports them: */
39 bool unwritten = opts.nocow &&
40 c->sb.version >= bcachefs_metadata_version_unwritten_extents;
43 bch2_bkey_buf_init(&old);
44 bch2_bkey_buf_init(&new);
/* cl is waited on at the end if bucket allocation left it referenced */
45 closure_init_stack(&cl);
47 k = bch2_btree_iter_peek_slot(iter);
/* Clamp to the extent of the key we're overwriting: */
52 sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
54 if (!have_reservation) {
/* Only reserve for replicas not already fully allocated in the existing key: */
55 unsigned new_replicas =
56 max(0, (int) opts.data_replicas -
57 (int) bch2_bkey_nr_ptrs_fully_allocated(k));
59 * Get a disk reservation before (in the nocow case) calling
62 ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
/* Snapshot the old key so we can detect if it changed across a restart: */
66 bch2_bkey_buf_reassemble(&old, c, k);
69 if (have_reservation) {
/* If the extent changed underneath us, presumably we retry — elided */
70 if (!bch2_extents_match(k, bkey_i_to_s_c(old.k)))
73 bch2_key_resize(&new.k->k, sectors);
74 } else if (!unwritten) {
/* Reservation-key path (no real allocation, just accounted space): */
75 struct bkey_i_reservation *reservation;
77 bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
78 reservation = bkey_reservation_init(new.k);
79 reservation->k.p = iter->pos;
80 bch2_key_resize(&reservation->k, sectors);
81 reservation->v.nr_replicas = opts.data_replicas;
/* Unwritten-extent path: allocate real buckets now, mark ptrs unwritten: */
83 struct bkey_i_extent *e;
84 struct bch_devs_list devs_have;
85 struct write_point *wp;
86 struct bch_extent_ptr *ptr;
90 bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);
92 e = bkey_extent_init(new.k);
95 ret = bch2_alloc_sectors_start_trans(trans,
96 opts.foreground_target,
102 BCH_WATERMARK_normal, 0, &cl, &wp);
/* Blocked on allocation: surface as a nested transaction restart */
103 if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
104 ret = -BCH_ERR_transaction_restart_nested;
/* May have gotten fewer sectors than asked for: */
108 sectors = min_t(u64, sectors, wp->sectors_free);
109 sectors_allocated = sectors;
111 bch2_key_resize(&e->k, sectors);
113 bch2_open_bucket_get(c, wp, &open_buckets);
114 bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
115 bch2_alloc_sectors_done(c, wp);
117 extent_for_each_ptr(extent_i_to_s(e), ptr)
118 ptr->unwritten = true;
121 have_reservation = true;
123 ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
124 0, i_sectors_delta, true);
126 if (!ret && sectors_allocated)
127 bch2_increment_clock(c, sectors_allocated, WRITE);
/* Cleanup: release buckets, reservation, bkey buffers (reverse of init order) */
129 bch2_open_buckets_put(c, &open_buckets);
130 bch2_disk_reservation_put(c, &disk_res);
131 bch2_bkey_buf_exit(&new, c);
132 bch2_bkey_buf_exit(&old, c);
/*
 * If the allocator took a ref on cl we must drop btree locks before
 * waiting on it (elided wait follows the unlock):
 */
134 if (closure_nr_remaining(&cl) != 1) {
135 bch2_trans_unlock(trans);
143 * Returns -BCH_ERR_transaction_restart if we had to drop locks:
/*
 * Delete all extents in [iter->pos, end) for @inum, updating
 * *i_sectors_delta as it goes.  Loops one deletion per transaction
 * commit; the loop condition and commit are elided from this view.
 *
 * NOTE(review): interior lines are missing (snapshot/k declarations,
 * error checks between the visible statements).
 */
145 int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
146 subvol_inum inum, u64 end,
147 s64 *i_sectors_delta)
149 struct bch_fs *c = trans->c;
/* Largest key size that stays block-aligned: */
150 unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
151 struct bpos end_pos = POS(inum.inum, end);
153 int ret = 0, ret2 = 0;
/* Keep looping while we only failed with a transaction restart: */
157 bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
/* Deletions need no new disk space — zero-sized reservation: */
158 struct disk_reservation disk_res =
159 bch2_disk_reservation_init(c, 0);
160 struct bkey_i delete;
165 bch2_trans_begin(trans);
/* Re-resolve the subvolume's snapshot each iteration (may change on restart): */
167 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
171 bch2_btree_iter_set_snapshot(iter, snapshot);
174 * peek_upto() doesn't have ideal semantics for extents:
176 k = bch2_btree_iter_peek_upto(iter, end_pos);
/* Build a whiteout covering as much as possible, clipped to end_pos: */
184 bkey_init(&delete.k);
185 delete.k.p = iter->pos;
187 /* create the biggest key we can */
188 bch2_key_resize(&delete.k, max_sectors);
189 bch2_cut_back(end_pos, &delete);
191 ret = bch2_extent_update(trans, inum, iter, &delete,
192 &disk_res, 0, i_sectors_delta, false);
193 bch2_disk_reservation_put(c, &disk_res);
/*
 * Convenience wrapper around bch2_fpunch_at(): creates its own
 * transaction and extents iterator for [start, end) and tears them
 * down afterwards.
 *
 * NOTE(review): the tail is elided — presumably a transaction-restart
 * error is squashed to success before returning; confirm against the
 * full source.
 */
199 int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
200 s64 *i_sectors_delta)
202 struct btree_trans *trans = bch2_trans_get(c);
203 struct btree_iter iter;
206 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
207 POS(inum.inum, start),
210 ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);
212 bch2_trans_iter_exit(trans, &iter);
213 bch2_trans_put(trans);
215 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
/* Pretty-print a logged_op_truncate key's fields (subvol, inum, new size). */
223 void bch2_logged_op_truncate_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
225 struct bkey_s_c_logged_op_truncate op = bkey_s_c_to_logged_op_truncate(k);
227 prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
228 prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
229 prt_printf(out, " new_i_size=%llu", le64_to_cpu(op.v->new_i_size));
/*
 * Set @inum's bi_size to @new_i_size within the current transaction:
 * peek the inode with an intent lock, update the unpacked copy, write
 * it back.  Returns a bch2 error code; always releases the iterator.
 */
232 static int truncate_set_isize(struct btree_trans *trans,
236 struct btree_iter iter = { NULL };
237 struct bch_inode_unpacked inode_u;
/* ?: chain: stop at the first failing step; middle term just assigns bi_size */
240 ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT) ?:
241 (inode_u.bi_size = new_i_size, 0) ?:
242 bch2_inode_write(trans, &iter, &inode_u);
244 bch2_trans_iter_exit(trans, &iter);
/*
 * Execute (or resume after a crash) a logged truncate: commit the new
 * i_size, then punch all extents from the block-aligned new size to the
 * end of the file, then mark the logged op finished.  Idempotent — safe
 * to re-run from the start on recovery.
 *
 * NOTE(review): error-check lines between the visible statements are
 * elided in this view.
 */
248 static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
250 u64 *i_sectors_delta)
252 struct bch_fs *c = trans->c;
253 struct btree_iter fpunch_iter;
254 struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k);
255 subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
256 u64 new_i_size = le64_to_cpu(op->v.new_i_size);
/* Step 1: persist the new i_size */
259 ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
260 truncate_set_isize(trans, inum, new_i_size));
/* Step 2: punch from the first block past new_i_size to EOF (sectors are 512b) */
264 bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
265 POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
267 ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
268 bch2_trans_iter_exit(trans, &fpunch_iter);
270 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
/* Step 3: retire the logged op so it won't be replayed */
273 bch2_logged_op_finish(trans, op_k);
/* Recovery entry point: replay a logged truncate, discarding the sector delta. */
277 int bch2_resume_logged_op_truncate(struct btree_trans *trans, struct bkey_i *op_k)
279 return __bch2_resume_logged_op_truncate(trans, op_k, NULL);
/*
 * Truncate @inum to @new_i_size via the logged-op mechanism, so the
 * operation is crash-safe: the op key is persisted first, then executed;
 * recovery replays any op left behind.  Holds snapshot_create_lock for
 * read for the duration (see comment below).
 */
282 int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sectors_delta)
284 struct bkey_i_logged_op_truncate op;
286 bkey_logged_op_truncate_init(&op.k_i);
287 op.v.subvol = cpu_to_le32(inum.subvol);
288 op.v.inum = cpu_to_le64(inum.inum);
289 op.v.new_i_size = cpu_to_le64(new_i_size);
292 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
293 * snapshot while they're in progress, then crashing, will result in the
294 * resume only proceeding in one of the snapshots
296 down_read(&c->snapshot_create_lock);
297 int ret = bch2_trans_run(c,
298 bch2_logged_op_start(trans, &op.k_i) ?:
299 __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta));
300 up_read(&c->snapshot_create_lock);
305 /* finsert/fcollapse: */
/* Pretty-print a logged_op_finsert key's fields (subvol, inum, src/dst offsets). */
307 void bch2_logged_op_finsert_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
309 struct bkey_s_c_logged_op_finsert op = bkey_s_c_to_logged_op_finsert(k);
311 prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
312 prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
/* dst_offset printed signed (%lli): can be less than src_offset for collapse */
313 prt_printf(out, " dst_offset=%lli", le64_to_cpu(op.v->dst_offset));
314 prt_printf(out, " src_offset=%llu", le64_to_cpu(op.v->src_offset));
/*
 * Grow or shrink @inum's i_size by @len (signed) when @offset is within
 * the file, bumping mtime/ctime.  Overflow past MAX_LFS_FILESIZE is
 * rejected.  NOTE(review): elided lines between the visible statements
 * include the error paths and the handling when offset >= bi_size.
 */
317 static int adjust_i_size(struct btree_trans *trans, subvol_inum inum, u64 offset, s64 len)
319 struct btree_iter iter;
320 struct bch_inode_unpacked inode_u;
326 ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
/* Refuse an adjustment that would push i_size past the filesystem max: */
331 if (MAX_LFS_FILESIZE - inode_u.bi_size < len) {
336 if (offset >= inode_u.bi_size) {
342 inode_u.bi_size += len;
343 inode_u.bi_mtime = inode_u.bi_ctime = bch2_current_time(trans->c);
345 ret = bch2_inode_write(trans, &iter, &inode_u);
347 bch2_trans_iter_exit(trans, &iter);
/*
 * Execute (or resume) a logged finsert/fcollapse: shift every extent at
 * or after src_offset by @shift sectors (positive = insert hole,
 * negative = collapse range), driven by a persistent state machine in
 * op->v.state so it can be resumed after a crash.  op->v.pos records
 * progress: the next key position to move.
 *
 * NOTE(review): this view is elided — error checks, some case labels /
 * fallthroughs, and several statements between the visible lines are
 * missing.  Comments below only describe what the visible lines show.
 */
351 static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
353 u64 *i_sectors_delta)
355 struct bch_fs *c = trans->c;
356 struct btree_iter iter;
357 struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
358 subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
359 struct bch_io_opts opts;
360 u64 dst_offset = le64_to_cpu(op->v.dst_offset);
361 u64 src_offset = le64_to_cpu(op->v.src_offset);
/* shift > 0: insert (extents move right); shift < 0: collapse (move left) */
362 s64 shift = dst_offset - src_offset;
363 u64 len = abs(shift);
364 u64 pos = le64_to_cpu(op->v.pos);
365 bool insert = shift > 0;
368 ret = bch2_inum_opts_get(trans, inum, &opts);
372 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
/* Resume from whichever phase the persisted op key records: */
376 switch (op->v.state) {
377 case LOGGED_OP_FINSERT_start:
378 op->v.state = LOGGED_OP_FINSERT_shift_extents;
/* Insert: grow i_size up front and persist the state transition together */
381 ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
382 adjust_i_size(trans, inum, src_offset, len) ?:
383 bch2_logged_op_update(trans, &op->k_i));
/* Collapse: first punch the range being removed */
387 bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));
389 ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
390 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
393 ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
394 bch2_logged_op_update(trans, &op->k_i));
/* Main phase: move one extent per transaction commit, recording progress */
398 case LOGGED_OP_FINSERT_shift_extents:
400 struct disk_reservation disk_res =
401 bch2_disk_reservation_init(c, 0);
402 struct bkey_i delete, *copy;
404 struct bpos src_pos = POS(inum.inum, src_offset);
407 bch2_trans_begin(trans);
409 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
413 bch2_btree_iter_set_snapshot(&iter, snapshot);
414 bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));
/* Insert walks backwards from the end; collapse walks forwards: */
417 ? bch2_btree_iter_peek_prev(&iter)
418 : bch2_btree_iter_peek_upto(&iter, POS(inum.inum, U64_MAX));
419 if ((ret = bkey_err(k)))
/* Done once we run off the inode or reach keys at/before src_offset: */
423 k.k->p.inode != inum.inum ||
424 bkey_le(k.k->p, POS(inum.inum, src_offset)))
427 copy = bch2_bkey_make_mut_noupdate(trans, k);
428 if ((ret = PTR_ERR_OR_ZERO(copy)))
/* Key straddles src_pos: keep only the part past it */
432 bkey_lt(bkey_start_pos(k.k), src_pos)) {
433 bch2_cut_front(src_pos, copy);
435 /* Splitting compressed extent? */
436 bch2_disk_reservation_add(c, &disk_res,
438 bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy)),
439 BCH_DISK_RESERVATION_NOFAIL);
/* Whiteout the old location, then re-insert shifted: */
442 bkey_init(&delete.k);
443 delete.k.p = copy->k.p;
444 delete.k.p.snapshot = snapshot;
445 delete.k.size = copy->k.size;
447 copy->k.p.offset += shift;
448 copy->k.p.snapshot = snapshot;
/* Persist progress so a crash resumes from the right key: */
450 op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);
/* Delete + re-insert + progress update committed atomically: */
452 ret = bch2_bkey_set_needs_rebalance(c, copy,
453 opts.background_target,
454 opts.background_compression) ?:
455 bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
456 bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
457 bch2_logged_op_update(trans, &op->k_i) ?:
458 bch2_trans_commit(trans, &disk_res, NULL, BCH_TRANS_COMMIT_no_enospc);
460 bch2_disk_reservation_put(c, &disk_res);
462 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
/* Re-read progress after commit (op key may have been rewritten): */
467 pos = le64_to_cpu(op->v.pos);
470 op->v.state = LOGGED_OP_FINSERT_finish;
/* Collapse: shrink i_size now that extents have moved (shift < 0 here) */
473 ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
474 adjust_i_size(trans, inum, src_offset, shift) ?:
475 bch2_logged_op_update(trans, &op->k_i));
477 /* We need an inode update to update bi_journal_seq for fsync: */
478 ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
479 adjust_i_size(trans, inum, 0, 0) ?:
480 bch2_logged_op_update(trans, &op->k_i));
484 case LOGGED_OP_FINSERT_finish:
488 bch2_logged_op_finish(trans, op_k);
489 bch2_trans_iter_exit(trans, &iter);
/* Recovery entry point: replay a logged finsert/fcollapse, discarding the sector delta. */
493 int bch2_resume_logged_op_finsert(struct btree_trans *trans, struct bkey_i *op_k)
495 return __bch2_resume_logged_op_finsert(trans, op_k, NULL);
498 int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum,
499 u64 offset, u64 len, bool insert,
500 s64 *i_sectors_delta)
502 struct bkey_i_logged_op_finsert op;
503 s64 shift = insert ? len : -len;
505 bkey_logged_op_finsert_init(&op.k_i);
506 op.v.subvol = cpu_to_le32(inum.subvol);
507 op.v.inum = cpu_to_le64(inum.inum);
508 op.v.dst_offset = cpu_to_le64(offset + shift);
509 op.v.src_offset = cpu_to_le64(offset);
510 op.v.pos = cpu_to_le64(insert ? U64_MAX : offset);
513 * Logged ops aren't atomic w.r.t. snapshot creation: creating a
514 * snapshot while they're in progress, then crashing, will result in the
515 * resume only proceeding in one of the snapshots
517 down_read(&c->snapshot_create_lock);
518 int ret = bch2_trans_run(c,
519 bch2_logged_op_start(trans, &op.k_i) ?:
520 __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta));
521 up_read(&c->snapshot_create_lock);