// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "io_misc.h"
#include "rebalance.h"
#include "reflink.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sched/signal.h>

static inline unsigned bkey_type_to_indirect(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
		return KEY_TYPE_reflink_v;
	case KEY_TYPE_inline_data:
		return KEY_TYPE_indirect_inline_data;
	default:
		return 0;
	}
}

/* reflink pointers */

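/*
 * A reflink pointer references a range of the reflink btree: the indirect
 * extents it points to live at [idx, idx + size). front_pad/back_pad widen
 * the range the triggers walk, so refcount updates cover whole indirect
 * extents even after this pointer has been trimmed.
 */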
int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);

	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix &&
	    le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad)) {
		prt_printf(err, "idx < front_pad (%llu < %u)",
			   le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad));
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);

	prt_printf(out, "idx %llu front_pad %u back_pad %u",
		   le64_to_cpu(p.v->idx),
		   le32_to_cpu(p.v->front_pad),
		   le32_to_cpu(p.v->back_pad));
}

bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reflink_p   l = bkey_s_to_reflink_p(_l);
	struct bkey_s_c_reflink_p r = bkey_s_c_to_reflink_p(_r);

	/*
	 * Disabled for now, the triggers code needs to be reworked for merging
	 * of reflink pointers to work:
	 */
	return false;

	if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx))
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

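/*
 * Transactional trigger for reflink pointers: walk the indirect extents
 * covered by [idx - front_pad, idx + size + back_pad) and adjust each one's
 * refcount, +1 on insert or -1 on overwrite.
 */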
static int trans_mark_reflink_p_segment(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 *idx, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i *k;
	__le64 *refcount;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	struct printbuf buf = PRINTBUF;
	int ret;

	k = bch2_bkey_get_mut_noupdate(trans, &iter,
			BTREE_ID_reflink, POS(0, *idx),
			BTREE_ITER_WITH_UPDATES);
	ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		goto err;

	refcount = bkey_refcount(k);
	if (!refcount) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"nonexistent indirect extent at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"indirect extent refcount underflow at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
		u64 pad;

		pad = max_t(s64, le32_to_cpu(v->front_pad),
			    le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
		BUG_ON(pad > U32_MAX);
		v->front_pad = cpu_to_le32(pad);

		pad = max_t(s64, le32_to_cpu(v->back_pad),
			    k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
		BUG_ON(pad > U32_MAX);
		v->back_pad = cpu_to_le32(pad);
	}

	le64_add_cpu(refcount, add);

	bch2_btree_iter_set_pos_to_extent_start(&iter);
	ret = bch2_trans_update(trans, &iter, k, 0);
	if (ret)
		goto err;

	*idx = k->k.p.offset;
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static int __trans_mark_reflink_p(struct btree_trans *trans,
				enum btree_id btree_id, unsigned level,
				struct bkey_s_c k, unsigned flags)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	u64 idx, end_idx;
	int ret = 0;

	idx	= le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
	end_idx = le64_to_cpu(p.v->idx) + p.k->size +
		le32_to_cpu(p.v->back_pad);

	while (idx < end_idx && !ret)
		ret = trans_mark_reflink_p_segment(trans, p, &idx, flags);

	return ret;
}

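/*
 * Entry point for the transactional trigger: on insert, padding starts at
 * zero and is grown by trans_mark_reflink_p_segment() as the indirect
 * extents are visited.
 */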
int bch2_trans_mark_reflink_p(struct btree_trans *trans,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c old, struct bkey_i *new,
			      unsigned flags)
{
	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = &bkey_i_to_reflink_p(new)->v;

		v->front_pad = v->back_pad = 0;
	}

	return trigger_run_overwrite_then_insert(__trans_mark_reflink_p, trans, btree_id, level, old, new, flags);
}

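/*
 * GC marking path: refcounts are tracked in the in-memory reflink_gc_table
 * rather than by updating the reflink btree directly; a pointer into a
 * missing indirect extent is repaired by inserting an error key over the
 * missing range.
 */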
static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
				 struct bkey_s_c_reflink_p p,
				 u64 start, u64 end,
				 u64 *idx, unsigned flags, size_t r_idx)
{
	struct bch_fs *c = trans->c;
	struct reflink_gc *r;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	u64 next_idx = end;
	s64 ret = 0;
	struct printbuf buf = PRINTBUF;

	if (r_idx >= c->reflink_gc_nr)
		goto not_found;

	r = genradix_ptr(&c->reflink_gc_table, r_idx);
	next_idx = min(next_idx, r->offset - r->size);
	if (*idx < next_idx)
		goto not_found;

	BUG_ON((s64) r->refcount + add < 0);

	r->refcount += add;
	*idx = r->offset;
	return 0;
not_found:
	if (fsck_err(c, reflink_p_to_missing_reflink_v,
		     "pointer to missing indirect extent\n"
		     "  %s\n"
		     "  missing range %llu-%llu",
		     (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
		     *idx, next_idx)) {
		struct bkey_i_error *new;

		new = bch2_trans_kmalloc(trans, sizeof(*new));
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			goto err;

		bkey_init(&new->k);
		new->k.type	= KEY_TYPE_error;
		new->k.p	= bkey_start_pos(p.k);
		new->k.p.offset += *idx - start;
		bch2_key_resize(&new->k, next_idx - *idx);
		ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i,
					  BTREE_TRIGGER_NORUN);
	}

	*idx = next_idx;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

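/*
 * Binary search the reflink_gc_table for the first entry that ends past idx,
 * then mark every entry this pointer overlaps.
 */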
static int __mark_reflink_p(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	struct reflink_gc *ref;
	size_t l, r, m;
	u64 idx = le64_to_cpu(p.v->idx), start = idx;
	u64 end = le64_to_cpu(p.v->idx) + p.k->size;
	int ret = 0;

	BUG_ON(!(flags & BTREE_TRIGGER_GC));

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_reflink_p_fix) {
		idx -= le32_to_cpu(p.v->front_pad);
		end += le32_to_cpu(p.v->back_pad);
	}

	l = 0;
	r = c->reflink_gc_nr;
	while (l < r) {
		m = l + (r - l) / 2;

		ref = genradix_ptr(&c->reflink_gc_table, m);
		if (ref->offset <= idx)
			l = m + 1;
		else
			r = m;
	}

	while (idx < end && !ret)
		ret = __bch2_mark_reflink_p(trans, p, start, end,
					    &idx, flags, l++);

	return ret;
}

int bch2_mark_reflink_p(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level,
			struct bkey_s_c old, struct bkey_s_c new,
			unsigned flags)
{
	return mem_trigger_run_overwrite_then_insert(__mark_reflink_p, trans, btree_id, level, old, new, flags);
}

/* indirect extents */

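/*
 * reflink_v keys live in the reflink btree; the value is a 64-bit refcount
 * followed by the same pointers an ordinary extent would carry.
 */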
int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}

void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

	prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount));

	bch2_bkey_ptrs_to_text(out, c, k);
}

#if 0
Currently disabled, needs to be debugged:

bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reflink_v   l = bkey_s_to_reflink_v(_l);
	struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r);

	return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
}
#endif

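/*
 * If an update leaves an indirect extent with a refcount of zero, delete it
 * instead of writing it back, and clear BTREE_TRIGGER_INSERT so triggers
 * don't account for the dead key.
 */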
static inline void check_indirect_extent_deleting(struct bkey_i *new, unsigned *flags)
{
	if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
		new->k.type = KEY_TYPE_deleted;
		new->k.size = 0;
		set_bkey_val_u64s(&new->k, 0);
		*flags &= ~BTREE_TRIGGER_INSERT;
	}
}

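/*
 * If only the refcount changed (the value past the 8-byte refcount is
 * byte-for-byte identical), the pointers didn't change and the extent
 * trigger can be skipped.
 */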
int bch2_trans_mark_reflink_v(struct btree_trans *trans,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c old, struct bkey_i *new,
			      unsigned flags)
{
	check_indirect_extent_deleting(new, &flags);

	if (old.k->type == KEY_TYPE_reflink_v &&
	    new->k.type == KEY_TYPE_reflink_v &&
	    old.k->u64s == new->k.u64s &&
	    !memcmp(bkey_s_c_to_reflink_v(old).v->start,
		    bkey_i_to_reflink_v(new)->v.start,
		    bkey_val_bytes(&new->k) - 8))
		return 0;

	return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
}

/* indirect inline data */

int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
				      enum bkey_invalid_flags flags,
				      struct printbuf *err)
{
	return 0;
}

void bch2_indirect_inline_data_to_text(struct printbuf *out,
				       struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
	unsigned datalen = bkey_inline_data_bytes(k.k);

	prt_printf(out, "refcount %llu datalen %u: %*phN",
		   le64_to_cpu(d.v->refcount), datalen,
		   min(datalen, 32U), d.v->data);
}

int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c old, struct bkey_i *new,
			      unsigned flags)
{
	check_indirect_extent_deleting(new, &flags);

	return 0;
}

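/*
 * Move @orig to the end of the reflink btree, prepending a refcount to its
 * value, and rewrite @orig in place into a reflink_p key pointing at the new
 * indirect extent.
 */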
static int bch2_make_extent_indirect(struct btree_trans *trans,
				     struct btree_iter *extent_iter,
				     struct bkey_i *orig)
{
	struct bch_fs *c = trans->c;
	struct btree_iter reflink_iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i *r_v;
	struct bkey_i_reflink_p *r_p;
	__le64 *refcount;
	int ret;

	if (orig->k.type == KEY_TYPE_inline_data)
		bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);

	bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_prev(&reflink_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
	ret = PTR_ERR_OR_ZERO(r_v);
	if (ret)
		goto err;

	bkey_init(&r_v->k);
	r_v->k.type	= bkey_type_to_indirect(&orig->k);
	r_v->k.p	= reflink_iter.pos;
	bch2_key_resize(&r_v->k, orig->k.size);
	r_v->k.version	= orig->k.version;

	set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));

	refcount	= bkey_refcount(r_v);
	*refcount	= 0;
	memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));

	ret = bch2_trans_update(trans, &reflink_iter, r_v, 0);
	if (ret)
		goto err;

	/*
	 * orig is in a bkey_buf which statically allocates 5 64s for the val,
	 * so we know it will be big enough:
	 */
	orig->k.type = KEY_TYPE_reflink_p;
	r_p = bkey_i_to_reflink_p(orig);
	set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));

	/* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
	__underlying_memset(&r_p->v, 0, sizeof(r_p->v));
#else
	memset(&r_p->v, 0, sizeof(r_p->v));
#endif

	r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));

	ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
err:
	bch2_trans_iter_exit(trans, &reflink_iter);

	return ret;
}

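/*
 * Find the next extent that actually has data (skipping unwritten extents),
 * up to @end; returns a null key when there's nothing left to copy.
 */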
static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
{
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) {
		if (bkey_extent_is_unwritten(k))
			continue;

		if (bkey_extent_is_data(k.k))
			return k;
	}

	if (bkey_ge(iter->pos, end))
		bch2_btree_iter_set_pos(iter, end);

	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}

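/*
 * Remap a range of the source inode into the destination inode by sharing
 * extents: each iteration runs in its own transaction, looks up both
 * subvolumes' snapshot IDs, punches holes in the destination where the
 * source has holes, converts source extents to indirect extents as needed,
 * and inserts reflink pointers into the destination.  The destination
 * inode's i_size is updated afterwards.
 */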
s64 bch2_remap_range(struct bch_fs *c,
		     subvol_inum dst_inum, u64 dst_offset,
		     subvol_inum src_inum, u64 src_offset,
		     u64 remap_sectors,
		     u64 new_i_size, s64 *i_sectors_delta)
{
	struct btree_trans *trans;
	struct btree_iter dst_iter, src_iter;
	struct bkey_s_c src_k;
	struct bkey_buf new_dst, new_src;
	struct bpos dst_start = POS(dst_inum.inum, dst_offset);
	struct bpos src_start = POS(src_inum.inum, src_offset);
	struct bpos dst_end = dst_start, src_end = src_start;
	struct bch_io_opts opts;
	struct bpos src_want;
	u64 dst_done = 0;
	u32 dst_snapshot, src_snapshot;
	int ret = 0, ret2 = 0;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink))
		return -BCH_ERR_erofs_no_writes;

	bch2_check_set_feature(c, BCH_FEATURE_reflink);

	dst_end.offset += remap_sectors;
	src_end.offset += remap_sectors;

	bch2_bkey_buf_init(&new_dst);
	bch2_bkey_buf_init(&new_src);
	trans = bch2_trans_get(c);

	ret = bch2_inum_opts_get(trans, src_inum, &opts);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
			     BTREE_ITER_INTENT);
	bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
			     BTREE_ITER_INTENT);

	while ((ret == 0 ||
		bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
	       bkey_lt(dst_iter.pos, dst_end)) {
		struct disk_reservation disk_res = { 0 };

		bch2_trans_begin(trans);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bch2_subvolume_get_snapshot(trans, src_inum.subvol,
						  &src_snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);

		ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
						  &dst_snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);

		dst_done = dst_iter.pos.offset - dst_start.offset;
		src_want = POS(src_start.inode, src_start.offset + dst_done);
		bch2_btree_iter_set_pos(&src_iter, src_want);

		src_k = get_next_src(&src_iter, src_end);
		ret = bkey_err(src_k);
		if (ret)
			continue;

		if (bkey_lt(src_want, src_iter.pos)) {
			ret = bch2_fpunch_at(trans, &dst_iter, dst_inum,
					min(dst_end.offset,
					    dst_iter.pos.offset +
					    src_iter.pos.offset - src_want.offset),
					i_sectors_delta);
			continue;
		}

		if (src_k.k->type != KEY_TYPE_reflink_p) {
			bch2_btree_iter_set_pos_to_extent_start(&src_iter);

			bch2_bkey_buf_reassemble(&new_src, c, src_k);
			src_k = bkey_i_to_s_c(new_src.k);

			ret = bch2_make_extent_indirect(trans, &src_iter,
						new_src.k);
			if (ret)
				continue;

			BUG_ON(src_k.k->type != KEY_TYPE_reflink_p);
		}

		if (src_k.k->type == KEY_TYPE_reflink_p) {
			struct bkey_s_c_reflink_p src_p =
				bkey_s_c_to_reflink_p(src_k);
			struct bkey_i_reflink_p *dst_p =
				bkey_reflink_p_init(new_dst.k);

			u64 offset = le64_to_cpu(src_p.v->idx) +
				(src_want.offset -
				 bkey_start_offset(src_k.k));

			dst_p->v.idx = cpu_to_le64(offset);
		} else {
			BUG();
		}

		new_dst.k->k.p = dst_iter.pos;
		bch2_key_resize(&new_dst.k->k,
				min(src_k.k->p.offset - src_want.offset,
				    dst_end.offset - dst_iter.pos.offset));

		ret =   bch2_bkey_set_needs_rebalance(c, new_dst.k,
					opts.background_target,
					opts.background_compression) ?:
			bch2_extent_update(trans, dst_inum, &dst_iter,
					new_dst.k, &disk_res,
					new_i_size, i_sectors_delta,
					true);
		bch2_disk_reservation_put(c, &disk_res);
	}
	bch2_trans_iter_exit(trans, &dst_iter);
	bch2_trans_iter_exit(trans, &src_iter);

	BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
	BUG_ON(bkey_gt(dst_iter.pos, dst_end));

	dst_done = dst_iter.pos.offset - dst_start.offset;
	new_i_size = min(dst_iter.pos.offset << 9, new_i_size);

	do {
		struct bch_inode_unpacked inode_u;
		struct btree_iter inode_iter = { NULL };

		bch2_trans_begin(trans);

		ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
				       dst_inum, BTREE_ITER_INTENT);

		if (!ret2 &&
		    inode_u.bi_size < new_i_size) {
			inode_u.bi_size = new_i_size;
			ret2  = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
		}

		bch2_trans_iter_exit(trans, &inode_iter);
	} while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
err:
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&new_src, c);
	bch2_bkey_buf_exit(&new_dst, c);

	bch2_write_ref_put(c, BCH_WRITE_REF_reflink);

	return dst_done ?: ret ?: ret2;
}