// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "extents.h"
#include "rebalance.h"
#include "reflink.h"
#include "subvolume.h"

#include <linux/sched/signal.h>
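/*
 * Extents are made "indirect" by moving them to the reflink btree, where they
 * gain a refcount; the extent in the extents btree is then replaced by a
 * KEY_TYPE_reflink_p key pointing at them.  bkey_type_to_indirect() maps an
 * extent type to the corresponding indirect type.
 */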
static inline unsigned bkey_type_to_indirect(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
		return KEY_TYPE_reflink_v;
	case KEY_TYPE_inline_data:
		return KEY_TYPE_indirect_inline_data;
	default:
		return 0;
	}
}

/* reflink pointers */
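/*
 * A reflink pointer references a range of the reflink btree starting at @idx;
 * @front_pad and @back_pad record how far beyond the pointer's own size that
 * range extends, so the triggers below walk
 * [idx - front_pad, idx + size + back_pad) when adjusting refcounts.
 */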
int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	int ret = 0;

	bkey_fsck_err_on(le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad),
			 c, err, reflink_p_front_pad_bad,
			 "idx < front_pad (%llu < %u)",
			 le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad));
fsck_err:
	return ret;
}
void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);

	prt_printf(out, "idx %llu front_pad %u back_pad %u",
		   le64_to_cpu(p.v->idx),
		   le32_to_cpu(p.v->front_pad),
		   le32_to_cpu(p.v->back_pad));
}
bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reflink_p l = bkey_s_to_reflink_p(_l);
	struct bkey_s_c_reflink_p r = bkey_s_c_to_reflink_p(_r);

	/*
	 * Disabled for now, the triggers code needs to be reworked for merging
	 * of reflink pointers to work:
	 */
	return false;

	if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx))
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}
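/*
 * Transactional trigger, one segment at a time: look up the indirect extent
 * at *idx, adjust its refcount (+1 on insert, -1 on overwrite), grow the
 * pointer's pads on insert if the indirect extent extends past the range the
 * pointer expected to cover, and advance *idx to the end of that indirect
 * extent.
 */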
static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 *idx, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i *k;
	__le64 *refcount;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	struct printbuf buf = PRINTBUF;
	int ret;

	k = bch2_bkey_get_mut_noupdate(trans, &iter,
			BTREE_ID_reflink, POS(0, *idx),
			BTREE_ITER_WITH_UPDATES);
	ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		goto err;

	refcount = bkey_refcount(bkey_i_to_s(k));
	if (!refcount) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"nonexistent indirect extent at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"indirect extent refcount underflow at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (flags & BTREE_TRIGGER_INSERT) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
		u64 pad;

		pad = max_t(s64, le32_to_cpu(v->front_pad),
			    le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
		BUG_ON(pad > U32_MAX);
		v->front_pad = cpu_to_le32(pad);

		pad = max_t(s64, le32_to_cpu(v->back_pad),
			    k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
		BUG_ON(pad > U32_MAX);
		v->back_pad = cpu_to_le32(pad);
	}

	le64_add_cpu(refcount, add);

	bch2_btree_iter_set_pos_to_extent_start(&iter);
	ret = bch2_trans_update(trans, &iter, k, 0);
	if (ret)
		goto err;

	*idx = k->k.p.offset;
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
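/*
 * GC trigger, one segment at a time: refcounts are tracked in
 * c->reflink_gc_table (indexed by r_idx) instead of being updated in the
 * btree.  If no indirect extent covers *idx, fsck can repair the reflink
 * pointer by trimming its pads or, for a gap in the middle of the pointer,
 * replacing the missing range with an error key.
 */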
static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
				struct bkey_s_c_reflink_p p,
				u64 *idx, unsigned flags, size_t r_idx)
{
	struct bch_fs *c = trans->c;
	struct reflink_gc *r;
	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
	u64 start = le64_to_cpu(p.v->idx);
	u64 end = le64_to_cpu(p.v->idx) + p.k->size;
	u64 next_idx = end + le32_to_cpu(p.v->back_pad);
	s64 ret = 0;
	struct printbuf buf = PRINTBUF;

	if (r_idx >= c->reflink_gc_nr)
		goto not_found;

	r = genradix_ptr(&c->reflink_gc_table, r_idx);
	next_idx = min(next_idx, r->offset - r->size);
	if (*idx < r->offset - r->size)
		goto not_found;

	BUG_ON((s64) r->refcount + add < 0);

	r->refcount += add;
	*idx = r->offset;
	return 0;
not_found:
	if (fsck_err(c, reflink_p_to_missing_reflink_v,
		     "pointer to missing indirect extent\n"
		     "  %s\n"
		     "  missing range %llu-%llu",
		     (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
		     *idx, next_idx)) {
		struct bkey_i *update = bch2_bkey_make_mut_noupdate(trans, p.s_c);
		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		if (next_idx <= start) {
			bkey_i_to_reflink_p(update)->v.front_pad = cpu_to_le32(start - next_idx);
		} else if (*idx >= end) {
			bkey_i_to_reflink_p(update)->v.back_pad = cpu_to_le32(*idx - end);
		} else {
			bkey_error_init(update);
			update->k.p = p.k->p;
			update->k.p.offset = next_idx;
			update->k.size = next_idx - *idx;
			set_bkey_val_u64s(&update->k, 0);
		}

		ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_NORUN);
	}

	*idx = next_idx;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
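/*
 * Common trigger body: walk the entire referenced range,
 * [idx - front_pad, idx + size + back_pad), segment by segment.  For the GC
 * path the starting slot in the gc table is found by binary search on offset.
 */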
static int __trigger_reflink_p(struct btree_trans *trans,
			       enum btree_id btree_id, unsigned level,
			       struct bkey_s_c k, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	int ret = 0;

	u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
	u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad);

	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		while (idx < end && !ret)
			ret = trans_trigger_reflink_p_segment(trans, p, &idx, flags);
	}

	if (flags & BTREE_TRIGGER_GC) {
		size_t l = 0, r = c->reflink_gc_nr;

		while (l < r) {
			size_t m = l + (r - l) / 2;
			struct reflink_gc *ref = genradix_ptr(&c->reflink_gc_table, m);

			if (ref->offset <= idx)
				l = m + 1;
			else
				r = m;
		}

		while (idx < end && !ret)
			ret = gc_trigger_reflink_p_segment(trans, p, &idx, flags, l++);
	}

	return ret;
}
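/*
 * Trigger entry point for KEY_TYPE_reflink_p.  On a transactional insert the
 * pads are cleared first; the per-segment trigger recomputes them from the
 * indirect extents it actually finds.
 */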
int bch2_trigger_reflink_p(struct btree_trans *trans,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c old, struct bkey_s new,
			   unsigned flags)
{
	if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
	    (flags & BTREE_TRIGGER_INSERT)) {
		struct bch_reflink_p *v = bkey_s_to_reflink_p(new).v;

		v->front_pad = v->back_pad = 0;
	}

	return trigger_run_overwrite_then_insert(__trigger_reflink_p, trans, btree_id, level, old, new, flags);
}
/* indirect extents */

int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}
void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

	prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount));
	bch2_bkey_ptrs_to_text(out, c, k);
}
#if 0
Currently disabled, needs to be debugged:

bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reflink_v   l = bkey_s_to_reflink_v(_l);
	struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r);

	return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
}
#endif
static inline void check_indirect_extent_deleting(struct bkey_s new, unsigned *flags)
{
	if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
		new.k->type = KEY_TYPE_deleted;
		new.k->size = 0;
		set_bkey_val_u64s(new.k, 0);
		*flags &= ~BTREE_TRIGGER_INSERT;
	}
}
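/*
 * Trigger for KEY_TYPE_reflink_v: inserts that would leave the refcount at
 * zero are turned into deletions, and updates that only touch the refcount
 * (the first 8 bytes of the value) skip the extent trigger entirely.
 */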
int bch2_trans_mark_reflink_v(struct btree_trans *trans,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c old, struct bkey_s new,
			      unsigned flags)
{
	if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
	    (flags & BTREE_TRIGGER_INSERT))
		check_indirect_extent_deleting(new, &flags);

	if (old.k->type == KEY_TYPE_reflink_v &&
	    new.k->type == KEY_TYPE_reflink_v &&
	    old.k->u64s == new.k->u64s &&
	    !memcmp(bkey_s_c_to_reflink_v(old).v->start,
		    bkey_s_to_reflink_v(new).v->start,
		    bkey_val_bytes(new.k) - 8))
		return 0;

	return bch2_trigger_extent(trans, btree_id, level, old, new, flags);
}
/* indirect inline data */

int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
				      enum bkey_invalid_flags flags,
				      struct printbuf *err)
{
	return 0;
}
void bch2_indirect_inline_data_to_text(struct printbuf *out,
				       struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
	unsigned datalen = bkey_inline_data_bytes(k.k);

	prt_printf(out, "refcount %llu datalen %u: %*phN",
		   le64_to_cpu(d.v->refcount), datalen,
		   min(datalen, 32U), d.v->data);
}
int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c old, struct bkey_s new,
			      unsigned flags)
{
	check_indirect_extent_deleting(new, &flags);
	return 0;
}
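/*
 * Make @orig refcounted: emit a new indirect extent (reflink_v or
 * indirect_inline_data) in the reflink btree with an initial refcount of
 * zero, then rewrite @orig in place into a reflink pointer at it; the
 * refcount is bumped when the reflink pointer's own trigger runs.
 */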
static int bch2_make_extent_indirect(struct btree_trans *trans,
				     struct btree_iter *extent_iter,
				     struct bkey_i *orig)
{
	struct bch_fs *c = trans->c;
	struct btree_iter reflink_iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i *r_v;
	struct bkey_i_reflink_p *r_p;
	__le64 *refcount;
	int ret;

	if (orig->k.type == KEY_TYPE_inline_data)
		bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);

	bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_prev(&reflink_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
	ret = PTR_ERR_OR_ZERO(r_v);
	if (ret)
		goto err;

	bkey_init(&r_v->k);
	r_v->k.type = bkey_type_to_indirect(&orig->k);
	r_v->k.p = reflink_iter.pos;
	bch2_key_resize(&r_v->k, orig->k.size);
	r_v->k.version = orig->k.version;

	set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));

	refcount = bkey_refcount(bkey_i_to_s(r_v));
	*refcount = 0;
	memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));

	ret = bch2_trans_update(trans, &reflink_iter, r_v, 0);
	if (ret)
		goto err;

	/*
	 * orig is in a bkey_buf which statically allocates 5 64s for the val,
	 * so we know it will be big enough:
	 */
	orig->k.type = KEY_TYPE_reflink_p;
	r_p = bkey_i_to_reflink_p(orig);
	set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));

	/* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
	__underlying_memset(&r_p->v, 0, sizeof(r_p->v));
#else
	memset(&r_p->v, 0, sizeof(r_p->v));
#endif

	r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));

	ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
err:
	bch2_trans_iter_exit(trans, &reflink_iter);
	return ret;
}
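/*
 * Return the next extent in the source range that actually needs remapping:
 * unwritten extents are skipped, iteration stops at @end, and a null (or
 * error) key is returned when nothing is left.
 */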
static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
{
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) {
		if (bkey_extent_is_unwritten(k))
			continue;
		if (bkey_extent_is_data(k.k))
			return k;
	}

	if (bkey_ge(iter->pos, end))
		bch2_btree_iter_set_pos(iter, end);
	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
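/*
 * bch2_remap_range(): the guts of reflink copies (remap_file_range).  For
 * each source extent that isn't already a reflink pointer, move it to the
 * reflink btree, then write a reflink pointer covering it into the
 * destination range; holes in the source become holes in the destination.
 * Returns the number of sectors remapped, or an error if nothing was
 * remapped.
 */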
s64 bch2_remap_range(struct bch_fs *c,
		     subvol_inum dst_inum, u64 dst_offset,
		     subvol_inum src_inum, u64 src_offset,
		     u64 remap_sectors,
		     u64 new_i_size, s64 *i_sectors_delta)
{
	struct btree_trans *trans;
	struct btree_iter dst_iter, src_iter;
	struct bkey_s_c src_k;
	struct bkey_buf new_dst, new_src;
	struct bpos dst_start = POS(dst_inum.inum, dst_offset);
	struct bpos src_start = POS(src_inum.inum, src_offset);
	struct bpos dst_end = dst_start, src_end = src_start;
	struct bch_io_opts opts;
	struct bpos src_want;
	u64 dst_done = 0;
	u32 dst_snapshot, src_snapshot;
	int ret = 0, ret2 = 0;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink))
		return -BCH_ERR_erofs_no_writes;

	bch2_check_set_feature(c, BCH_FEATURE_reflink);

	dst_end.offset += remap_sectors;
	src_end.offset += remap_sectors;

	bch2_bkey_buf_init(&new_dst);
	bch2_bkey_buf_init(&new_src);
	trans = bch2_trans_get(c);

	ret = bch2_inum_opts_get(trans, src_inum, &opts);
	if (ret)
		goto err;
	bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
			     BTREE_ITER_INTENT);
	bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
			     BTREE_ITER_INTENT);

	while ((ret == 0 ||
		bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
	       bkey_lt(dst_iter.pos, dst_end)) {
		struct disk_reservation disk_res = { 0 };

		bch2_trans_begin(trans);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bch2_subvolume_get_snapshot(trans, src_inum.subvol,
						  &src_snapshot);
		if (ret)
			continue;
		bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);

		ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
						  &dst_snapshot);
		if (ret)
			continue;
		bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);

		dst_done = dst_iter.pos.offset - dst_start.offset;
		src_want = POS(src_start.inode, src_start.offset + dst_done);
		bch2_btree_iter_set_pos(&src_iter, src_want);

		src_k = get_next_src(&src_iter, src_end);
		ret = bkey_err(src_k);
		if (ret)
			continue;

		if (bkey_lt(src_want, src_iter.pos)) {
			ret = bch2_fpunch_at(trans, &dst_iter, dst_inum,
					min(dst_end.offset,
					    dst_iter.pos.offset +
					    src_iter.pos.offset - src_want.offset),
					i_sectors_delta);
			continue;
		}

		if (src_k.k->type != KEY_TYPE_reflink_p) {
			bch2_btree_iter_set_pos_to_extent_start(&src_iter);

			bch2_bkey_buf_reassemble(&new_src, c, src_k);
			src_k = bkey_i_to_s_c(new_src.k);

			ret = bch2_make_extent_indirect(trans, &src_iter, new_src.k);
			if (ret)
				continue;

			BUG_ON(src_k.k->type != KEY_TYPE_reflink_p);
		}
		if (src_k.k->type == KEY_TYPE_reflink_p) {
			struct bkey_s_c_reflink_p src_p =
				bkey_s_c_to_reflink_p(src_k);
			struct bkey_i_reflink_p *dst_p =
				bkey_reflink_p_init(new_dst.k);

			u64 offset = le64_to_cpu(src_p.v->idx) +
				(src_want.offset -
				 bkey_start_offset(src_k.k));

			dst_p->v.idx = cpu_to_le64(offset);
		} else {
			BUG();
		}

		new_dst.k->k.p = dst_iter.pos;
		bch2_key_resize(&new_dst.k->k,
				min(src_k.k->p.offset - src_want.offset,
				    dst_end.offset - dst_iter.pos.offset));

		ret =   bch2_bkey_set_needs_rebalance(c, new_dst.k,
					opts.background_target,
					opts.background_compression) ?:
			bch2_extent_update(trans, dst_inum, &dst_iter,
					new_dst.k, &disk_res,
					new_i_size, i_sectors_delta,
					true);
		bch2_disk_reservation_put(c, &disk_res);
	}
	bch2_trans_iter_exit(trans, &dst_iter);
	bch2_trans_iter_exit(trans, &src_iter);

	BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
	BUG_ON(bkey_gt(dst_iter.pos, dst_end));

	dst_done = dst_iter.pos.offset - dst_start.offset;
	new_i_size = min(dst_iter.pos.offset << 9, new_i_size);

	do {
		struct bch_inode_unpacked inode_u;
		struct btree_iter inode_iter = { NULL };

		bch2_trans_begin(trans);

		ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
				       dst_inum, BTREE_ITER_INTENT);

		if (!ret2 &&
		    inode_u.bi_size < new_i_size) {
			inode_u.bi_size = new_i_size;
			ret2  = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
		}

		bch2_trans_iter_exit(trans, &inode_iter);
	} while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
err:
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&new_src, c);
	bch2_bkey_buf_exit(&new_dst, c);

	bch2_write_ref_put(c, BCH_WRITE_REF_reflink);

	return dst_done ?: ret ?: ret2;
}