1 // SPDX-License-Identifier: GPL-2.0
3 #include "bkey_on_stack.h"
4 #include "btree_update.h"
10 #include <linux/sched/signal.h>
/*
 * Map a direct extent key type to its indirect (reflinked) counterpart.
 * NOTE(review): the switch header and remaining cases are elided in this
 * view; only the reflink_v and indirect_inline_data mappings are visible.
 */
12 static inline unsigned bkey_type_to_indirect(const struct bkey *k)
16 return KEY_TYPE_reflink_v;
17 case KEY_TYPE_inline_data:
18 return KEY_TYPE_indirect_inline_data;
24 /* reflink pointers */
/*
 * Validate a reflink pointer key: its value must be exactly the size of
 * struct bch_reflink_p. Returns an error string on failure; the success
 * (NULL) return path is elided from this view.
 */
26 const char *bch2_reflink_p_invalid(const struct bch_fs *c, struct bkey_s_c k)
28 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
30 if (bkey_val_bytes(p.k) != sizeof(*p.v))
31 return "incorrect value size";
/*
 * Print a human-readable representation of a reflink pointer: the index
 * (offset) it points at within the reflink btree.
 */
36 void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c,
39 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
41 pr_buf(out, "idx %llu", le64_to_cpu(p.v->idx));
/*
 * Attempt to merge two adjacent reflink pointer keys (l before r).
 *
 * Mergeable only if l's indirect range ends exactly where r's begins
 * (l.idx + l.size == r.idx). If the combined size would exceed
 * KEY_SIZE_MAX, l is grown to the maximum and r is trimmed from the
 * front, yielding a partial merge; otherwise the two are fully merged
 * into l.
 */
44 enum merge_result bch2_reflink_p_merge(struct bch_fs *c,
45 struct bkey_s _l, struct bkey_s _r)
47 struct bkey_s_reflink_p l = bkey_s_to_reflink_p(_l);
48 struct bkey_s_reflink_p r = bkey_s_to_reflink_p(_r);
	/* not contiguous in the reflink btree -> cannot merge */
50 if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx))
51 return BCH_MERGE_NOMERGE;
	/* combined size overflows a key: merge as much as fits */
53 if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
54 bch2_key_resize(l.k, KEY_SIZE_MAX);
55 bch2_cut_front_s(l.k->p, _r);
56 return BCH_MERGE_PARTIAL;
59 bch2_key_resize(l.k, l.k->size + r.k->size);
61 return BCH_MERGE_MERGE;
64 /* indirect extents */
/*
 * Validate an indirect extent (reflink_v) key: the value must be at
 * least large enough to hold the refcount header, then the trailing
 * extent pointers are validated by the generic ptrs checker.
 */
66 const char *bch2_reflink_v_invalid(const struct bch_fs *c, struct bkey_s_c k)
68 struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
70 if (bkey_val_bytes(r.k) < sizeof(*r.v))
71 return "incorrect value size";
73 return bch2_bkey_ptrs_invalid(c, k);
/*
 * Print an indirect extent: its refcount followed by the standard
 * extent-pointer representation.
 */
76 void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
79 struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
81 pr_buf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount));
83 bch2_bkey_ptrs_to_text(out, c, k);
86 /* indirect inline data */
/*
 * Validate an indirect inline-data key: the value must hold at least
 * the bch_indirect_inline_data header (refcount + data). The success
 * return path is elided from this view.
 */
88 const char *bch2_indirect_inline_data_invalid(const struct bch_fs *c,
91 if (bkey_val_bytes(k.k) < sizeof(struct bch_indirect_inline_data))
92 return "incorrect value size";
/*
 * Print an indirect inline-data key: refcount, payload length, and a
 * hex dump of at most the first 32 bytes of the inline data.
 */
96 void bch2_indirect_inline_data_to_text(struct printbuf *out,
97 struct bch_fs *c, struct bkey_s_c k)
99 struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
100 unsigned datalen = bkey_inline_data_bytes(k.k);
102 pr_buf(out, "refcount %llu datalen %u: %*phN",
103 le64_to_cpu(d.v->refcount), datalen,
104 min(datalen, 32U), d.v->data);
/*
 * Convert a direct extent (@orig) into an indirect one: copy its value
 * into a new key in the reflink btree (prefixed with a refcount field),
 * then rewrite @orig in place as a reflink pointer referencing it.
 *
 * NOTE(review): several lines (declarations, error-handling gotos, the
 * refcount initialization, and the final return) are elided from this
 * view; comments below describe only what is visible.
 */
107 static int bch2_make_extent_indirect(struct btree_trans *trans,
108 struct btree_iter *extent_iter,
111 struct bch_fs *c = trans->c;
112 struct btree_iter *reflink_iter;
115 struct bkey_i_reflink_p *r_p;
	/* inline data becomes indirect inline data: flag the feature bit */
119 if (orig->k.type == KEY_TYPE_inline_data)
120 bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
	/*
	 * Scan the reflink btree from the allocation hint for a hole big
	 * enough to hold the new indirect extent:
	 */
122 for_each_btree_key(trans, reflink_iter, BTREE_ID_REFLINK,
123 POS(0, c->reflink_hint),
124 BTREE_ITER_INTENT|BTREE_ITER_SLOTS, k, ret) {
	/* reflink keys live at inode 0; wrap around if we ran past it */
125 if (reflink_iter->pos.inode) {
126 bch2_btree_iter_set_pos(reflink_iter, POS_MIN);
130 if (bkey_deleted(k.k) && orig->k.size <= k.k->size)
137 /* rewind iter to start of hole, if necessary: */
138 bch2_btree_iter_set_pos(reflink_iter, bkey_start_pos(k.k));
	/* new reflink_v value = 64-bit refcount + the original value */
140 r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_val_bytes(&orig->k));
141 ret = PTR_ERR_OR_ZERO(r_v);
146 r_v->k.type = bkey_type_to_indirect(&orig->k);
147 r_v->k.p = reflink_iter->pos;
148 bch2_key_resize(&r_v->k, orig->k.size);
149 r_v->k.version = orig->k.version;
151 set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));
153 refcount = (void *) &r_v->v;
	/* payload goes right after the refcount field */
155 memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));
157 bch2_trans_update(trans, reflink_iter, r_v, 0);
159 r_p = bch2_trans_kmalloc(trans, sizeof(*r_p));
	/* rewrite @orig in place as a pointer into the reflink btree */
163 orig->k.type = KEY_TYPE_reflink_p;
164 r_p = bkey_i_to_reflink_p(orig);
165 set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
166 r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
168 bch2_trans_update(trans, extent_iter, &r_p->k_i, 0);
	/* remember where we allocated, to speed up the next search */
170 if (!IS_ERR(reflink_iter))
171 c->reflink_hint = reflink_iter->pos.offset;
172 bch2_trans_iter_put(trans, reflink_iter);
/*
 * Advance @iter to the next key containing data before @end.
 * Returns bkey_s_c_null once @end is reached; non-data keys (holes)
 * are skipped. (Loop tail / fallthrough handling elided from this view.)
 */
177 static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
179 struct bkey_s_c k = bch2_btree_iter_peek(iter);
182 for_each_btree_key_continue(iter, 0, k, ret) {
183 if (bkey_cmp(iter->pos, end) >= 0)
184 return bkey_s_c_null;
186 if (bkey_extent_is_data(k.k))
/*
 * Remap (reflink) @remap_sectors sectors from @src_start to @dst_start:
 * source extents are made indirect (if they aren't already), and
 * reflink pointers to the same indirect data are written at the
 * destination. Holes in the source become fpunched ranges in the
 * destination. Finally the destination inode's i_size is updated.
 *
 * Returns the number of sectors actually remapped, or an error.
 *
 * NOTE(review): many lines (gotos, labels, iterator flags, loop
 * conditions) are elided from this view; comments describe only the
 * visible logic.
 */
193 s64 bch2_remap_range(struct bch_fs *c,
194 struct bpos dst_start, struct bpos src_start,
195 u64 remap_sectors, u64 *journal_seq,
196 u64 new_i_size, s64 *i_sectors_delta)
198 struct btree_trans trans;
199 struct btree_iter *dst_iter, *src_iter;
200 struct bkey_s_c src_k;
201 BKEY_PADDED(k) new_dst;
202 struct bkey_on_stack new_src;
203 struct bpos dst_end = dst_start, src_end = src_start;
204 struct bpos dst_want, src_want;
205 u64 src_done, dst_done;
206 int ret = 0, ret2 = 0;
	/* reflink must be enabled and the fs writable */
208 if (!c->opts.reflink)
211 if (!percpu_ref_tryget(&c->writes))
214 bch2_check_set_feature(c, BCH_FEATURE_reflink);
216 dst_end.offset += remap_sectors;
217 src_end.offset += remap_sectors;
219 bkey_on_stack_init(&new_src);
220 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096);
222 src_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, src_start,
224 dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, dst_start,
228 bch2_trans_begin(&trans);
	/* long-running operation: allow the user to interrupt it */
232 if (fatal_signal_pending(current)) {
237 src_k = get_next_src(src_iter, src_end);
238 ret = bkey_err(src_k);
	/* progress through the source, in sectors */
242 src_done = bpos_min(src_iter->pos, src_end).offset -
244 dst_want = POS(dst_start.inode, dst_start.offset + src_done);
	/* hole in the source: punch the corresponding dst range */
246 if (bkey_cmp(dst_iter->pos, dst_want) < 0) {
247 ret = bch2_fpunch_at(&trans, dst_iter, dst_want,
248 journal_seq, i_sectors_delta);
254 BUG_ON(bkey_cmp(dst_iter->pos, dst_want));
256 if (!bkey_cmp(dst_iter->pos, dst_end))
	/* direct source extent: convert it to an indirect one first */
259 if (src_k.k->type != KEY_TYPE_reflink_p) {
260 bkey_on_stack_reassemble(&new_src, c, src_k);
261 src_k = bkey_i_to_s_c(new_src.k);
263 bch2_cut_front(src_iter->pos, new_src.k);
264 bch2_cut_back(src_end, new_src.k);
266 ret = bch2_make_extent_indirect(&trans, src_iter,
271 BUG_ON(src_k.k->type != KEY_TYPE_reflink_p);
	/* build a reflink pointer at dst referencing the same data */
274 if (src_k.k->type == KEY_TYPE_reflink_p) {
275 struct bkey_s_c_reflink_p src_p =
276 bkey_s_c_to_reflink_p(src_k);
277 struct bkey_i_reflink_p *dst_p =
278 bkey_reflink_p_init(&new_dst.k);
	/* account for the part of src_k already consumed */
280 u64 offset = le64_to_cpu(src_p.v->idx) +
281 (src_iter->pos.offset -
282 bkey_start_offset(src_k.k));
284 dst_p->v.idx = cpu_to_le64(offset);
289 new_dst.k.k.p = dst_iter->pos;
	/* don't run past either the source extent or the dst range */
290 bch2_key_resize(&new_dst.k.k,
291 min(src_k.k->p.offset - src_iter->pos.offset,
292 dst_end.offset - dst_iter->pos.offset));
294 ret = bch2_extent_update(&trans, dst_iter, &new_dst.k,
296 new_i_size, i_sectors_delta);
	/* keep src_iter in lockstep with how far dst has progressed */
300 dst_done = dst_iter->pos.offset - dst_start.offset;
301 src_want = POS(src_start.inode, src_start.offset + dst_done);
302 bch2_btree_iter_set_pos(src_iter, src_want);
310 BUG_ON(bkey_cmp(dst_iter->pos, dst_end));
312 BUG_ON(bkey_cmp(dst_iter->pos, dst_end) > 0);
314 dst_done = dst_iter->pos.offset - dst_start.offset;
	/* only extend i_size as far as we actually remapped */
315 new_i_size = min(dst_iter->pos.offset << 9, new_i_size);
317 bch2_trans_begin(&trans);
320 struct bch_inode_unpacked inode_u;
321 struct btree_iter *inode_iter;
323 inode_iter = bch2_inode_peek(&trans, &inode_u,
324 dst_start.inode, BTREE_ITER_INTENT);
325 ret2 = PTR_ERR_OR_ZERO(inode_iter);
328 inode_u.bi_size < new_i_size) {
329 inode_u.bi_size = new_i_size;
330 ret2 = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
331 bch2_trans_commit(&trans, NULL, journal_seq, 0);
	/* retry the i_size update on transaction restart */
333 } while (ret2 == -EINTR);
335 ret = bch2_trans_exit(&trans) ?: ret;
336 bkey_on_stack_exit(&new_src, c);
338 percpu_ref_put(&c->writes);
	/* report progress if any was made, else the first error */
340 return dst_done ?: ret ?: ret2;