// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "trace.h"

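/*
 * Tracepoint helpers: format the keys involved in a move into a printbuf so
 * the move_extent tracepoints can log them in human-readable form. The
 * *_enabled() checks keep the formatting work off the fast path when the
 * tracepoints are off.
 */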
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}

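/*
 * When an index update is dropped, log the old extent, the key we raced
 * with, what we wrote, and which rewrite_ptrs were still found in the
 * updated key. rewrite_ptrs is a bitmask; e.g. (illustrative) 0x5 means
 * pointers 0 and 2 were to be rewritten.
 */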
static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold: ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew: ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote: ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}

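/*
 * The core index update loop: for each key in the write's keylist, re-read
 * the extent from the btree, drop the pointers being rewritten, drop
 * anything that conflicts with or duplicates what we just wrote, drop excess
 * replicas, append the new pointers, and commit - retrying from the top on
 * transaction restart, and skipping keys we raced with.
 */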
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
				/*
				 * Alternative, see the note at
				 * restart_drop_extra_replicas below:
				 * bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				 */
				rewrites_found |= 1U << i;
			}

			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

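		/*
		 * Illustrative durability math (numbers are an example, not
		 * from this code): two durability-1 pointers kept in @insert
		 * plus one just written in @new gives durability 3; with
		 * data_replicas = 2, the loop below drops one excess replica.
		 */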
		/* Now, drop excess replicas: */
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
				/*
				 * Currently, we're dropping unneeded replicas
				 * instead of marking them as cached, since
				 * cached data in stripe buckets prevents them
				 * from being reused:
				 * bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				 */
				goto restart_drop_extra_replicas;
			}
		}

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

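		/*
		 * Commit sequence: insert snapshot whiteouts where needed,
		 * flag the key for background rebalance if its target or
		 * compression options say so, then do the update and commit
		 * in one transaction.
		 */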
		next_pos = insert->k.p;

		ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, bkey_start_pos(&insert->k)) ?:
			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, insert->k.p) ?:
			bch2_bkey_set_needs_rebalance(c, insert,
						      op->opts.background_target,
						      op->opts.background_compression) ?:
			bch2_trans_update(trans, &iter, insert,
					  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_trans_commit(trans, &op->res,
					  NULL,
					  BTREE_INSERT_NOCHECK_RW|
					  BTREE_INSERT_NOFAIL|
					  m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->stats->sectors_raced);
		}

		this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

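/*
 * Wrapper that runs the index update inside its own btree transaction, for
 * callers that don't already have one.
 */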
int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

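/*
 * Called when the read side of a move has completed: point the write at the
 * data we just read and kick off bch2_write. bi_size is in bytes, so the
 * compressed size in sectors is shifted by 9.
 */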
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

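/*
 * Release everything taken in bch2_data_update_init(): per-pointer nocow
 * bucket locks and device refs, then the key buffer, the disk reservation,
 * and the write bio's pages.
 */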
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr) {
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
	}

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

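/*
 * Moving an unwritten (fallocated) extent moves no data: allocate new space,
 * build pointers marked unwritten, and run the normal index update path.
 */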
void bch2_update_unwritten_extent(struct btree_trans *trans,
				  struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct bch_extent_ptr *ptr;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_SLOTS);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

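/*
 * Prepare a data update: snapshot the key being moved, set up the write op
 * from the io/data options, take device refs and (with nocow enabled) bucket
 * locks for each pointer, and reserve space for any extra replicas.
 *
 * Rough usage sketch - illustrative only, the real driver of this API is the
 * move path:
 *
 *	bch2_data_update_init(trans, ctxt, &u, wp, io_opts, data_opts,
 *			      btree_id, k);
 *	...read the extent...
 *	bch2_data_update_read_done(&u, crc);	// kicks off write + index update
 *	bch2_data_update_exit(&u);
 */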
int bch2_data_update_init(struct btree_trans *trans,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	const struct bch_extent_ptr *ptr;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret = 0;

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id = btree_id;
	m->data_opts = data_opts;
	m->ctxt = ctxt;
	m->stats = ctxt ? ctxt->stats : NULL;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos = bkey_start_pos(k.k);
	m->op.version = k.k->version;
	m->op.target = data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt = io_opts.background_compression ?: io_opts.compression;
	m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

	bkey_for_each_ptr(ptrs, ptr)
		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		bool locked;

		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
			BUG_ON(p.ptr.cached);

			if (crc_is_compressed(p.crc))
				reserve_sectors += k.k->size;

			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
		} else if (!p.ptr.cached) {
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
		}

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce = p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

		if (c->opts.nocow_enabled) {
			if (ctxt) {
				move_ctxt_wait_event(ctxt,
					(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
								  PTR_BUCKET_POS(c, &p.ptr), 0)) ||
					!atomic_read(&ctxt->read_sectors));

				if (!locked)
					bch2_bucket_nocow_lock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0);
			} else {
				if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0)) {
					ret = -BCH_ERR_nocow_lock_blocked;
					goto err;
				}
			}
			ptrs_locked |= (1U << i);
		}

		i++;
	}

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}

	m->op.nr_replicas += m->data_opts.extra_replicas;
	m->op.nr_replicas_required = m->op.nr_replicas;

	BUG_ON(!m->op.nr_replicas);

	/* Special handling required: */
	if (bkey_extent_is_unwritten(k))
		return -BCH_ERR_unwritten_extent_update;

	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, &p.ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
}

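/*
 * Cached pointers can't be rewritten, so normalize the caller's options:
 * e.g. if bit 1 of rewrite_ptrs is set but pointer 1 is cached, move that
 * bit into kill_ptrs so the pointer is dropped instead.
 */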
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}