// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "subvolume.h"

#include <trace/events/bcachefs.h>

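/*
 * If a data update changes the position of a key in a btree with snapshots,
 * versions of that key in descendent snapshots must not see it reappear at
 * the new position: scan the keys at @old_pos across snapshots and insert a
 * KEY_TYPE_whiteout at @new_pos for each descendent snapshot that has its
 * own version of the key.
 */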
static int insert_snapshot_whiteouts(struct btree_trans *trans,
				     enum btree_id id,
				     struct bpos old_pos,
				     struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, iter2;
	struct bkey_s_c k, k2;
	snapshot_id_list s;
	struct bkey_i *update;
	int ret;

	if (!btree_type_has_snapshots(id))
		return 0;

	darray_init(&s);

	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
		return 0;

	bch2_trans_iter_init(trans, &iter, id, old_pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		if (!k.k)
			break;

		if (!bkey_eq(old_pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot) &&
		    !snapshot_list_has_ancestor(c, &s, k.k->p.snapshot)) {
			struct bpos whiteout_pos = new_pos;

			whiteout_pos.snapshot = k.k->p.snapshot;

			bch2_trans_iter_init(trans, &iter2, id, whiteout_pos,
					     BTREE_ITER_NOT_EXTENTS|
					     BTREE_ITER_INTENT);
			k2 = bch2_btree_iter_peek_slot(&iter2);
			ret = bkey_err(k2);

			if (!ret && k2.k->type == KEY_TYPE_deleted) {
				update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
				ret = PTR_ERR_OR_ZERO(update);
				if (ret)
					break;

				bkey_init(&update->k);
				update->k.p = whiteout_pos;
				update->k.type = KEY_TYPE_whiteout;

				ret = bch2_trans_update(trans, &iter2, update,
							BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
			}
			bch2_trans_iter_exit(trans, &iter2);

			if (ret)
				break;

			ret = snapshot_list_add(c, &s, k.k->p.snapshot);
			if (ret)
				break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);
	darray_exit(&s);

	return ret;
}

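/*
 * Wrapper that formats the key as text for the move_extent_finish
 * tracepoint; the printbuf is only built when the tracepoint is enabled.
 */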
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}

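/*
 * Emit the move_extent_fail tracepoint with a description of why the index
 * update didn't happen: which pointers were supposed to be rewritten, which
 * rewrites were actually found in the btree, and the old/new/wrote/insert
 * keys involved.
 */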
static void trace_move_extent_fail2(struct data_update *m,
			 struct bkey_s_c new,
			 struct bkey_s_c wrote,
			 struct bkey_i *insert,
			 const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			struct bkey_s new_s;
			new_s.k = (void *) new.k;
			new_s.v = (void *) new.v;

			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs:   %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold:    ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew:    ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote:  ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}

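/*
 * The index update portion of a data move: for each key written by the write
 * op, take the extent currently in the btree, drop the pointers that were
 * rewritten, resolve conflicts with any racing updates, add the pointers
 * just written, and commit - retrying on transaction restart, and counting
 * keys we raced with as failed.
 */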
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
				/*
				 * See comment below:
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				*/
				rewrites_found |= 1U << i;
			}
			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

		/* Now, drop excess replicas: */
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
				/*
				 * Currently, we're dropping unneeded replicas
				 * instead of marking them as cached, since
				 * cached data in stripe buckets prevents them
				 * from being reused:
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				*/
				goto restart_drop_extra_replicas;
			}
		}

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		if (!bkey_eq(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
			ret = insert_snapshot_whiteouts(trans, m->btree_id, k.k->p,
							bkey_start_pos(&insert->k));
			if (ret)
				goto err;
		}

		if (!bkey_eq(insert->k.p, k.k->p)) {
			ret = insert_snapshot_whiteouts(trans, m->btree_id,
							k.k->p, insert->k.p);
			if (ret)
				goto err;
		}

		ret = bch2_trans_update(trans, &iter, insert,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_trans_commit(trans, &op->res,
				NULL,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->ctxt && m->ctxt->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->ctxt->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->ctxt->stats->sectors_raced);
		}

		this_cpu_add(c->counters[BCH_COUNTER_move_extent_fail], new->k.size);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

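/* Entry point for the write path: runs the index update in a transaction. */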
int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(&trans, op));
}

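/*
 * Called when the read side of the move completes: set up the write bio to
 * cover the (possibly compressed) data that was read, then kick off the
 * write.
 */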
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

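/*
 * Tear down a data update: drop the nocow locks and device refs taken at
 * init time, and release the key buffer, disk reservation and write bio
 * pages.
 */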
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr) {
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
	}

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

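/*
 * Unwritten extents have no data to read or write; "moving" one is just an
 * allocation plus index update. Allocate as much as the write point allows
 * per iteration, mark the new pointers unwritten, and update the index
 * until the whole extent has been reallocated.
 */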
void bch2_update_unwritten_extent(struct btree_trans *trans,
				  struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct bch_extent_ptr *ptr;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_SLOTS);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.alloc_reserve,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

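/*
 * Prepare a data update: snapshot the key being moved, initialize the write
 * op, take device refs and nocow locks on the buckets the extent points
 * into, and reserve space for any extra replicas. Cleaned up by
 * bch2_data_update_exit(); unwritten extents get special handling via
 * bch2_update_unwritten_extent().
 */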
int bch2_data_update_init(struct btree_trans *trans,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	const struct bch_extent_ptr *ptr;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret = 0;

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id = btree_id;
	m->data_opts = data_opts;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos = bkey_start_pos(k.k);
	m->op.version = k.k->version;
	m->op.target = data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_type =
		bch2_compression_opt_to_type[io_opts.background_compression ?:
					     io_opts.compression];
	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
		m->op.alloc_reserve = RESERVE_movinggc;

	bkey_for_each_ptr(ptrs, ptr)
		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		bool locked;

		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
			BUG_ON(p.ptr.cached);

			if (crc_is_compressed(p.crc))
				reserve_sectors += k.k->size;

			m->op.nr_replicas += bch2_extent_ptr_durability(c, &p);
		} else if (!p.ptr.cached) {
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
		}

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce = p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

		if (c->opts.nocow_enabled) {
			if (ctxt) {
				move_ctxt_wait_event(ctxt, trans,
					(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
								  PTR_BUCKET_POS(c, &p.ptr), 0)) ||
					!atomic_read(&ctxt->read_sectors));

				if (!locked)
					bch2_bucket_nocow_lock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0);
			} else {
				if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0)) {
					ret = -BCH_ERR_nocow_lock_blocked;
					goto err;
				}
			}
			ptrs_locked |= (1U << i);
		}

		i++;
	}

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}

	m->op.nr_replicas += m->data_opts.extra_replicas;
	m->op.nr_replicas_required = m->op.nr_replicas;

	BUG_ON(!m->op.nr_replicas);

	/* Special handling required: */
	if (bkey_extent_is_unwritten(k))
		return -BCH_ERR_unwritten_extent_update;
	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, &p.ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
}

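/*
 * Cached pointers aren't rewritten, only dropped: convert rewrite_ptrs bits
 * that refer to cached pointers into kill_ptrs bits.
 */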
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}