// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "subvolume.h"
#include "trace.h"

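/*
 * data_update.c - updating the extent btree after data has been moved or
 * rewritten: drop the pointers that were rewritten, resolve conflicts with
 * racing updates, and splice in pointers to the newly written copy.
 */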
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}
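/*
 * When a move fails, emit a tracepoint describing what happened: the rewrite
 * mask we were given, which rewrites were actually found in the btree, and
 * the old/new/wrote/insert keys involved.
 */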
static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs:   %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold:    ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew:    ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote:  ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}
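/*
 * The index update half of the data move path: called once the new copy has
 * been written, to make the extent btree point at it. Walks each key in
 * op->insert_keys and, for the range it covers, drops the rewritten pointers
 * from the existing extent and splices in the new ones, retrying on
 * transaction restart and counting ranges where we raced as failed.
 */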
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

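	/*
	 * One iteration per btree key overlapping the keys we wrote: each
	 * pass re-peeks the slot and rebuilds @insert from scratch, so
	 * transaction restarts are safe.
	 */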
	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @insert:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
				/*
				 * See comment below:
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				*/
				rewrites_found |= 1U << i;
			}
			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

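		/*
		 * @durability now counts the replicas we're keeping in
		 * @insert plus the ones we just wrote; anything beyond the
		 * requested data_replicas can be shed below.
		 */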
		/* Now, drop excess replicas: */
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
				/*
				 * Currently, we're dropping unneeded replicas
				 * instead of marking them as cached, since
				 * cached data in stripe buckets prevents them
				 * from being reused:
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				*/
				goto restart_drop_extra_replicas;
			}
		}

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id,
					k.k->p, bkey_start_pos(&insert->k)) ?:
		      bch2_insert_snapshot_whiteouts(trans, m->btree_id,
					k.k->p, insert->k.p);
		if (ret)
			goto err;

		ret = bch2_trans_update(trans, &iter, insert,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		      bch2_trans_commit(trans, &op->res,
				NULL,
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->ctxt && m->ctxt->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->ctxt->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->ctxt->stats->sectors_raced);
		}

		this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

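/*
 * Called when the read half of a move completes: kick off the write of the
 * new copy. The bio from the read is reused for the write, so it must own
 * its pages.
 */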
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

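/*
 * Tear down a data_update: drop the nocow locks and device refs taken in
 * bch2_data_update_init(), and release the disk reservation and the pages
 * owned by the write bio.
 */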
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr) {
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
	}

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

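/*
 * "Moving" an unwritten extent doesn't require reading or writing any data:
 * allocate new space, build replacement extents with the unwritten flag set,
 * and update the index directly via __bch2_data_update_index_update().
 */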
void bch2_update_unwritten_extent(struct btree_trans *trans,
				  struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct bch_extent_ptr *ptr;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_SLOTS);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

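/*
 * Set up @m for moving/rewriting @k: take refs on the devices the extent
 * lives on, take nocow locks where needed, initialize the write op and
 * reserve space for any extra replicas. Unwritten extents need special
 * handling: we return -BCH_ERR_unwritten_extent_update so the caller can
 * use bch2_update_unwritten_extent() instead of doing any IO.
 */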
int bch2_data_update_init(struct btree_trans *trans,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	const struct bch_extent_ptr *ptr;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret;

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id	= btree_id;
	m->data_opts	= data_opts;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos	= bkey_start_pos(k.k);
	m->op.version	= k.k->version;
	m->op.target	= data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags	|= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt	= io_opts.background_compression ?: io_opts.compression;
	m->op.watermark	= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

	bkey_for_each_ptr(ptrs, ptr)
		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		bool locked;

		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
			BUG_ON(p.ptr.cached);

			if (crc_is_compressed(p.crc))
				reserve_sectors += k.k->size;

			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
		} else if (!p.ptr.cached) {
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
		}

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce	= p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

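		/*
		 * In nocow mode, the buckets we're reading from must be
		 * locked: with a moving_context we can wait, letting
		 * in-flight reads drain first; otherwise we only trylock
		 * and fail with -BCH_ERR_nocow_lock_blocked.
		 */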
		if (c->opts.nocow_enabled) {
			if (ctxt) {
				move_ctxt_wait_event(ctxt, trans,
					(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
							PTR_BUCKET_POS(c, &p.ptr), 0)) ||
					!atomic_read(&ctxt->read_sectors));

				if (!locked)
					bch2_bucket_nocow_lock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0);
			} else {
				if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0)) {
					ret = -BCH_ERR_nocow_lock_blocked;
					goto err;
				}
			}
			ptrs_locked |= (1U << i);
		}

		i++;
	}

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}

	m->op.nr_replicas += m->data_opts.extra_replicas;
	m->op.nr_replicas_required = m->op.nr_replicas;

	BUG_ON(!m->op.nr_replicas);

	/* Special handling required: */
	if (bkey_extent_is_unwritten(k))
		return -BCH_ERR_unwritten_extent_update;
	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, &p.ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
}

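/*
 * Cached pointers have no durable data to rewrite: turn any requested
 * rewrite of a cached pointer into a kill, which simply drops it.
 */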
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}