// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "extents.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "subvolume.h"

#include <trace/events/bcachefs.h>
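/*
 * If an extent's position changes as part of an update (old_pos != new_pos)
 * in a btree with snapshots, keys at the old position in ancestor snapshots
 * need matching whiteouts at the new position, so that the update isn't
 * visible in snapshots where it shouldn't be:
 */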
static int insert_snapshot_whiteouts(struct btree_trans *trans,
				     enum btree_id id,
				     struct bpos old_pos,
				     struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, update_iter;
	struct bkey_s_c k;
	snapshot_id_list s;
	int ret = 0;

	if (!btree_type_has_snapshots(id))
		return 0;
	if (bkey_eq(old_pos, new_pos))
		return 0;
	if (!snapshot_t(c, old_pos.snapshot)->children[0])
		return 0;

	darray_init(&s);

	bch2_trans_iter_init(trans, &iter, id, old_pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret || !k.k)
			break;

		if (!bkey_eq(old_pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
			struct bkey_i *update;

			if (snapshot_list_has_ancestor(c, &s, k.k->p.snapshot))
				continue;

			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p = new_pos;
			update->k.p.snapshot = k.k->p.snapshot;

			bch2_trans_iter_init(trans, &update_iter, id, update->k.p,
					     BTREE_ITER_NOT_EXTENTS|
					     BTREE_ITER_ALL_SNAPSHOTS|
					     BTREE_ITER_INTENT);
			ret   = bch2_btree_iter_traverse(&update_iter) ?:
				bch2_trans_update(trans, &update_iter, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
			bch2_trans_iter_exit(trans, &update_iter);
			if (ret)
				break;

			ret = snapshot_list_add(c, &s, k.k->p.snapshot);
			if (ret)
				break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);
	darray_exit(&s);

	return ret;
}
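/* Mark the pointer to @dev in @k cached: */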
static void bch2_bkey_mark_dev_cached(struct bkey_s k, unsigned dev)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			ptr->cached = true;
}
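/*
 * The index update half of a data move: for each key in op->insert_keys,
 * replace the pointers being rewritten in the existing extent with the
 * pointers we just wrote, retrying on transaction restart and skipping
 * extents that changed since they were read:
 */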
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bpos next_pos;
		bool did_work = false;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old))
			goto nowork;
		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos,	insert);
		bch2_cut_back(new->k.p,		insert);
		bch2_cut_back(insert->k.p,	&new->k_i);
		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    bch2_extent_has_ptr(old, p, bkey_i_to_s_c(insert))) {
				/*
				 * If we're going to be adding a pointer to the
				 * same device, we have to drop the old one -
				 * otherwise, we can just mark it cached:
				 */
				if (bch2_bkey_has_device(bkey_i_to_s_c(&new->k_i), p.ptr.dev))
					bch2_bkey_drop_device_noerror(bkey_i_to_s(insert), p.ptr.dev);
				else
					bch2_bkey_mark_dev_cached(bkey_i_to_s(insert), p.ptr.dev);
			}
			i++;
		}
		/* Add new ptrs: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
			const struct bch_extent_ptr *existing_ptr =
				bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev);

			if (existing_ptr && existing_ptr->cached) {
				/*
				 * We're replacing a cached pointer with a
				 * non-cached pointer:
				 */
				bch2_bkey_drop_device_noerror(bkey_i_to_s(insert),
							      existing_ptr->dev);
			} else if (existing_ptr) {
				/*
				 * Raced with another move op? The extent
				 * already has a pointer to the device we just
				 * wrote data to
				 */
				continue;
			}

			bch2_extent_ptr_decoded_append(insert, &p);
			did_work = true;
		}

		if (!did_work)
			goto nowork;
		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}
		next_pos = insert->k.p;

		ret   = insert_snapshot_whiteouts(trans, m->btree_id,
						  k.k->p, insert->k.p) ?:
			bch2_trans_update(trans, &iter, insert,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_trans_commit(trans, &op->res,
				NULL,
				BTREE_INSERT_NOFAIL|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish(&new->k);
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->ctxt && m->ctxt->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->ctxt->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->ctxt->stats->sectors_raced);
		}

		this_cpu_add(c->counters[BCH_COUNTER_move_extent_fail], new->k.size);
		trace_move_extent_fail(&new->k);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}
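/*
 * Entry point from the write completion path: run the index update in its own
 * btree transaction:
 */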
int bch2_data_update_index_update(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
	ret = __bch2_data_update_index_update(&trans, op);
	bch2_trans_exit(&trans);

	return ret;
}
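/*
 * Called when the read half of the move completes: hand the data, and the crc
 * it was read with, off to the write path:
 */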
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}
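/* Release the nocow locks, disk reservation and pages held by a data update: */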
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		bch2_bucket_nocow_unlock(&c->nocow_locks,
					 PTR_BUCKET_POS(c, ptr), 0);

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}
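/*
 * "Moving" an unwritten extent doesn't copy any data: we just allocate new
 * space and replace the old pointers with new unwritten pointers, via the
 * normal index update path:
 */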
void bch2_update_unwritten_extent(struct btree_trans *trans,
				  struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct bch_extent_ptr *ptr;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_SLOTS);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);
		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.alloc_reserve,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}
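/*
 * Set up a data update: initialize the write op from the io/data options,
 * take nocow locks on the buckets the extent points into, and reserve space
 * for any replicas being added:
 */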
int bch2_data_update_init(struct btree_trans *trans,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret;

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id	= btree_id;
	m->data_opts	= data_opts;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos	= bkey_start_pos(k.k);
	m->op.version	= k.k->version;
	m->op.target	= data_opts.target;
	m->op.write_point = wp;
	m->op.flags	|= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_type =
		bch2_compression_opt_to_type[io_opts.background_compression ?:
					     io_opts.compression];
	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
		m->op.alloc_reserve = RESERVE_movinggc;
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		bool locked;

		if (((1U << i) & m->data_opts.rewrite_ptrs) &&
		    p.ptr.cached)
			BUG();

		if (!((1U << i) & m->data_opts.rewrite_ptrs) &&
		    !p.ptr.cached)
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);

		if (((1U << i) & m->data_opts.rewrite_ptrs) &&
		    crc_is_compressed(p.crc))
			reserve_sectors += k.k->size;

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce	= p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

		if (ctxt) {
			move_ctxt_wait_event(ctxt, trans,
					(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
							  PTR_BUCKET_POS(c, &p.ptr), 0)) ||
					!atomic_read(&ctxt->read_sectors));

			if (!locked)
				bch2_bucket_nocow_lock(&c->nocow_locks,
						       PTR_BUCKET_POS(c, &p.ptr), 0);
		} else {
			if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
						       PTR_BUCKET_POS(c, &p.ptr), 0)) {
				ret = -BCH_ERR_nocow_lock_blocked;
				goto err;
			}
		}
		ptrs_locked |= (1U << i);
		i++;
	}
	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}

	m->op.nr_replicas = m->op.nr_replicas_required =
		hweight32(m->data_opts.rewrite_ptrs) + m->data_opts.extra_replicas;

	BUG_ON(!m->op.nr_replicas);

	/* Special handling required: */
	if (bkey_extent_is_unwritten(k))
		return -BCH_ERR_unwritten_extent_update;

	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, &p.ptr), 0);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
}
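/*
 * Cached pointers aren't rewritten, only dropped: clear them from
 * rewrite_ptrs and flag them in kill_ptrs instead:
 */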
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}