libbcachefs/data_update.c
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "trace.h"

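/*
 * Emit the move_extent_finish tracepoint with the key rendered as text; the
 * _enabled() check skips the printbuf formatting entirely when the
 * tracepoint is off.
 */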
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}

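/*
 * Emit the move_extent_fail tracepoint, describing why an index update was
 * abandoned: which pointer slots were marked for rewrite, which of those
 * were actually found in the key being updated, and the old/new/wrote/insert
 * keys involved.
 */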
static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs:   %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold:    ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew:    ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote:  ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}

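/*
 * Update the extent btree with the results of a completed data update: for
 * each key the write op produced, take the current extent in the btree, mark
 * the pointers we rewrote as cached, drop conflicting and excess replicas,
 * append the newly written pointers, and commit - restarting on transaction
 * restarts, and skipping extents that no longer match what we read because
 * we raced with another update.
 */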
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos,	insert);
		bch2_cut_back(new->k.p,		insert);
		bch2_cut_back(insert->k.p,	&new->k_i);

		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				rewrites_found |= 1U << i;
			}
			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

		/* Now, drop excess replicas: */
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;

				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				goto restart_drop_extra_replicas;
			}
		}

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		/*
		 * Check for nonce offset inconsistency:
		 * This is debug code - we've been seeing this bug rarely, and
		 * it's been hard to reproduce, so this should give us some more
		 * information when it does occur:
		 */
		struct printbuf err = PRINTBUF;
		int invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), 0, &err);
		printbuf_exit(&err);

		if (invalid) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "about to insert invalid key in data update path");
			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk:   ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);

			bch2_fatal_error(c);
			goto out;
		}

		if (trace_data_update_enabled()) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk:   ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			trace_data_update(c, buf.buf);
			printbuf_exit(&buf);
		}

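		/*
		 * Chained with ?: so the first failing step aborts the whole
		 * sequence: insert any snapshot whiteouts needed at the
		 * update's start and end positions, mark the key for
		 * rebalance if needed, queue the update, then commit:
		 */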
		ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, bkey_start_pos(&insert->k)) ?:
			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, insert->k.p) ?:
			bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
			bch2_trans_update(trans, &iter, insert,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_trans_commit(trans, &op->res,
				NULL,
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->stats->sectors_raced);
		}

		count_event(c, move_extent_fail);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

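/*
 * Transactional wrapper around __bch2_data_update_index_update(). (Our
 * assumption, not visible in this file: the write path dispatches writes
 * flagged BCH_WRITE_MOVE here instead of its default index update path.)
 */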
int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

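/*
 * Called once the read half of a data update has the data in memory: hand
 * the checksum state from the read over to the write op and kick off the
 * write.
 */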
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

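/*
 * Tear down a data update: drop the nocow locks and per-device refs taken
 * at init time, then release the key buffer, disk reservation and bio
 * pages.
 */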
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));

	bkey_for_each_ptr(ptrs, ptr) {
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
	}

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

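/*
 * For unwritten extents there is no data to read or move: instead, allocate
 * new space, insert pointers marked unwritten, and update the index
 * directly, chunk by chunk as allocations succeed.
 */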
static void bch2_update_unwritten_extent(struct btree_trans *trans,
					 struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_SLOTS);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		bch_err_fn_ratelimited(c, ret);

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

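/*
 * Drop the pointers selected by data_opts.kill_ptrs from @k and write the
 * result back, committing in the caller's transaction.
 */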
int bch2_extent_drop_ptrs(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k,
			  struct data_update_opts data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *n;
	int ret;

	n = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

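	/*
	 * kill_ptrs is a bitmap of pointer slots to drop; drop the highest
	 * set bit each pass, so that the lower slot indices stay stable as
	 * pointers are removed:
	 */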
	while (data_opts.kill_ptrs) {
		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
		struct bch_extent_ptr *ptr;

		bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
		data_opts.kill_ptrs ^= 1U << drop;
	}

	/*
	 * If the new extent no longer has any pointers, bch2_extent_normalize()
	 * will do the appropriate thing with it (turning it into a
	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
	 */
	bch2_extent_normalize(c, bkey_i_to_s(n));

	/*
	 * Since we're not inserting through an extent iterator
	 * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
	 * we aren't using the extent overwrite path to delete, we're
	 * just using the normal key deletion path:
	 */
	if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		n->k.size = 0;

	return bch2_trans_relock(trans) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

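/*
 * Prepare a data update: snapshot the key being moved, set up the write op
 * (replication, checksum/encryption, compression), take per-device refs and
 * nocow locks, and reserve disk space for any extra replicas. Returns
 * -BCH_ERR_data_update_done when there is nothing left for the caller to do:
 * either pointers were dropped in place, or the extent was unwritten and was
 * handled here.
 */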
int bch2_data_update_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret = 0;

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id	= btree_id;
	m->data_opts	= data_opts;
	m->ctxt		= ctxt;
	m->stats	= ctxt ? ctxt->stats : NULL;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos	= bkey_start_pos(k.k);
	m->op.version	= k.k->version;
	m->op.target	= data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags	|= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt	= background_compression(io_opts);
	m->op.watermark		= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

	bkey_for_each_ptr(ptrs, ptr)
		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

	unsigned durability_have = 0, durability_removing = 0;

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		bool locked;

		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
			BUG_ON(p.ptr.cached);

			if (crc_is_compressed(p.crc))
				reserve_sectors += k.k->size;

			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
			durability_removing += bch2_extent_ptr_desired_durability(c, &p);
		} else if (!p.ptr.cached &&
			   !((1U << i) & m->data_opts.kill_ptrs)) {
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
			durability_have += bch2_extent_ptr_durability(c, &p);
		}

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce	= p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

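		/*
		 * Nocow buckets must be locked before we write to them. With
		 * a moving context we can wait for in-flight IO to drain and
		 * then take the lock unconditionally; without one we only
		 * trylock, and bail out with -BCH_ERR_nocow_lock_blocked on
		 * contention rather than blocking:
		 */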
		if (c->opts.nocow_enabled) {
			if (ctxt) {
				move_ctxt_wait_event(ctxt,
						(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
									  PTR_BUCKET_POS(c, &p.ptr), 0)) ||
						(!atomic_read(&ctxt->read_sectors) &&
						 !atomic_read(&ctxt->write_sectors)));

				if (!locked)
					bch2_bucket_nocow_lock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0);
			} else {
				if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
							       PTR_BUCKET_POS(c, &p.ptr), 0)) {
					ret = -BCH_ERR_nocow_lock_blocked;
					goto err;
				}
			}
			ptrs_locked |= (1U << i);
		}

		i++;
	}

	/*
	 * If current extent durability is less than io_opts.data_replicas,
	 * we're not trying to rereplicate the extent up to data_replicas here -
	 * unless extra_replicas was specified
	 *
	 * Increasing replication is an explicit operation triggered by
	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
	 */
	if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
	    durability_have >= io_opts.data_replicas) {
		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
		m->data_opts.rewrite_ptrs = 0;
		/* if iter == NULL, it's just a promote */
		if (iter)
			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
		goto done;
	}

	m->op.nr_replicas = min(durability_removing, io_opts.data_replicas - durability_have) +
		m->data_opts.extra_replicas;
	m->op.nr_replicas_required = m->op.nr_replicas;

	BUG_ON(!m->op.nr_replicas);

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}

	if (bkey_extent_is_unwritten(k)) {
		bch2_update_unwritten_extent(trans, m);
		goto done;
	}

	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, &p.ptr), 0);
		percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
done:
	bch2_data_update_exit(m);
	return ret ?: -BCH_ERR_data_update_done;
}

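/*
 * Reconcile the update options with the key itself: a pointer slot marked
 * for rewrite that turns out to be cached is switched to kill_ptrs instead,
 * since a cached copy can simply be dropped.
 */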
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}