Move c_src dirs back to toplevel
diff --git a/libbcachefs/data_update.c b/libbcachefs/data_update.c
index 3b442b01ca869c1a869e30c0ddaaf516bbd2d8d1..6f13477ff652e9e0552b9fbbb49009a5651d6d76 100644
 #include "buckets.h"
 #include "data_update.h"
 #include "ec.h"
+#include "error.h"
 #include "extents.h"
-#include "io.h"
+#include "io_write.h"
 #include "keylist.h"
 #include "move.h"
+#include "nocow_locking.h"
+#include "rebalance.h"
 #include "subvolume.h"
+#include "trace.h"
 
-#include <trace/events/bcachefs.h>
-
-static int insert_snapshot_whiteouts(struct btree_trans *trans,
-                                    enum btree_id id,
-                                    struct bpos old_pos,
-                                    struct bpos new_pos)
+static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
 {
-       struct bch_fs *c = trans->c;
-       struct btree_iter iter, update_iter;
-       struct bkey_s_c k;
-       snapshot_id_list s;
-       int ret;
+       if (trace_move_extent_finish_enabled()) {
+               struct printbuf buf = PRINTBUF;
 
-       if (!btree_type_has_snapshots(id))
-               return 0;
+               bch2_bkey_val_to_text(&buf, c, k);
+               trace_move_extent_finish(c, buf.buf);
+               printbuf_exit(&buf);
+       }
+}
 
-       darray_init(&s);
+static void trace_move_extent_fail2(struct data_update *m,
+                        struct bkey_s_c new,
+                        struct bkey_s_c wrote,
+                        struct bkey_i *insert,
+                        const char *msg)
+{
+       struct bch_fs *c = m->op.c;
+       struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
+       const union bch_extent_entry *entry;
+       struct bch_extent_ptr *ptr;
+       struct extent_ptr_decoded p;
+       struct printbuf buf = PRINTBUF;
+       unsigned i, rewrites_found = 0;
 
-       if (!bkey_cmp(old_pos, new_pos))
-               return 0;
+       if (!trace_move_extent_fail_enabled())
+               return;
 
-       if (!snapshot_t(c, old_pos.snapshot)->children[0])
-               return 0;
+       prt_str(&buf, msg);
 
-       bch2_trans_iter_init(trans, &iter, id, old_pos,
-                            BTREE_ITER_NOT_EXTENTS|
-                            BTREE_ITER_ALL_SNAPSHOTS);
-       while (1) {
-               k = bch2_btree_iter_prev(&iter);
-               ret = bkey_err(k);
-               if (ret)
-                       break;
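+       /*
+        * Recompute which of the requested rewrite_ptrs are actually present
+        * in @insert, mirroring the check in __bch2_data_update_index_update():
+        */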
+       if (insert) {
+               i = 0;
+               bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
+                       if (((1U << i) & m->data_opts.rewrite_ptrs) &&
+                           (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
+                           !ptr->cached)
+                               rewrites_found |= 1U << i;
+                       i++;
+               }
+       }
 
-               if (bkey_cmp(old_pos, k.k->p))
-                       break;
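+       /*
+        * One digit per pointer, bit 0 first: e.g. a rewrite_ptrs mask of 0x5
+        * prints as "1010":
+        */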
+       prt_printf(&buf, "\nrewrite ptrs:   %u%u%u%u",
+                  (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
+                  (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
+                  (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
+                  (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);
 
-               if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
-                       struct bkey_i *update;
+       prt_printf(&buf, "\nrewrites found: %u%u%u%u",
+                  (rewrites_found & (1 << 0)) != 0,
+                  (rewrites_found & (1 << 1)) != 0,
+                  (rewrites_found & (1 << 2)) != 0,
+                  (rewrites_found & (1 << 3)) != 0);
 
-                       if (snapshot_list_has_ancestor(c, &s, k.k->p.snapshot))
-                               continue;
+       prt_str(&buf, "\nold:    ");
+       bch2_bkey_val_to_text(&buf, c, old);
 
-                       update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
+       prt_str(&buf, "\nnew:    ");
+       bch2_bkey_val_to_text(&buf, c, new);
 
-                       ret = PTR_ERR_OR_ZERO(update);
-                       if (ret)
-                               break;
-
-                       bkey_init(&update->k);
-                       update->k.p = new_pos;
-                       update->k.p.snapshot = k.k->p.snapshot;
-
-                       bch2_trans_iter_init(trans, &update_iter, id, update->k.p,
-                                            BTREE_ITER_NOT_EXTENTS|
-                                            BTREE_ITER_ALL_SNAPSHOTS|
-                                            BTREE_ITER_INTENT);
-                       ret   = bch2_btree_iter_traverse(&update_iter) ?:
-                               bch2_trans_update(trans, &update_iter, update,
-                                         BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
-                       bch2_trans_iter_exit(trans, &update_iter);
-                       if (ret)
-                               break;
+       prt_str(&buf, "\nwrote:  ");
+       bch2_bkey_val_to_text(&buf, c, wrote);
 
-                       ret = snapshot_list_add(c, &s, k.k->p.snapshot);
-                       if (ret)
-                               break;
-               }
+       if (insert) {
+               prt_str(&buf, "\ninsert: ");
+               bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
        }
-       bch2_trans_iter_exit(trans, &iter);
-       darray_exit(&s);
 
-       return ret;
+       trace_move_extent_fail(c, buf.buf);
+       printbuf_exit(&buf);
 }
 
-static void bch2_bkey_mark_dev_cached(struct bkey_s k, unsigned dev)
-{
-       struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
-       struct bch_extent_ptr *ptr;
-
-       bkey_for_each_ptr(ptrs, ptr)
-               if (ptr->dev == dev)
-                       ptr->cached = true;
-}
-
-static int bch2_data_update_index_update(struct bch_write_op *op)
+static int __bch2_data_update_index_update(struct btree_trans *trans,
+                                          struct bch_write_op *op)
 {
        struct bch_fs *c = op->c;
-       struct btree_trans trans;
        struct btree_iter iter;
        struct data_update *m =
                container_of(op, struct data_update, op);
-       struct open_bucket *ec_ob = ec_open_bucket(c, &op->open_buckets);
        struct keylist *keys = &op->insert_keys;
        struct bkey_buf _new, _insert;
        int ret = 0;
@@ -113,26 +103,26 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
        bch2_bkey_buf_init(&_insert);
        bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
 
-       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
-
-       bch2_trans_iter_init(&trans, &iter, m->btree_id,
+       bch2_trans_iter_init(trans, &iter, m->btree_id,
                             bkey_start_pos(&bch2_keylist_front(keys)->k),
                             BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
        while (1) {
                struct bkey_s_c k;
                struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
-               struct bkey_i *insert;
+               struct bkey_i *insert = NULL;
                struct bkey_i_extent *new;
-               const union bch_extent_entry *entry;
+               const union bch_extent_entry *entry_c;
+               union bch_extent_entry *entry;
                struct extent_ptr_decoded p;
+               struct bch_extent_ptr *ptr;
+               const struct bch_extent_ptr *ptr_c;
                struct bpos next_pos;
-               bool did_work = false;
                bool should_check_enospc;
                s64 i_sectors_delta = 0, disk_sectors_delta = 0;
-               unsigned i;
+               unsigned rewrites_found = 0, durability, i;
 
-               bch2_trans_begin(&trans);
+               bch2_trans_begin(trans);
 
                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
@@ -141,8 +131,11 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
 
                new = bkey_i_to_extent(bch2_keylist_front(keys));
 
-               if (!bch2_extents_match(k, old))
-                       goto nomatch;
+               if (!bch2_extents_match(k, old)) {
+                       trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
+                                               NULL, "no match:");
+                       goto nowork;
+               }
 
                bkey_reassemble(_insert.k, k);
                insert = _insert.k;
@@ -165,45 +158,70 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
                 * First, drop rewrite_ptrs from @insert:
                 */
                i = 0;
-               bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
+               bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
                        if (((1U << i) & m->data_opts.rewrite_ptrs) &&
-                           bch2_extent_has_ptr(old, p, bkey_i_to_s_c(insert))) {
-                               /*
-                                * If we're going to be adding a pointer to the
-                                * same device, we have to drop the old one -
-                                * otherwise, we can just mark it cached:
-                                */
-                               if (bch2_bkey_has_device(bkey_i_to_s_c(&new->k_i), p.ptr.dev))
-                                       bch2_bkey_drop_device_noerror(bkey_i_to_s(insert), p.ptr.dev);
-                               else
-                                       bch2_bkey_mark_dev_cached(bkey_i_to_s(insert), p.ptr.dev);
+                           (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
+                           !ptr->cached) {
+                               bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
+                               rewrites_found |= 1U << i;
                        }
                        i++;
                }
 
+               if (m->data_opts.rewrite_ptrs &&
+                   !rewrites_found &&
+                   bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
+                       trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
+                       goto nowork;
+               }
 
-               /* Add new ptrs: */
-               extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
-                       if (bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev)) {
-                               /*
-                                * raced with another move op? extent already
-                                * has a pointer to the device we just wrote
-                                * data to
-                                */
-                               continue;
+               /*
+                * A replica that we just wrote might conflict with a replica
+                * that we want to keep, due to racing with another move:
+                */
+restart_drop_conflicting_replicas:
+               extent_for_each_ptr(extent_i_to_s(new), ptr)
+                       if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
+                           !ptr_c->cached) {
+                               bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
+                               goto restart_drop_conflicting_replicas;
                        }
 
-                       bch2_extent_ptr_decoded_append(insert, &p);
-                       did_work = true;
+               if (!bkey_val_u64s(&new->k)) {
+                       trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
+                       goto nowork;
+               }
+
+               /* Now, drop pointers that conflict with what we just wrote: */
+               extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
+                       if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
+                               bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
+
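+               /*
+                * Total durability of what we're about to insert: the dirty
+                * replicas we're keeping in @insert plus the replicas we just
+                * wrote:
+                */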
+               durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
+                       bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
+
+               /* Now, drop excess replicas: */
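+               /*
+                * (restarting from the top after each change, since we modify
+                * the extent as we iterate over it)
+                */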
+restart_drop_extra_replicas:
+               bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
+                       unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
+
+                       if (!p.ptr.cached &&
+                           durability - ptr_durability >= m->op.opts.data_replicas) {
+                               durability -= ptr_durability;
+
+                               bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
+                               goto restart_drop_extra_replicas;
+                       }
                }
 
-               if (!did_work)
-                       goto nomatch;
+               /* Finally, add the pointers we just wrote: */
+               extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
+                       bch2_extent_ptr_decoded_append(insert, &p);
 
                bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
                bch2_extent_normalize(c, bkey_i_to_s(insert));
 
-               ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
+               ret = bch2_sum_sector_overwrites(trans, &iter, insert,
                                                 &should_check_enospc,
                                                 &i_sectors_delta,
                                                 &disk_sectors_delta);
@@ -221,19 +239,67 @@ static int bch2_data_update_index_update(struct bch_write_op *op)
 
                next_pos = insert->k.p;
 
-               ret   = insert_snapshot_whiteouts(&trans, m->btree_id,
-                                                 k.k->p, insert->k.p) ?:
-                       bch2_trans_update(&trans, &iter, insert,
+               /*
+                * Check for nonce offset inconsistency:
+                * This is debug code - we've been seeing this bug rarely, and
+                * it's been hard to reproduce, so this should give us some more
+                * information when it does occur:
+                */
+               struct printbuf err = PRINTBUF;
+               int invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), 0, &err);
+               printbuf_exit(&err);
+
+               if (invalid) {
+                       struct printbuf buf = PRINTBUF;
+
+                       prt_str(&buf, "about to insert invalid key in data update path");
+                       prt_str(&buf, "\nold: ");
+                       bch2_bkey_val_to_text(&buf, c, old);
+                       prt_str(&buf, "\nk:   ");
+                       bch2_bkey_val_to_text(&buf, c, k);
+                       prt_str(&buf, "\nnew: ");
+                       bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
+
+                       bch2_print_string_as_lines(KERN_ERR, buf.buf);
+                       printbuf_exit(&buf);
+
+                       bch2_fatal_error(c);
+                       goto out;
+               }
+
+               if (trace_data_update_enabled()) {
+                       struct printbuf buf = PRINTBUF;
+
+                       prt_str(&buf, "\nold: ");
+                       bch2_bkey_val_to_text(&buf, c, old);
+                       prt_str(&buf, "\nk:   ");
+                       bch2_bkey_val_to_text(&buf, c, k);
+                       prt_str(&buf, "\nnew: ");
+                       bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
+
+                       trace_data_update(c, buf.buf);
+                       printbuf_exit(&buf);
+               }
+
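+               /*
+                * The update may not cover the same range we read from, so
+                * whiteouts may be needed at both the start and the end of the
+                * new extent (see bch2_insert_snapshot_whiteouts()):
+                */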
+               ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
+                                               k.k->p, bkey_start_pos(&insert->k)) ?:
+                       bch2_insert_snapshot_whiteouts(trans, m->btree_id,
+                                               k.k->p, insert->k.p) ?:
+                       bch2_bkey_set_needs_rebalance(c, insert,
+                                                     op->opts.background_target,
+                                                     op->opts.background_compression) ?:
+                       bch2_trans_update(trans, &iter, insert,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
-                       bch2_trans_commit(&trans, &op->res,
-                               op_journal_seq(op),
-                               BTREE_INSERT_NOFAIL|
+                       bch2_trans_commit(trans, &op->res,
+                               NULL,
+                               BCH_TRANS_COMMIT_no_check_rw|
+                               BCH_TRANS_COMMIT_no_enospc|
                                m->data_opts.btree_insert_flags);
                if (!ret) {
                        bch2_btree_iter_set_pos(&iter, next_pos);
-                       atomic_long_inc(&c->extent_migrate_done);
-                       if (ec_ob)
-                               bch2_ob_add_backpointer(c, ec_ob, &insert->k);
+
+                       this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
+                       trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
                }
 err:
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -241,44 +307,40 @@ err:
                if (ret)
                        break;
 next:
-               while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+               while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
                        bch2_keylist_pop_front(keys);
                        if (bch2_keylist_empty(keys))
                                goto out;
                }
                continue;
-nomatch:
-               if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
-                       struct printbuf buf = PRINTBUF;
-
-                       bch2_bkey_val_to_text(&buf, c, old);
-                       bch_info(c, "no match for %s", buf.buf);
-                       printbuf_exit(&buf);
-               }
-
-               if (m->ctxt) {
+nowork:
+               if (m->stats) {
                        BUG_ON(k.k->p.offset <= iter.pos.offset);
-                       atomic64_inc(&m->ctxt->stats->keys_raced);
+                       atomic64_inc(&m->stats->keys_raced);
                        atomic64_add(k.k->p.offset - iter.pos.offset,
-                                    &m->ctxt->stats->sectors_raced);
+                                    &m->stats->sectors_raced);
                }
-               atomic_long_inc(&c->extent_migrate_raced);
-               trace_move_race(&new->k);
+
+               count_event(c, move_extent_fail);
+
                bch2_btree_iter_advance(&iter);
                goto next;
        }
 out:
-       bch2_trans_iter_exit(&trans, &iter);
-       bch2_trans_exit(&trans);
+       bch2_trans_iter_exit(trans, &iter);
        bch2_bkey_buf_exit(&_insert, c);
        bch2_bkey_buf_exit(&_new, c);
        BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
        return ret;
 }
 
+int bch2_data_update_index_update(struct bch_write_op *op)
+{
+       return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
+}
+
 void bch2_data_update_read_done(struct data_update *m,
-                               struct bch_extent_crc_unpacked crc,
-                               struct closure *cl)
+                               struct bch_extent_crc_unpacked crc)
 {
        /* write bio must own pages: */
        BUG_ON(!m->op.wbio.bio.bi_vcnt);
@@ -286,64 +348,212 @@ void bch2_data_update_read_done(struct data_update *m,
        m->op.crc = crc;
        m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
 
-       closure_call(&m->op.cl, bch2_write, NULL, cl);
+       closure_call(&m->op.cl, bch2_write, NULL, NULL);
 }
 
 void bch2_data_update_exit(struct data_update *update)
 {
        struct bch_fs *c = update->op.c;
+       struct bkey_ptrs_c ptrs =
+               bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
+
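+       /*
+        * Release the nocow locks and per-device refs taken in
+        * bch2_data_update_init():
+        */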
+       bkey_for_each_ptr(ptrs, ptr) {
+               if (c->opts.nocow_enabled)
+                       bch2_bucket_nocow_unlock(&c->nocow_locks,
+                                                PTR_BUCKET_POS(c, ptr), 0);
+               percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+       }
 
        bch2_bkey_buf_exit(&update->k, c);
        bch2_disk_reservation_put(c, &update->op.res);
        bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
 }
 
-int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
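+/*
+ * Unwritten extents are reservations with no data on disk yet: there's nothing
+ * to read or rewrite, so just allocate new space and emit keys with unwritten
+ * pointers, chunk by chunk, until the whole extent has been moved:
+ */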
+static void bch2_update_unwritten_extent(struct btree_trans *trans,
+                                 struct data_update *update)
+{
+       struct bch_fs *c = update->op.c;
+       struct bio *bio = &update->op.wbio.bio;
+       struct bkey_i_extent *e;
+       struct write_point *wp;
+       struct closure cl;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       int ret;
+
+       closure_init_stack(&cl);
+       bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
+
+       while (bio_sectors(bio)) {
+               unsigned sectors = bio_sectors(bio);
+
+               bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
+                                    BTREE_ITER_SLOTS);
+               ret = lockrestart_do(trans, ({
+                       k = bch2_btree_iter_peek_slot(&iter);
+                       bkey_err(k);
+               }));
+               bch2_trans_iter_exit(trans, &iter);
+
+               if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
+                       break;
+
+               e = bkey_extent_init(update->op.insert_keys.top);
+               e->k.p = update->op.pos;
+
+               ret = bch2_alloc_sectors_start_trans(trans,
+                               update->op.target,
+                               false,
+                               update->op.write_point,
+                               &update->op.devs_have,
+                               update->op.nr_replicas,
+                               update->op.nr_replicas,
+                               update->op.watermark,
+                               0, &cl, &wp);
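+               /*
+                * The allocator had to block: drop btree locks, wait on the
+                * closure until space frees up, then retry the allocation:
+                */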
+               if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
+                       bch2_trans_unlock(trans);
+                       closure_sync(&cl);
+                       continue;
+               }
+
+               bch_err_fn_ratelimited(c, ret);
+
+               if (ret)
+                       return;
+
+               sectors = min(sectors, wp->sectors_free);
+
+               bch2_key_resize(&e->k, sectors);
+
+               bch2_open_bucket_get(c, wp, &update->op.open_buckets);
+               bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+               bch2_alloc_sectors_done(c, wp);
+
+               bio_advance(bio, sectors << 9);
+               update->op.pos.offset += sectors;
+
+               extent_for_each_ptr(extent_i_to_s(e), ptr)
+                       ptr->unwritten = true;
+               bch2_keylist_push(&update->op.insert_keys);
+
+               ret = __bch2_data_update_index_update(trans, &update->op);
+
+               bch2_open_buckets_put(c, &update->op.open_buckets);
+
+               if (ret)
+                       break;
+       }
+
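+       /*
+        * @cl is on the stack - if the allocator still holds a reference, wait
+        * for it to be dropped before returning:
+        */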
+       if (closure_nr_remaining(&cl) != 1) {
+               bch2_trans_unlock(trans);
+               closure_sync(&cl);
+       }
+}
+
+int bch2_extent_drop_ptrs(struct btree_trans *trans,
+                         struct btree_iter *iter,
+                         struct bkey_s_c k,
+                         struct data_update_opts data_opts)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_i *n;
+       int ret;
+
+       n = bch2_bkey_make_mut_noupdate(trans, k);
+       ret = PTR_ERR_OR_ZERO(n);
+       if (ret)
+               return ret;
+
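+       /*
+        * kill_ptrs is a bitmask of pointer indices to drop: go highest index
+        * first, so that dropping a pointer doesn't shift the indices of the
+        * pointers still to be dropped:
+        */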
+       while (data_opts.kill_ptrs) {
+               unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
+               struct bch_extent_ptr *ptr;
+
+               bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
+               data_opts.kill_ptrs ^= 1U << drop;
+       }
+
+       /*
+        * If the new extent no longer has any pointers, bch2_extent_normalize()
+        * will do the appropriate thing with it (turning it into a
+        * KEY_TYPE_error key, or just a discard if it was a cached extent)
+        */
+       bch2_extent_normalize(c, bkey_i_to_s(n));
+
+       /*
+        * Since we're not inserting through an extent iterator
+        * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+        * we aren't using the extent overwrite path to delete, we're
+        * just using the normal key deletion path:
+        */
+       if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+               n->k.size = 0;
+
+       return bch2_trans_relock(trans) ?:
+               bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+               bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+}
+
+int bch2_data_update_init(struct btree_trans *trans,
+                         struct btree_iter *iter,
+                         struct moving_context *ctxt,
+                         struct data_update *m,
                          struct write_point_specifier wp,
                          struct bch_io_opts io_opts,
                          struct data_update_opts data_opts,
                          enum btree_id btree_id,
                          struct bkey_s_c k)
 {
+       struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
-       int ret;
+       unsigned ptrs_locked = 0;
+       int ret = 0;
 
        bch2_bkey_buf_init(&m->k);
        bch2_bkey_buf_reassemble(&m->k, c, k);
        m->btree_id     = btree_id;
        m->data_opts    = data_opts;
+       m->ctxt         = ctxt;
+       m->stats        = ctxt ? ctxt->stats : NULL;
 
        bch2_write_op_init(&m->op, c, io_opts);
        m->op.pos       = bkey_start_pos(k.k);
        m->op.version   = k.k->version;
-       m->op.target    = data_opts.target,
+       m->op.target    = data_opts.target;
        m->op.write_point = wp;
+       m->op.nr_replicas = 0;
        m->op.flags     |= BCH_WRITE_PAGES_STABLE|
                BCH_WRITE_PAGES_OWNED|
                BCH_WRITE_DATA_ENCODED|
-               BCH_WRITE_FROM_INTERNAL|
+               BCH_WRITE_MOVE|
                m->data_opts.write_flags;
-       m->op.compression_type =
-               bch2_compression_opt_to_type[io_opts.background_compression ?:
-                                            io_opts.compression];
-       if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
-               m->op.alloc_reserve = RESERVE_movinggc;
-       m->op.index_update_fn   = bch2_data_update_index_update;
+       m->op.compression_opt   = io_opts.background_compression ?: io_opts.compression;
+       m->op.watermark         = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
+
+       bkey_for_each_ptr(ptrs, ptr)
+               percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+
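+       /*
+        * durability_have: durability of the dirty replicas we're keeping;
+        * durability_removing: durability of the replicas flagged for rewrite:
+        */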
+       unsigned durability_have = 0, durability_removing = 0;
 
        i = 0;
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-               if (p.ptr.cached)
-                       m->data_opts.rewrite_ptrs &= ~(1U << i);
+               bool locked;
 
-               if (!((1U << i) & m->data_opts.rewrite_ptrs))
-                       bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+               if (((1U << i) & m->data_opts.rewrite_ptrs)) {
+                       BUG_ON(p.ptr.cached);
 
-               if (((1U << i) & m->data_opts.rewrite_ptrs) &&
-                   crc_is_compressed(p.crc))
-                       reserve_sectors += k.k->size;
+                       if (crc_is_compressed(p.crc))
+                               reserve_sectors += k.k->size;
+
+                       m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
+                       durability_removing += bch2_extent_ptr_desired_durability(c, &p);
+               } else if (!p.ptr.cached &&
+                          !((1U << i) & m->data_opts.kill_ptrs)) {
+                       bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+                       durability_have += bch2_extent_ptr_durability(c, &p);
+               }
 
                /*
                 * op->csum_type is normally initialized from the fs/file's
@@ -358,19 +568,98 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
                if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
                        m->op.incompressible = true;
 
+               if (c->opts.nocow_enabled) {
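+                       /*
+                        * Nocow locks on the buckets we read from keep
+                        * in-place writes from racing with this move; with a
+                        * moving_context we can wait for in-flight IO to drain
+                        * rather than blocking on the lock:
+                        */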
+                       if (ctxt) {
+                               move_ctxt_wait_event(ctxt,
+                                               (locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
+                                                                         PTR_BUCKET_POS(c, &p.ptr), 0)) ||
+                                               (!atomic_read(&ctxt->read_sectors) &&
+                                                !atomic_read(&ctxt->write_sectors)));
+
+                               if (!locked)
+                                       bch2_bucket_nocow_lock(&c->nocow_locks,
+                                                              PTR_BUCKET_POS(c, &p.ptr), 0);
+                       } else {
+                               if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
+                                                              PTR_BUCKET_POS(c, &p.ptr), 0)) {
+                                       ret = -BCH_ERR_nocow_lock_blocked;
+                                       goto err;
+                               }
+                       }
+                       ptrs_locked |= (1U << i);
+               }
+
                i++;
        }
 
+       /*
+        * If the extent's current durability is less than
+        * io_opts.data_replicas, we're not trying to re-replicate it up to
+        * data_replicas here - unless extra_replicas was specified.
+        *
+        * Increasing replication is an explicit operation triggered by
+        * rereplicate, currently, so that users don't get an unexpected
+        * -ENOSPC:
+        */
+       if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
+           durability_have >= io_opts.data_replicas) {
+               m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
+               m->data_opts.rewrite_ptrs = 0;
+               /* if iter == NULL, it's just a promote */
+               if (iter)
+                       ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
+               goto done;
+       }
+
+       m->op.nr_replicas = min(durability_removing, io_opts.data_replicas - durability_have) +
+               m->data_opts.extra_replicas;
+       m->op.nr_replicas_required = m->op.nr_replicas;
+
+       BUG_ON(!m->op.nr_replicas);
+
        if (reserve_sectors) {
                ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
                                m->data_opts.extra_replicas
                                ? 0
                                : BCH_DISK_RESERVATION_NOFAIL);
                if (ret)
-                       return ret;
+                       goto err;
+       }
+
+       if (bkey_extent_is_unwritten(k)) {
+               bch2_update_unwritten_extent(trans, m);
+               goto done;
        }
 
-       m->op.nr_replicas = m->op.nr_replicas_required =
-               hweight32(m->data_opts.rewrite_ptrs) + m->data_opts.extra_replicas;
        return 0;
+err:
+       i = 0;
+       bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+               if ((1U << i) & ptrs_locked)
+                       bch2_bucket_nocow_unlock(&c->nocow_locks,
+                                                PTR_BUCKET_POS(c, &p.ptr), 0);
+               percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
+               i++;
+       }
+
+       bch2_bkey_buf_exit(&m->k, c);
+       bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
+       return ret;
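+       /*
+        * data_update_done: there's no data update in flight - either there
+        * was nothing to do, or the work was already done synchronously here:
+        */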
+done:
+       bch2_data_update_exit(m);
+       return ret ?: -BCH_ERR_data_update_done;
+}
+
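+/*
+ * Cached pointers aren't dirty replicas, so they can't be rewritten - turn
+ * rewrite requests on cached pointers into kill requests:
+ */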
+void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
+{
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       unsigned i = 0;
+
+       bkey_for_each_ptr(ptrs, ptr) {
+               if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
+                       opts->kill_ptrs |= 1U << i;
+                       opts->rewrite_ptrs ^= 1U << i;
+               }
+
+               i++;
+       }
 }