+// SPDX-License-Identifier: GPL-2.0
/*
* Code for moving data off a device.
*/
#include "bcachefs.h"
+#include "bkey_buf.h"
#include "btree_update.h"
+#include "btree_update_interior.h"
#include "buckets.h"
+#include "errcode.h"
#include "extents.h"
-#include "io.h"
+#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "migrate.h"
#include "move.h"
+#include "replicas.h"
#include "super-io.h"
-static bool migrate_pred(void *arg, struct bkey_s_c_extent e)
-{
- struct bch_dev *ca = arg;
- const struct bch_extent_ptr *ptr;
-
- extent_for_each_ptr(e, ptr)
- if (ptr->dev == ca->dev_idx)
- return true;
-
- return false;
-}
-
-#define MAX_DATA_OFF_ITER 10
-
-static int bch2_dev_usrdata_migrate(struct bch_fs *c, struct bch_dev *ca,
- int flags)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- u64 keys_moved, sectors_moved;
- unsigned pass = 0;
- int ret = 0;
-
- BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
-
- if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_USER)))
- return 0;
-
- /*
- * In theory, only one pass should be necessary as we've
- * quiesced all writes before calling this.
- *
- * However, in practice, more than one pass may be necessary:
- * - Some move fails due to an error. We can find this out
- * from the moving_context.
- * - Some key swap failed because some of the pointers in the
- * key in the tree changed due to caching behavior, btree gc
- * pruning stale pointers, or tiering (if the device being
- * removed is in tier 0). A smarter bkey_cmpxchg would
- * handle these cases.
- *
- * Thus this scans the tree one more time than strictly necessary,
- * but that can be viewed as a verification pass.
- */
- do {
- ret = bch2_move_data(c, NULL,
- SECTORS_IN_FLIGHT_PER_DEVICE,
- NULL,
- writepoint_hashed((unsigned long) current),
- 0,
- ca->dev_idx,
- migrate_pred, ca,
- &keys_moved,
- &sectors_moved);
- if (ret) {
- bch_err(c, "error migrating data: %i", ret);
- return ret;
- }
- } while (keys_moved && pass++ < MAX_DATA_OFF_ITER);
-
- if (keys_moved) {
- bch_err(c, "unable to migrate all data in %d iterations",
- MAX_DATA_OFF_ITER);
- return -1;
- }
-
- mutex_lock(&c->replicas_gc_lock);
- bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
-
- for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, BTREE_ITER_PREFETCH, k) {
- if (!bkey_extent_is_data(k.k))
- continue;
-
- ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
- BCH_DATA_USER);
- if (ret) {
- bch_err(c, "error migrating data %i from check_mark_super()", ret);
- break;
- }
- }
-
- bch2_replicas_gc_end(c, ret);
- mutex_unlock(&c->replicas_gc_lock);
- return ret;
-}
-
-static int bch2_move_btree_off(struct bch_fs *c, struct bch_dev *ca,
- enum btree_id id)
-{
- struct btree_iter iter;
- struct btree *b;
- int ret;
-
- BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
-
- for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
- struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
-
- if (!bch2_extent_has_device(e, ca->dev_idx))
- continue;
-
- ret = bch2_btree_node_rewrite(c, &iter, b->data->keys.seq, 0);
- if (ret) {
- bch2_btree_iter_unlock(&iter);
- return ret;
- }
-
- bch2_btree_iter_set_locks_want(&iter, 0);
- }
- ret = bch2_btree_iter_unlock(&iter);
- if (ret)
- return ret; /* btree IO error */
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
- struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
-
- BUG_ON(bch2_extent_has_device(e, ca->dev_idx));
- }
- bch2_btree_iter_unlock(&iter);
- }
-
- return 0;
-}
-
-/*
- * This moves only the meta-data off, leaving the data (if any) in place.
- * The data is moved off by bch_move_data_off_device, if desired, and
- * called first.
- *
- * Before calling this, allocation of buckets to the device must have
- * been disabled, as else we'll continue to write meta-data to the device
- * when new buckets are picked for meta-data writes.
- * In addition, the copying gc and allocator threads for the device
- * must have been stopped. The allocator thread is the only thread
- * that writes prio/gen information.
- *
- * Meta-data consists of:
- * - Btree nodes
- * - Prio/gen information
- * - Journal entries
- * - Superblock
- *
- * This has to move the btree nodes and the journal only:
- * - prio/gen information is not written once the allocator thread is stopped.
- * also, as the prio/gen information is per-device it is not moved.
- * - the superblock will be written by the caller once after everything
- * is stopped.
- *
- * Note that currently there is no way to stop btree node and journal
- * meta-data writes to a device without moving the meta-data because
- * once a bucket is open for a btree node, unless a replacement btree
- * node is allocated (and the tree updated), the bucket will continue
- * to be written with updates. Similarly for the journal (it gets
- * written until filled).
- *
- * This routine leaves the data (if any) in place. Whether the data
- * should be moved off is a decision independent of whether the meta
- * data should be moved off and stopped:
- *
- * - For device removal, both data and meta-data are moved off, in
- * that order.
- *
- * - However, for turning a device read-only without removing it, only
- * meta-data is moved off since that's the only way to prevent it
- * from being written. Data is left in the device, but no new data
- * is written.
- */
-
-static int bch2_dev_metadata_migrate(struct bch_fs *c, struct bch_dev *ca,
- int flags)
-{
- unsigned i;
- int ret = 0;
-
- BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
-
- if (!(bch2_dev_has_data(c, ca) &
- ((1 << BCH_DATA_JOURNAL)|
- (1 << BCH_DATA_BTREE))))
- return 0;
-
- mutex_lock(&c->replicas_gc_lock);
- bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);
-
- for (i = 0; i < BTREE_ID_NR; i++) {
- ret = bch2_move_btree_off(c, ca, i);
- if (ret)
- goto err;
- }
-err:
- bch2_replicas_gc_end(c, ret);
- mutex_unlock(&c->replicas_gc_lock);
- return ret;
-}
-
-int bch2_dev_data_migrate(struct bch_fs *c, struct bch_dev *ca, int flags)
-{
- return bch2_dev_usrdata_migrate(c, ca, flags) ?:
- bch2_dev_metadata_migrate(c, ca, flags);
-}
-
-static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s_extent e,
+static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k,
unsigned dev_idx, int flags, bool metadata)
{
unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
unsigned nr_good;
- bch2_extent_drop_device(e, dev_idx);
+ bch2_bkey_drop_device(k, dev_idx);
- nr_good = bch2_extent_nr_good_ptrs(c, e.c);
+ nr_good = bch2_bkey_durability(c, k.s_c);
if ((!nr_good && !(flags & lost)) ||
(nr_good < replicas && !(flags & degraded)))
return -EINVAL;
return 0;
}
-/*
- * This doesn't actually move any data -- it marks the keys as bad
- * if they contain a pointer to a device that is forcibly removed
- * and don't have other valid pointers. If there are valid pointers,
- * the necessary pointers to the removed device are replaced with
- * bad pointers instead.
- *
- * This is only called if bch_move_data_off_device above failed, meaning
- * that we've already tried to move the data MAX_DATA_OFF_ITER times and
- * are not likely to succeed if we try again.
- */
-static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
+static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ unsigned dev_idx,
+ int flags)
{
- struct bkey_s_c k;
- struct bkey_s_extent e;
- BKEY_PADDED(key) tmp;
- struct btree_iter iter;
- int ret = 0;
-
- mutex_lock(&c->replicas_gc_lock);
- bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
-
- bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
- POS_MIN, BTREE_ITER_PREFETCH);
-
- while ((k = bch2_btree_iter_peek(&iter)).k &&
- !(ret = btree_iter_err(k))) {
- if (!bkey_extent_is_data(k.k))
- goto advance;
+ struct bch_fs *c = trans->c;
+ struct bkey_i *n;
+ int ret;
- if (!bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx))
- goto advance;
+ if (!bch2_bkey_has_device_c(k, dev_idx))
+ return 0;
- bkey_reassemble(&tmp.key, k);
- e = bkey_i_to_s_extent(&tmp.key);
+ n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ return ret;
- ret = drop_dev_ptrs(c, e, dev_idx, flags, false);
- if (ret)
- break;
+ ret = drop_dev_ptrs(c, bkey_i_to_s(n), dev_idx, flags, false);
+ if (ret)
+ return ret;
- /*
- * If the new extent no longer has any pointers, bch2_extent_normalize()
- * will do the appropriate thing with it (turning it into a
- * KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
- */
- bch2_extent_normalize(c, e.s);
+ /*
+ * If the new extent no longer has any pointers, bch2_extent_normalize()
+ * will do the appropriate thing with it (turning it into a
+ * KEY_TYPE_error key, or just a discard if it was a cached extent)
+ */
+ bch2_extent_normalize(c, bkey_i_to_s(n));
- if (bkey_extent_is_data(e.k) &&
- (ret = bch2_check_mark_super(c, e.c, BCH_DATA_USER)))
- break;
+ /*
+ * Since we're not inserting through an extent iterator
+ * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+ * we aren't using the extent overwrite path to delete, we're
+ * just using the normal key deletion path:
+ */
+ if (bkey_deleted(&n->k))
+ n->k.size = 0;
+ return 0;
+}
- iter.pos = bkey_start_pos(&tmp.key.k);
+static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ enum btree_id id;
+ int ret = 0;
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOFAIL,
- BTREE_INSERT_ENTRY(&iter, &tmp.key));
+ for (id = 0; id < BTREE_ID_NR; id++) {
+ if (!btree_type_has_ptrs(id))
+ continue;
- /*
- * don't want to leave ret == -EINTR, since if we raced and
- * something else overwrote the key we could spuriously return
- * -EINTR below:
- */
- if (ret == -EINTR)
- ret = 0;
+ ret = for_each_btree_key_commit(trans, iter, id, POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags));
if (ret)
break;
-
- continue;
-advance:
- if (bkey_extent_is_data(k.k)) {
- ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
- BCH_DATA_USER);
- if (ret)
- break;
- }
- bch2_btree_iter_advance_pos(&iter);
}
- bch2_btree_iter_unlock(&iter);
-
- bch2_replicas_gc_end(c, ret);
- mutex_unlock(&c->replicas_gc_lock);
+ bch2_trans_put(trans);
return ret;
}
static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
+ struct btree_trans *trans;
struct btree_iter iter;
struct closure cl;
struct btree *b;
+ struct bkey_buf k;
unsigned id;
int ret;
if (flags & BCH_FORCE_IF_METADATA_LOST)
return -EINVAL;
+ trans = bch2_trans_get(c);
+ bch2_bkey_buf_init(&k);
closure_init_stack(&cl);
- mutex_lock(&c->replicas_gc_lock);
- bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);
-
for (id = 0; id < BTREE_ID_NR; id++) {
- for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
- __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
- struct bkey_i_extent *new_key;
+ bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
+ BTREE_ITER_PREFETCH);
retry:
- if (!bch2_extent_has_device(bkey_i_to_s_c_extent(&b->key),
- dev_idx)) {
- bch2_btree_iter_set_locks_want(&iter, 0);
-
- ret = bch2_check_mark_super(c, bkey_i_to_s_c_extent(&b->key),
- BCH_DATA_BTREE);
- if (ret)
- goto err;
- } else {
- bkey_copy(&tmp.k, &b->key);
- new_key = bkey_i_to_extent(&tmp.k);
-
- ret = drop_dev_ptrs(c, extent_i_to_s(new_key),
- dev_idx, flags, true);
- if (ret)
- goto err;
-
- if (!bch2_btree_iter_set_locks_want(&iter, U8_MAX)) {
- b = bch2_btree_iter_peek_node(&iter);
- goto retry;
- }
+ ret = 0;
+ while (bch2_trans_begin(trans),
+ (b = bch2_btree_iter_peek_node(&iter)) &&
+ !(ret = PTR_ERR_OR_ZERO(b))) {
+ if (!bch2_bkey_has_device_c(bkey_i_to_s_c(&b->key), dev_idx))
+ goto next;
+
+ bch2_bkey_buf_copy(&k, c, &b->key);
+
+ ret = drop_dev_ptrs(c, bkey_i_to_s(k.k),
+ dev_idx, flags, true);
+ if (ret) {
+ bch_err(c, "Cannot drop device without losing data");
+ break;
+ }
- ret = bch2_btree_node_update_key(c, &iter, b, new_key);
- if (ret == -EINTR) {
- b = bch2_btree_iter_peek_node(&iter);
- goto retry;
- }
- if (ret)
- goto err;
+ ret = bch2_btree_node_update_key(trans, &iter, b, k.k, 0, false);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ ret = 0;
+ continue;
}
+
+ bch_err_msg(c, ret, "updating btree node key");
+ if (ret)
+ break;
+next:
+ bch2_btree_iter_next_node(&iter);
}
- bch2_btree_iter_unlock(&iter);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_iter_exit(trans, &iter);
- /* btree root */
- mutex_lock(&c->btree_root_lock);
- mutex_unlock(&c->btree_root_lock);
+ if (ret)
+ goto err;
}
+ bch2_btree_interior_updates_flush(c);
ret = 0;
-out:
- bch2_replicas_gc_end(c, ret);
- mutex_unlock(&c->replicas_gc_lock);
+err:
+ bch2_bkey_buf_exit(&k, c);
+ bch2_trans_put(trans);
+
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
return ret;
-err:
- bch2_btree_iter_unlock(&iter);
- goto out;
}
int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)