/* Code for moving data off a device. */
6 #include "btree_update.h"
/*
 * Remove all pointers to device @dev_idx from extent @e, then check that
 * the result still satisfies the replication policy.
 *
 * @metadata selects which replica count (metadata_replicas vs.
 * data_replicas) and which BCH_FORCE_IF_* flags apply.
 *
 * NOTE(review): lines are elided in this view — the return statements
 * following the nr_good checks are not visible; presumably an error is
 * returned when the drop would lose/degrade data without the matching
 * force flag, and 0 otherwise. Confirm against the full source.
 */
16 static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s_extent e,
17 unsigned dev_idx, int flags, bool metadata)
/* Policy knobs differ for metadata vs. user data: */
19 unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
20 unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
21 unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
24 bch2_extent_drop_device(e, dev_idx);
/* Count the usable pointers that remain after dropping @dev_idx. */
26 nr_good = bch2_extent_nr_good_ptrs(c, e.c);
/*
 * No good pointers left means data would be lost; fewer than the
 * configured replica count means data would be degraded. Either case
 * requires the caller to have passed the corresponding force flag.
 */
27 if ((!nr_good && !(flags & lost)) ||
28 (nr_good < replicas && !(flags & degraded)))
/*
 * Walk the extents btree and rewrite every user-data extent that has a
 * pointer to @dev_idx so that it no longer references that device.
 *
 * Runs under replicas_gc so that replica entries for the evacuated
 * device can be garbage-collected once the walk completes.
 *
 * NOTE(review): several lines (error paths, continue/break statements,
 * closing braces) are elided in this view; the control flow described
 * below is reconstructed only from the visible statements.
 */
34 static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
37 struct bkey_s_extent e;
39 struct btree_iter iter;
42 mutex_lock(&c->replicas_gc_lock);
43 bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));
45 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
46 POS_MIN, BTREE_ITER_PREFETCH);
/* Iterate every extent key; stop on iterator error. */
48 while ((k = bch2_btree_iter_peek(&iter)).k &&
49 !(ret = btree_iter_err(k))) {
/*
 * Keys that are not data extents, or that have no pointer to the
 * device being evacuated, only need their replicas entry marked —
 * then advance to the next key.
 */
50 if (!bkey_extent_is_data(k.k) ||
51 !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
52 ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER, k);
55 bch2_btree_iter_next(&iter);
/* Copy the key so we can modify it, then drop the device's pointer. */
59 bkey_reassemble(&tmp.key, k);
60 e = bkey_i_to_s_extent(&tmp.key);
62 ret = drop_dev_ptrs(c, e, dev_idx, flags, false);
67 * If the new extent no longer has any pointers, bch2_extent_normalize()
68 * will do the appropriate thing with it (turning it into a
69 * KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
71 bch2_extent_normalize(c, e.s);
/* Record the modified key's replica list before writing it back. */
73 ret = bch2_mark_bkey_replicas(c, BCH_DATA_USER,
74 bkey_i_to_s_c(&tmp.key));
/*
 * Reposition the iterator at the start of the (possibly narrowed)
 * key before inserting the replacement.
 */
78 iter.pos = bkey_start_pos(&tmp.key.k);
80 ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
83 BTREE_INSERT_ENTRY(&iter, &tmp.key));
86 * don't want to leave ret == -EINTR, since if we raced and
87 * something else overwrote the key we could spuriously return
/* Release iterator locks, then finish/abort the replicas GC pass. */
96 bch2_btree_iter_unlock(&iter);
98 bch2_replicas_gc_end(c, ret);
99 mutex_unlock(&c->replicas_gc_lock);
/*
 * Walk every btree and rewrite the key of each interior/leaf node whose
 * key still points at @dev_idx, so that btree metadata no longer
 * references the device being evacuated.
 *
 * Metadata loss (as opposed to degradation) is not supported here:
 * BCH_FORCE_IF_METADATA_LOST is rejected up front.
 *
 * NOTE(review): error-handling lines, retry gotos and closing braces are
 * elided in this view; the comments below describe only the visible
 * statements.
 */
104 static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
106 struct btree_iter iter;
112 /* don't handle this yet: */
113 if (flags & BCH_FORCE_IF_METADATA_LOST)
116 closure_init_stack(&cl);
118 mutex_lock(&c->replicas_gc_lock);
119 bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);
/* Visit every node of every btree. */
121 for (id = 0; id < BTREE_ID_NR; id++) {
122 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
123 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
124 struct bkey_i_extent *new_key;
/*
 * Nodes whose key does not reference @dev_idx only need their
 * replicas entry marked; drop to read locks for that.
 */
126 if (!bch2_extent_has_device(bkey_i_to_s_c_extent(&b->key),
128 bch2_btree_iter_set_locks_want(&iter, 0);
130 ret = bch2_mark_bkey_replicas(c, BCH_DATA_BTREE,
131 bkey_i_to_s_c(&b->key));
/* Build a modified copy of the node's key without @dev_idx. */
135 bkey_copy(&tmp.k, &b->key);
136 new_key = bkey_i_to_extent(&tmp.k);
138 ret = drop_dev_ptrs(c, extent_i_to_s(new_key),
139 dev_idx, flags, true);
/*
 * Updating a node key needs full intent locks; if upgrading
 * fails the iterator position may have changed, so re-fetch
 * the node.
 */
143 if (!bch2_btree_iter_set_locks_want(&iter, U8_MAX)) {
144 b = bch2_btree_iter_peek_node(&iter);
148 ret = bch2_btree_node_update_key(c, &iter, b, new_key);
150 b = bch2_btree_iter_peek_node(&iter);
157 bch2_btree_iter_unlock(&iter);
/* Finish/abort the replicas GC pass for btree data. */
162 bch2_replicas_gc_end(c, ret);
163 mutex_unlock(&c->replicas_gc_lock);
/* NOTE(review): presumably an error-exit path — unlock on failure. */
167 bch2_btree_iter_unlock(&iter);
/*
 * Evacuate all remaining data on @dev_idx: user data first, then btree
 * metadata. Returns the first error encountered (the ?: chain stops at
 * the first nonzero return).
 */
171 int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
173 return bch2_dev_usrdata_drop(c, dev_idx, flags) ?:
174 bch2_dev_metadata_drop(c, dev_idx, flags);