2 * Code for moving data off a device.
6 #include "btree_update.h"
/*
 * migrate_pred() - predicate handed to the data-move machinery.
 *
 * Returns true iff extent @e still has a pointer to the device being
 * evacuated; @arg is the struct bch_dev * of that device, passed opaquely.
 *
 * NOTE(review): this excerpt is elided (embedded original line numbers are
 * non-contiguous) — braces and blank lines are missing from view.
 */
16 static bool migrate_pred(void *arg, struct bkey_s_c_extent e)
18 struct bch_dev *ca = arg;
20 return bch2_extent_has_device(e, ca->dev_idx);
23 #define MAX_DATA_OFF_ITER 10
/*
 * bch2_dev_usrdata_migrate() - move all user data off device @ca.
 *
 * Repeatedly invokes bch2_move_data() until a pass moves no keys, or
 * MAX_DATA_OFF_ITER passes have run; then rebuilds the replicas entries for
 * BCH_DATA_USER (under replicas_gc_lock) by walking every extent key and
 * re-marking its device set in the superblock.
 *
 * Returns 0 on success or a negative error (exact error paths are elided
 * from this excerpt).
 *
 * NOTE(review): excerpt is elided — the `do {` opener, goto/return lines,
 * and part of the bch2_move_data() argument list are not visible.
 */
25 static int bch2_dev_usrdata_migrate(struct bch_fs *c, struct bch_dev *ca,
28 struct btree_iter iter;
30 struct bch_move_stats stats;
	/* Nothing to do if the device holds no user data: */
34 if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_USER)))
	/*
	 * Multiple passes are needed because a pass can race with other movers;
	 * see the original XXX below.
	 */
38 * XXX: we should be able to do this in one pass, but bch2_move_data()
39 * can spuriously fail to move an extent due to racing with other move
43 memset(&stats, 0, sizeof(stats));
45 ret = bch2_move_data(c, NULL,
46 SECTORS_IN_FLIGHT_PER_DEVICE,
48 writepoint_hashed((unsigned long) current),
55 bch_err(c, "error migrating data: %i", ret);
	/* Loop while the last pass still moved keys, bounded by MAX_DATA_OFF_ITER: */
58 } while (atomic64_read(&stats.keys_moved) && pass++ < MAX_DATA_OFF_ITER);
	/* Keys still moving after the final pass means we couldn't converge: */
60 if (atomic64_read(&stats.keys_moved)) {
61 bch_err(c, "unable to migrate all data in %d iterations",
	/* Rebuild BCH_DATA_USER replicas entries now that data has moved: */
66 mutex_lock(&c->replicas_gc_lock);
67 bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
69 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, BTREE_ITER_PREFETCH, k) {
70 ret = bch2_check_mark_super(c, BCH_DATA_USER, bch2_bkey_devs(k));
72 bch_err(c, "error migrating data %i from check_mark_super()", ret);
77 bch2_replicas_gc_end(c, ret);
78 mutex_unlock(&c->replicas_gc_lock);
/*
 * bch2_dev_metadata_migrate() - move btree (metadata) nodes off device @ca.
 *
 * Walks every node of every btree; any node whose key still points at
 * @ca->dev_idx is rewritten (bch2_btree_node_rewrite()), which reallocates
 * it elsewhere. Runs under replicas_gc_lock so the BCH_DATA_BTREE replicas
 * entries can be rebuilt as a side effect.
 *
 * NOTE(review): excerpt is elided — error/continue paths inside the loops
 * are not visible.
 */
82 static int bch2_dev_metadata_migrate(struct bch_fs *c, struct bch_dev *ca,
85 struct btree_iter iter;
	/* Nothing to do if the device holds no btree data: */
90 if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_BTREE)))
93 mutex_lock(&c->replicas_gc_lock);
94 bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);
	/* Iterate every node of every btree id: */
96 for (id = 0; id < BTREE_ID_NR; id++) {
97 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
98 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
		/* Skip nodes that don't live on the device being evacuated: */
100 if (!bch2_extent_has_device(e, ca->dev_idx))
		/* Rewrite relocates the node; seq guards against a stale node: */
103 ret = bch2_btree_node_rewrite(c, &iter, b->data->keys.seq, 0);
105 bch2_btree_iter_unlock(&iter);
109 ret = bch2_btree_iter_unlock(&iter);
114 bch2_replicas_gc_end(c, ret);
115 mutex_unlock(&c->replicas_gc_lock);
/*
 * bch2_dev_data_migrate() - public entry point: evacuate all data
 * (user data, then btree metadata) from device @ca.
 *
 * The BUG_ON asserts the device is not simultaneously read-write and
 * online — migrating off a device that is still being written to would
 * never converge.
 *
 * Returns the first nonzero error from either stage (the `?:` GNU
 * elvis operator short-circuits on failure of the usrdata stage).
 */
119 int bch2_dev_data_migrate(struct bch_fs *c, struct bch_dev *ca, int flags)
121 BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW &&
122 bch2_dev_is_online(ca));
124 return bch2_dev_usrdata_migrate(c, ca, flags) ?:
125 bch2_dev_metadata_migrate(c, ca, flags);
/*
 * drop_dev_ptrs() - remove @dev_idx's pointer from extent @e, then verify
 * the result still satisfies the replication policy.
 *
 * @metadata selects the metadata vs. data replica count and force flags.
 * After dropping the pointer, the remaining "good" pointer count is checked:
 *  - zero good pointers requires the caller to have passed the
 *    FORCE_IF_*_LOST flag (data would be lost);
 *  - fewer than the configured replicas requires FORCE_IF_*_DEGRADED.
 *
 * NOTE(review): excerpt is elided — the error value returned when the
 * check fails, and the success return, are not visible here.
 */
128 static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s_extent e,
129 unsigned dev_idx, int flags, bool metadata)
131 unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
132 unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
133 unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
136 bch2_extent_drop_device(e, dev_idx);
138 nr_good = bch2_extent_nr_good_ptrs(c, e.c);
	/* Refuse unless the caller explicitly forced lost/degraded data: */
139 if ((!nr_good && !(flags & lost)) ||
140 (nr_good < replicas && !(flags & degraded)))
/*
 * bch2_dev_usrdata_drop() - forcibly remove device @dev_idx's pointers from
 * all user-data extents (used when the device's data cannot be migrated,
 * e.g. the device is gone).
 *
 * For each extent that references the device: copy it into a padded stack
 * key, drop the device pointer (drop_dev_ptrs()), normalize, re-mark its
 * replicas entry, and write it back at its own position with
 * bch2_btree_insert_at(). Extents not referencing the device only get their
 * replicas entry re-marked. Runs under replicas_gc_lock so BCH_DATA_USER
 * replicas entries are rebuilt as we go.
 *
 * NOTE(review): excerpt is elided — goto/continue targets, the insert
 * flags, and the EINTR-retry handling around the insert are not visible.
 */
146 static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
149 struct bkey_s_extent e;
150 BKEY_PADDED(key) tmp;
151 struct btree_iter iter;
154 mutex_lock(&c->replicas_gc_lock);
155 bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
157 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
158 POS_MIN, BTREE_ITER_PREFETCH);
160 while ((k = bch2_btree_iter_peek(&iter)).k &&
161 !(ret = btree_iter_err(k))) {
	/* Keys that don't reference the device just get re-marked: */
162 if (!bkey_extent_is_data(k.k) ||
163 !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
164 ret = bch2_check_mark_super(c, BCH_DATA_USER,
168 bch2_btree_iter_next(&iter);
	/* Work on a stack copy so the tree key isn't modified in place: */
172 bkey_reassemble(&tmp.key, k);
173 e = bkey_i_to_s_extent(&tmp.key);
175 ret = drop_dev_ptrs(c, e, dev_idx, flags, false);
	/*
	 * (original comment follows)
	 */
180 * If the new extent no longer has any pointers, bch2_extent_normalize()
181 * will do the appropriate thing with it (turning it into a
182 * KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
184 bch2_extent_normalize(c, e.s);
186 ret = bch2_check_mark_super(c, BCH_DATA_USER,
187 bch2_bkey_devs(bkey_i_to_s_c(&tmp.key)));
	/* Reposition the iterator at the (possibly shifted) start of the key: */
191 iter.pos = bkey_start_pos(&tmp.key.k);
193 ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
196 BTREE_INSERT_ENTRY(&iter, &tmp.key));
	/* (original comment on EINTR handling follows) */
199 * don't want to leave ret == -EINTR, since if we raced and
200 * something else overwrote the key we could spuriously return
209 bch2_btree_iter_unlock(&iter);
211 bch2_replicas_gc_end(c, ret);
212 mutex_unlock(&c->replicas_gc_lock);
/*
 * bch2_dev_metadata_drop() - forcibly remove device @dev_idx's pointers
 * from every btree node key (the counterpart of bch2_dev_usrdata_drop()
 * for metadata).
 *
 * For each btree node referencing the device: copy its key, drop the
 * device pointer (drop_dev_ptrs(), metadata=true), upgrade iterator locks,
 * and install the new key with bch2_btree_node_update_key(). Nodes not
 * referencing the device only get their BCH_DATA_BTREE replicas entry
 * re-marked. Runs under replicas_gc_lock.
 *
 * The FORCE_IF_METADATA_LOST case (dropping the last pointer of a node)
 * is explicitly rejected — not handled yet per the original comment.
 *
 * NOTE(review): excerpt is elided — retry paths after lock upgrade /
 * update_key failure, and the error-exit label, are not visible.
 */
217 static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
219 struct btree_iter iter;
225 /* don't handle this yet: */
226 if (flags & BCH_FORCE_IF_METADATA_LOST)
229 closure_init_stack(&cl);
231 mutex_lock(&c->replicas_gc_lock);
232 bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);
234 for (id = 0; id < BTREE_ID_NR; id++) {
235 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
		/* Padded stack buffer large enough for a btree-ptr key: */
236 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
237 struct bkey_i_extent *new_key;
239 if (!bch2_extent_has_device(bkey_i_to_s_c_extent(&b->key),
			/* Node untouched: drop lock intent, just re-mark replicas: */
241 bch2_btree_iter_set_locks_want(&iter, 0);
243 ret = bch2_check_mark_super(c, BCH_DATA_BTREE,
244 bch2_bkey_devs(bkey_i_to_s_c(&b->key)));
248 bkey_copy(&tmp.k, &b->key);
249 new_key = bkey_i_to_extent(&tmp.k);
251 ret = drop_dev_ptrs(c, extent_i_to_s(new_key),
252 dev_idx, flags, true);
		/* Need full intent locks to rewrite the node's key: */
256 if (!bch2_btree_iter_set_locks_want(&iter, U8_MAX)) {
257 b = bch2_btree_iter_peek_node(&iter);
261 ret = bch2_btree_node_update_key(c, &iter, b, new_key);
263 b = bch2_btree_iter_peek_node(&iter);
270 bch2_btree_iter_unlock(&iter);
275 bch2_replicas_gc_end(c, ret);
276 mutex_unlock(&c->replicas_gc_lock);
280 bch2_btree_iter_unlock(&iter);
/*
 * bch2_dev_data_drop() - public entry point: drop all of device
 * @dev_idx's pointers, user data first, then btree metadata.
 *
 * Returns the first nonzero error from either stage (GNU `?:`
 * short-circuits on failure of the usrdata stage).
 */
284 int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
286 return bch2_dev_usrdata_drop(c, dev_idx, flags) ?:
287 bch2_dev_metadata_drop(c, dev_idx, flags);