2 * Code for moving data off a device.
6 #include "btree_update.h"
/*
 * migrate_pred - move predicate for bch2_move_data(): decide whether
 * extent @e should be moved, i.e. whether it has a pointer on the
 * device being evacuated.  @arg is that device's struct bch_dev.
 *
 * NOTE(review): lines are elided from this extract (braces and return
 * statements are not visible) -- verify against the complete file.
 */
16 static bool migrate_pred(void *arg, struct bkey_s_c_extent e)
18 struct bch_dev *ca = arg;
19 const struct bch_extent_ptr *ptr;
/* any pointer on @ca makes this extent a move candidate */
21 extent_for_each_ptr(e, ptr)
22 if (ptr->dev == ca->dev_idx)
28 #define MAX_DATA_OFF_ITER 10
/*
 * bch2_dev_usrdata_migrate - move all user data off device @ca.
 *
 * Repeatedly walks the extents btree via bch2_move_data() (with
 * migrate_pred selecting extents on @ca) until a pass moves nothing,
 * capped at MAX_DATA_OFF_ITER passes; then rescans extents under
 * replicas_gc so the superblock replicas table no longer lists @ca
 * for user data.
 *
 * NOTE(review): lines are elided from this extract (declarations,
 * braces, early returns are missing); comments describe only what is
 * visible -- verify against the complete file.
 */
30 static int bch2_dev_usrdata_migrate(struct bch_fs *c, struct bch_dev *ca,
33 struct btree_iter iter;
35 u64 keys_moved, sectors_moved;
/* caller must have made the device read-only before migrating off it */
39 BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
/* nothing to do if the device holds no user data */
41 if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_USER)))
45 * In theory, only one pass should be necessary as we've
46 * quiesced all writes before calling this.
48 * However, in practice, more than one pass may be necessary:
49 * - Some move fails due to an error. We can find this out
50 * from the moving_context.
51 * - Some key swap failed because some of the pointers in the
52 * key in the tree changed due to caching behavior, btree gc
53 * pruning stale pointers, or tiering (if the device being
54 * removed is in tier 0). A smarter bkey_cmpxchg would
57 * Thus this scans the tree one more time than strictly necessary,
58 * but that can be viewed as a verification pass.
61 ret = bch2_move_data(c, NULL,
62 SECTORS_IN_FLIGHT_PER_DEVICE,
64 writepoint_hashed((unsigned long) current),
71 bch_err(c, "error migrating data: %i", ret);
/* retry until a pass moves nothing, or we hit the iteration cap */
74 } while (keys_moved && pass++ < MAX_DATA_OFF_ITER);
77 bch_err(c, "unable to migrate all data in %d iterations",
/* rebuild the user-data replicas table now that @ca should be empty */
82 mutex_lock(&c->replicas_gc_lock);
83 bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
85 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, BTREE_ITER_PREFETCH, k) {
86 if (!bkey_extent_is_data(k.k))
/* re-mark surviving extents so the replicas list stays accurate */
89 ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
92 bch_err(c, "error migrating data %i from check_mark_super()", ret);
97 bch2_replicas_gc_end(c, ret);
98 mutex_unlock(&c->replicas_gc_lock);
/*
 * bch2_move_btree_off - rewrite every node of btree @id that still has
 * a replica on @ca, so no metadata for this tree remains on the device.
 * On debug builds a second walk re-checks the tree and BUG()s if any
 * node still points at @ca.
 *
 * NOTE(review): lines are elided from this extract; the error handling
 * between the visible statements is not shown -- verify against the
 * complete file.
 */
102 static int bch2_move_btree_off(struct bch_fs *c, struct bch_dev *ca,
105 struct btree_iter iter;
/* caller must have made the device read-only first */
109 BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
111 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
112 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
/* skip nodes with no replica on the device */
114 if (!bch2_extent_has_device(e, ca->dev_idx))
/* rewriting allocates the node elsewhere, dropping it from @ca */
117 ret = bch2_btree_node_rewrite(c, &iter, b->data->keys.seq, 0);
119 bch2_btree_iter_unlock(&iter);
123 bch2_btree_iter_set_locks_want(&iter, 0);
125 ret = bch2_btree_iter_unlock(&iter);
127 return ret; /* btree IO error */
/* debug verification pass: nothing may still reference @ca */
129 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
130 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
131 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
133 BUG_ON(bch2_extent_has_device(e, ca->dev_idx));
135 bch2_btree_iter_unlock(&iter);
142 * This moves only the meta-data off, leaving the data (if any) in place.
143 * The data is moved off by bch2_dev_usrdata_migrate(), if desired, and
146 * Before calling this, allocation of buckets to the device must have
147 * been disabled, as else we'll continue to write meta-data to the device
148 * when new buckets are picked for meta-data writes.
149 * In addition, the copying gc and allocator threads for the device
150 * must have been stopped. The allocator thread is the only thread
151 * that writes prio/gen information.
153 * Meta-data consists of:
155 * - Prio/gen information
159 * This has to move the btree nodes and the journal only:
160 * - prio/gen information is not written once the allocator thread is stopped.
161 * also, as the prio/gen information is per-device it is not moved.
162 * - the superblock will be written by the caller once after everything
165 * Note that currently there is no way to stop btree node and journal
166 * meta-data writes to a device without moving the meta-data because
167 * once a bucket is open for a btree node, unless a replacement btree
168 * node is allocated (and the tree updated), the bucket will continue
169 * to be written with updates. Similarly for the journal (it gets
170 * written until filled).
172 * This routine leaves the data (if any) in place. Whether the data
173 * should be moved off is a decision independent of whether the meta
174 * data should be moved off and stopped:
176 * - For device removal, both data and meta-data are moved off, in
179 * - However, for turning a device read-only without removing it, only
180 * meta-data is moved off since that's the only way to prevent it
181 * from being written. Data is left in the device, but no new data
/*
 * bch2_dev_metadata_migrate - move btree nodes and journal off @ca
 * (see the long comment above for the rationale and preconditions:
 * allocation disabled, copygc/allocator threads stopped).
 *
 * NOTE(review): lines are elided from this extract (braces, early
 * returns, error paths missing) -- verify against the complete file.
 */
185 static int bch2_dev_metadata_migrate(struct bch_fs *c, struct bch_dev *ca,
/* caller must have made the device read-only first */
191 BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
/* nothing to do if the device holds neither journal nor btree data */
193 if (!(bch2_dev_has_data(c, ca) &
194 ((1 << BCH_DATA_JOURNAL)|
195 (1 << BCH_DATA_BTREE))))
/* replicas gc brackets the migration so the table is rebuilt after */
198 mutex_lock(&c->replicas_gc_lock);
199 bch2_replicas_gc_start(c,
200 (1 << BCH_DATA_JOURNAL)|
201 (1 << BCH_DATA_BTREE));
203 /* 1st, Move the btree nodes off the device */
205 for (i = 0; i < BTREE_ID_NR; i++) {
206 ret = bch2_move_btree_off(c, ca, i);
211 /* There are no prios/gens to move -- they are already in the device. */
213 /* 2nd. Move the journal off the device */
215 ret = bch2_journal_move(ca);
220 bch2_replicas_gc_end(c, ret);
221 mutex_unlock(&c->replicas_gc_lock);
/*
 * bch2_dev_data_migrate - migrate both user data and metadata off @ca.
 * User data goes first, then btree/journal metadata; the ?: chain
 * short-circuits, so the first nonzero error is returned.
 */
225 int bch2_dev_data_migrate(struct bch_fs *c, struct bch_dev *ca, int flags)
227 return bch2_dev_usrdata_migrate(c, ca, flags) ?:
228 bch2_dev_metadata_migrate(c, ca, flags);
/*
 * drop_dev_ptrs - strip all pointers to @dev_idx from extent @e, then
 * check that the caller is allowed to lose that redundancy.
 *
 * @metadata selects the metadata vs. data replica target and force
 * flags.  The (elided) failure return fires unless:
 *  - at least one good pointer remains, or BCH_FORCE_IF_*_LOST is set;
 *  - at least the configured replica count remains, or
 *    BCH_FORCE_IF_*_DEGRADED is set.
 *
 * NOTE(review): lines are elided from this extract -- verify against
 * the complete file.
 */
231 static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s_extent e,
232 unsigned dev_idx, int flags, bool metadata)
234 struct bch_extent_ptr *ptr;
235 unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
236 unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
237 unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
/* backwards iteration -- presumably so dropping the current pointer
 * doesn't perturb the walk; TODO confirm against extent iter macros */
240 extent_for_each_ptr_backwards(e, ptr)
241 if (ptr->dev == dev_idx)
242 bch2_extent_drop_ptr(e, ptr);
244 nr_good = bch2_extent_nr_good_ptrs(c, e.c);
245 if ((!nr_good && !(flags & lost)) ||
246 (nr_good < replicas && !(flags & degraded)))
253 * This doesn't actually move any data -- it marks the keys as bad
254 * if they contain a pointer to a device that is forcibly removed
255 * and don't have other valid pointers. If there are valid pointers,
256 * the necessary pointers to the removed device are replaced with
257 * bad pointers instead.
259 * This is only called if bch2_dev_usrdata_migrate() above failed, meaning
260 * that we've already tried to move the data MAX_DATA_OFF_ITER times and
261 * are not likely to succeed if we try again.
/*
 * bch2_dev_usrdata_drop - forcibly drop pointers to @dev_idx from all
 * user-data extents.  No data is copied (see comment above); extents
 * left with no valid pointers become error/discard keys.
 *
 * For each extent referencing the device: copy the key, strip the
 * device's pointers (drop_dev_ptrs), normalize the result, update the
 * replicas table, and write it back with a compare-and-swap style
 * btree insert.
 *
 * NOTE(review): lines are elided from this extract (loop body edges,
 * error paths) -- verify against the complete file.
 */
263 static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
266 struct bkey_s_extent e;
267 BKEY_PADDED(key) tmp;
268 struct btree_iter iter;
271 mutex_lock(&c->replicas_gc_lock);
272 bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
274 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
275 POS_MIN, BTREE_ITER_PREFETCH);
277 while ((k = bch2_btree_iter_peek(&iter)).k &&
278 !(ret = btree_iter_err(k))) {
279 if (!bkey_extent_is_data(k.k))
/* extents not referencing the device are left untouched */
282 if (!bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx))
/* work on a private copy of the key, then write it back */
285 bkey_reassemble(&tmp.key, k);
286 e = bkey_i_to_s_extent(&tmp.key);
288 ret = drop_dev_ptrs(c, e, dev_idx, flags, false);
293 * If the new extent no longer has any pointers, bch2_extent_normalize()
294 * will do the appropriate thing with it (turning it into a
295 * KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
297 bch2_extent_normalize(c, e.s);
299 if (bkey_extent_is_data(e.k) &&
300 (ret = bch2_check_mark_super(c, e.c, BCH_DATA_USER)))
303 iter.pos = bkey_start_pos(&tmp.key.k);
305 ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
308 BTREE_INSERT_ENTRY(&iter, &tmp.key));
311 * don't want to leave ret == -EINTR, since if we raced and
312 * something else overwrote the key we could spuriously return
/* key unchanged or raced: still mark replicas for surviving data */
322 if (bkey_extent_is_data(k.k)) {
323 ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
328 bch2_btree_iter_advance_pos(&iter);
331 bch2_btree_iter_unlock(&iter);
333 bch2_replicas_gc_end(c, ret);
334 mutex_unlock(&c->replicas_gc_lock);
/*
 * bch2_dev_metadata_drop - forcibly drop pointers to @dev_idx from the
 * keys of all btree nodes, without rewriting the nodes themselves.
 *
 * For every node still referencing the device, copies the node's key,
 * strips the device's pointers (drop_dev_ptrs with metadata=true), and
 * installs the new key via bch2_btree_node_update_key().
 *
 * NOTE(review): lines are elided from this extract (retry and error
 * paths are not fully visible) -- verify against the complete file.
 */
339 static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
341 struct btree_iter iter;
347 /* don't handle this yet: */
348 if (flags & BCH_FORCE_IF_METADATA_LOST)
351 closure_init_stack(&cl);
353 mutex_lock(&c->replicas_gc_lock);
354 bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);
/* walk every btree, node by node */
356 for (id = 0; id < BTREE_ID_NR; id++) {
357 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
358 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
359 struct bkey_i_extent *new_key;
361 if (!bch2_extent_has_device(bkey_i_to_s_c_extent(&b->key),
363 bch2_btree_iter_set_locks_want(&iter, 0);
365 ret = bch2_check_mark_super(c, bkey_i_to_s_c_extent(&b->key),
/* edit a private copy of the node key */
370 bkey_copy(&tmp.k, &b->key);
371 new_key = bkey_i_to_extent(&tmp.k);
373 ret = drop_dev_ptrs(c, extent_i_to_s(new_key),
374 dev_idx, flags, true);
/* upgrade to intent locks on the full path before updating the key;
 * on failure re-fetch the node and retry (elided) */
378 if (!bch2_btree_iter_set_locks_want(&iter, U8_MAX)) {
379 b = bch2_btree_iter_peek_node(&iter);
383 ret = bch2_btree_node_update_key(c, &iter, b, new_key);
385 b = bch2_btree_iter_peek_node(&iter);
392 bch2_btree_iter_unlock(&iter);
/* NOTE(review): lock/unlock pair with no body -- presumably serializes
 * against in-flight btree root updates before finishing; confirm */
395 mutex_lock(&c->btree_root_lock);
396 mutex_unlock(&c->btree_root_lock);
401 bch2_replicas_gc_end(c, ret);
402 mutex_unlock(&c->replicas_gc_lock);
406 bch2_btree_iter_unlock(&iter);
/*
 * bch2_dev_data_drop - forcibly drop both user data and metadata
 * pointers to @dev_idx.  User data first, then btree metadata; the ?:
 * chain short-circuits, so the first nonzero error is returned.
 */
410 int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
412 return bch2_dev_usrdata_drop(c, dev_idx, flags) ?:
413 bch2_dev_metadata_drop(c, dev_idx, flags);