2 * Code for moving data off a device.
6 #include "btree_update.h"
/*
 * issue_migration_move - queue one extent @k to be copied off device @ca.
 *
 * Reserves disk space for the new copy, locates the extent pointer that
 * refers to @ca, and hands the key to bch2_data_move() to be rewritten
 * elsewhere (restricted to @devs if non-NULL).
 *
 * NOTE(review): this listing is elided — error-return paths, the BUG_ON
 * for a missing pointer, and the closing brace are not visible here.
 */
16 static int issue_migration_move(struct bch_dev *ca,
17 struct moving_context *ctxt,
18 struct bch_devs_mask *devs,
21 struct bch_fs *c = ca->fs;
22 struct disk_reservation res;
23 const struct bch_extent_ptr *ptr;
/* reserve space for the replacement copy up front */
26 if (bch2_disk_reservation_get(c, &res, k.k->size, 0))
/* find the pointer into the device being evacuated */
29 extent_for_each_ptr(bkey_s_c_to_extent(k), ptr)
30 if (ptr->dev == ca->dev_idx)
35 /* XXX: we need to be doing something with the disk reservation */
37 ret = bch2_data_move(c, ctxt, devs, k, ptr);
39 bch2_disk_reservation_put(c, &res);
43 #define MAX_DATA_OFF_ITER 10
46 * This moves only the data off, leaving the meta-data (if any) in place.
47 * It walks the key space, and for any key with a valid pointer to the
48 * relevant device, it copies it elsewhere, updating the key to point to
50 * The meta-data is moved off by bch2_move_metadata_off_device.
52 * Note: If the number of data replicas desired is > 1, ideally, any
53 * new copies would not be made on a device that already has a
54 * copy (if there are enough devices).
55 * This is _not_ currently implemented. The multiple replicas can
56 * land in the same device even if there are others available.
/*
 * bch2_move_data_off_device - evacuate all user data (extents) from @ca.
 *
 * Walks the extents btree repeatedly (up to MAX_DATA_OFF_ITER passes),
 * issuing a migration move for every extent that still has a pointer to
 * @ca. Returns 0 on success; nonzero if data could not be fully migrated.
 *
 * NOTE(review): this listing is elided — loop-control lines (the do {}
 * opener, seen_key_count updates, continue/break paths, final return)
 * are not visible here.
 */
59 int bch2_move_data_off_device(struct bch_dev *ca)
61 struct moving_context ctxt;
62 struct bch_fs *c = ca->fs;
/* caller must have made the device read-only first */
67 BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
/* nothing to do if the device holds no user data */
69 if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_USER)))
72 mutex_lock(&c->replicas_gc_lock);
73 bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
75 bch2_move_ctxt_init(&ctxt, NULL, SECTORS_IN_FLIGHT_PER_DEVICE);
/* never allocate replacement copies on the device being emptied */
76 __set_bit(ca->dev_idx, ctxt.avoid.d);
79 * In theory, only one pass should be necessary as we've
80 * quiesced all writes before calling this.
82 * However, in practice, more than one pass may be necessary:
83 * - Some move fails due to an error. We can find this out
84 * from the moving_context.
85 * - Some key swap failed because some of the pointers in the
86 * key in the tree changed due to caching behavior, btree gc
87 * pruning stale pointers, or tiering (if the device being
88 * removed is in tier 0). A smarter bkey_cmpxchg would
91 * Thus this scans the tree one more time than strictly necessary,
92 * but that can be viewed as a verification pass.
96 struct btree_iter iter;
100 atomic_set(&ctxt.error_count, 0);
101 atomic_set(&ctxt.error_flags, 0);
103 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
104 BTREE_ITER_PREFETCH);
106 while (!bch2_move_ctxt_wait(&ctxt) &&
107 (k = bch2_btree_iter_peek(&iter)).k &&
108 !(ret = btree_iter_err(k))) {
/* skip keys that are not data or no longer point at @ca */
109 if (!bkey_extent_is_data(k.k) ||
110 !bch2_extent_has_device(bkey_s_c_to_extent(k),
114 ret = issue_migration_move(ca, &ctxt, NULL, k);
115 if (ret == -ENOMEM) {
/* drop btree locks before blocking on in-flight IO */
116 bch2_btree_iter_unlock(&iter);
119 * memory allocation failure, wait for some IO
122 bch2_move_ctxt_wait_for_io(&ctxt);
132 if (bkey_extent_is_data(k.k)) {
133 ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
138 bch2_btree_iter_advance_pos(&iter);
139 bch2_btree_iter_cond_resched(&iter);
142 bch2_btree_iter_unlock(&iter);
143 bch2_move_ctxt_exit(&ctxt);
/* re-scan until a pass sees no keys on @ca, or we give up */
147 } while (seen_key_count && pass++ < MAX_DATA_OFF_ITER);
149 if (seen_key_count) {
150 pr_err("Unable to migrate all data in %d iterations.",
157 bch2_replicas_gc_end(c, ret);
158 mutex_unlock(&c->replicas_gc_lock);
163 * This walks the btree, and for any node on the relevant device it moves the
/*
 * bch2_move_btree_off - rewrite every node of btree @id that lives on @ca.
 *
 * Walks the btree and, for each node whose key has a pointer to @ca,
 * allocates a replacement node elsewhere via bch2_btree_node_rewrite().
 * With CONFIG_BCACHEFS_DEBUG, a second walk verifies no node still
 * references the device. Returns 0 on success or a btree IO error.
 *
 * NOTE(review): this listing is elided — retry/continue paths around the
 * rewrite, the closing braces, and the final return are not visible here.
 */
166 static int bch2_move_btree_off(struct bch_fs *c, struct bch_dev *ca,
169 struct btree_iter iter;
/* caller must have made the device read-only first */
174 BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
176 closure_init_stack(&cl);
178 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
179 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
181 if (!bch2_extent_has_device(e, ca->dev_idx))
184 ret = bch2_btree_node_rewrite(c, &iter, b->data->keys.seq, 0);
186 bch2_btree_iter_unlock(&iter);
190 bch2_btree_iter_set_locks_want(&iter, 0);
192 ret = bch2_btree_iter_unlock(&iter);
194 return ret; /* btree IO error */
/* debug-only verification pass: no node may still point at @ca */
196 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
197 for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
198 struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
200 BUG_ON(bch2_extent_has_device(e, ca->dev_idx));
202 bch2_btree_iter_unlock(&iter);
209 * This moves only the meta-data off, leaving the data (if any) in place.
210 * The data is moved off by bch2_move_data_off_device, if desired, and
213 * Before calling this, allocation of buckets to the device must have
214 * been disabled, as else we'll continue to write meta-data to the device
215 * when new buckets are picked for meta-data writes.
216 * In addition, the copying gc and allocator threads for the device
217 * must have been stopped. The allocator thread is the only thread
218 * that writes prio/gen information.
220 * Meta-data consists of:
222 * - Prio/gen information
226 * This has to move the btree nodes and the journal only:
227 * - prio/gen information is not written once the allocator thread is stopped.
228 * also, as the prio/gen information is per-device it is not moved.
229 * - the superblock will be written by the caller once after everything
232 * Note that currently there is no way to stop btree node and journal
233 * meta-data writes to a device without moving the meta-data because
234 * once a bucket is open for a btree node, unless a replacement btree
235 * node is allocated (and the tree updated), the bucket will continue
236 * to be written with updates. Similarly for the journal (it gets
237 * written until filled).
239 * This routine leaves the data (if any) in place. Whether the data
240 * should be moved off is a decision independent of whether the meta
241 * data should be moved off and stopped:
243 * - For device removal, both data and meta-data are moved off, in
246 * - However, for turning a device read-only without removing it, only
247 * meta-data is moved off since that's the only way to prevent it
248 * from being written. Data is left in the device, but no new data
/*
 * bch2_move_metadata_off_device - evacuate btree nodes and journal from @ca.
 *
 * Moves btree nodes off first (all btree IDs), then the journal; prio/gen
 * information is per-device and is not moved (see the comment block above).
 * Returns 0 on success or the first error encountered.
 *
 * NOTE(review): this listing is elided — local declarations, error-exit
 * paths between steps, and the final return are not visible here.
 */
252 int bch2_move_metadata_off_device(struct bch_dev *ca)
254 struct bch_fs *c = ca->fs;
/* caller must have made the device read-only first */
258 BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
/* nothing to do if the device holds neither journal nor btree data */
260 if (!(bch2_dev_has_data(c, ca) &
261 ((1 << BCH_DATA_JOURNAL)|
262 (1 << BCH_DATA_BTREE))))
265 mutex_lock(&c->replicas_gc_lock);
266 bch2_replicas_gc_start(c,
267 (1 << BCH_DATA_JOURNAL)|
268 (1 << BCH_DATA_BTREE));
270 /* 1st, Move the btree nodes off the device */
272 for (i = 0; i < BTREE_ID_NR; i++) {
273 ret = bch2_move_btree_off(c, ca, i);
278 /* There are no prios/gens to move -- they are already in the device. */
280 /* 2nd. Move the journal off the device */
282 ret = bch2_journal_move(ca);
287 bch2_replicas_gc_end(c, ret);
288 mutex_unlock(&c->replicas_gc_lock);
293 * Flagging data bad when forcibly removing a device after failing to
294 * migrate the data off the device.
/*
 * bch2_flag_key_bad - drop @ca's pointer from extent @orig and reinsert.
 *
 * Copies the extent into a stack buffer, removes every pointer to @ca,
 * normalizes the result (which turns a now-pointerless extent into a
 * KEY_TYPE_ERROR key, or discards it if it was cached), and writes the
 * updated key back through @iter.
 *
 * NOTE(review): this listing is elided — the parameter list (struct
 * bch_dev *ca) and BTREE_INSERT flags between lines 320 and 322 are not
 * visible here.
 */
297 static int bch2_flag_key_bad(struct btree_iter *iter,
299 struct bkey_s_c_extent orig)
301 BKEY_PADDED(key) tmp;
302 struct bkey_s_extent e;
303 struct bch_extent_ptr *ptr;
304 struct bch_fs *c = ca->fs;
/* work on a private copy of the key */
306 bkey_reassemble(&tmp.key, orig.s_c);
307 e = bkey_i_to_s_extent(&tmp.key);
/* backwards iteration is safe while dropping pointers */
309 extent_for_each_ptr_backwards(e, ptr)
310 if (ptr->dev == ca->dev_idx)
311 bch2_extent_drop_ptr(e, ptr);
314 * If the new extent no longer has any pointers, bch2_extent_normalize()
315 * will do the appropriate thing with it (turning it into a
316 * KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
318 bch2_extent_normalize(c, e.s);
320 return bch2_btree_insert_at(c, NULL, NULL, NULL,
322 BTREE_INSERT_ENTRY(iter, &tmp.key));
326 * This doesn't actually move any data -- it marks the keys as bad
327 * if they contain a pointer to a device that is forcibly removed
328 * and don't have other valid pointers. If there are valid pointers,
329 * the necessary pointers to the removed device are replaced with
330 * bad pointers instead.
332 * This is only called if bch2_move_data_off_device above failed, meaning
333 * that we've already tried to move the data MAX_DATA_OFF_ITER times and
334 * are not likely to succeed if we try again.
/*
 * bch2_flag_data_bad - mark extents on forcibly-removed device @ca as bad.
 *
 * Walks the extents btree and, for every extent still pointing at @ca,
 * drops that pointer via bch2_flag_key_bad() (leaving a KEY_TYPE_ERROR
 * key when no other pointers remain). Moves no data. Returns 0 on
 * success or a btree iterator error.
 *
 * NOTE(review): this listing is elided — local declarations, the
 * continue/advance paths after each branch, and the final return are not
 * visible here.
 */
336 int bch2_flag_data_bad(struct bch_dev *ca)
338 struct bch_fs *c = ca->fs;
340 struct bkey_s_c_extent e;
341 struct btree_iter iter;
344 mutex_lock(&c->replicas_gc_lock);
345 bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
347 bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
348 POS_MIN, BTREE_ITER_PREFETCH);
350 while ((k = bch2_btree_iter_peek(&iter)).k &&
351 !(ret = btree_iter_err(k))) {
352 if (!bkey_extent_is_data(k.k))
355 e = bkey_s_c_to_extent(k);
356 if (!bch2_extent_has_device(e, ca->dev_idx))
359 ret = bch2_flag_key_bad(&iter, ca, e);
362 * don't want to leave ret == -EINTR, since if we raced and
363 * something else overwrote the key we could spuriously return
372 * If the replica we're dropping was dirty and there is an
373 * additional cached replica, the cached replica will now be
374 * considered dirty - upon inserting the new version of the key,
375 * the bucket accounting will be updated to reflect the fact
376 * that the cached data is now dirty and everything works out as
377 * if by magic without us having to do anything.
379 * The one thing we need to be concerned with here is there's a
380 * race between when we drop any stale pointers from the key
381 * we're about to insert, and when the key actually gets
382 * inserted and the cached data is marked as dirty - we could
383 * end up trying to insert a key with a pointer that should be
384 * dirty, but points to stale data.
386 * If that happens the insert code just bails out and doesn't do
387 * the insert - however, it doesn't return an error. Hence we
388 * need to always recheck the current key before advancing to
393 if (bkey_extent_is_data(k.k)) {
394 ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
399 bch2_btree_iter_advance_pos(&iter);
402 bch2_btree_iter_unlock(&iter);
404 bch2_replicas_gc_end(c, ret);
405 mutex_unlock(&c->replicas_gc_lock);