]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/migrate.c
Update bcachefs sources to d5e561b3cc bcachefs: BCH_DATA ioctl
[bcachefs-tools-debian] / libbcachefs / migrate.c
1 /*
2  * Code for moving data off a device.
3  */
4
5 #include "bcachefs.h"
6 #include "btree_update.h"
7 #include "buckets.h"
8 #include "extents.h"
9 #include "io.h"
10 #include "journal.h"
11 #include "keylist.h"
12 #include "migrate.h"
13 #include "move.h"
14 #include "super-io.h"
15
16 static bool migrate_pred(void *arg, struct bkey_s_c_extent e)
17 {
18         struct bch_dev *ca = arg;
19
20         return bch2_extent_has_device(e, ca->dev_idx);
21 }
22
23 #define MAX_DATA_OFF_ITER       10
24
25 static int bch2_dev_usrdata_migrate(struct bch_fs *c, struct bch_dev *ca,
26                                     int flags)
27 {
28         struct btree_iter iter;
29         struct bkey_s_c k;
30         struct bch_move_stats stats;
31         unsigned pass = 0;
32         int ret = 0;
33
34         if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_USER)))
35                 return 0;
36
37         /*
38          * XXX: we should be able to do this in one pass, but bch2_move_data()
39          * can spuriously fail to move an extent due to racing with other move
40          * operations
41          */
42         do {
43                 memset(&stats, 0, sizeof(stats));
44
45                 ret = bch2_move_data(c, NULL,
46                                      SECTORS_IN_FLIGHT_PER_DEVICE,
47                                      NULL,
48                                      writepoint_hashed((unsigned long) current),
49                                      0,
50                                      ca->dev_idx,
51                                      POS_MIN, POS_MAX,
52                                      migrate_pred, ca,
53                                      &stats);
54                 if (ret) {
55                         bch_err(c, "error migrating data: %i", ret);
56                         return ret;
57                 }
58         } while (atomic64_read(&stats.keys_moved) && pass++ < MAX_DATA_OFF_ITER);
59
60         if (atomic64_read(&stats.keys_moved)) {
61                 bch_err(c, "unable to migrate all data in %d iterations",
62                         MAX_DATA_OFF_ITER);
63                 return -1;
64         }
65
66         mutex_lock(&c->replicas_gc_lock);
67         bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
68
69         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, BTREE_ITER_PREFETCH, k) {
70                 ret = bch2_check_mark_super(c, BCH_DATA_USER, bch2_bkey_devs(k));
71                 if (ret) {
72                         bch_err(c, "error migrating data %i from check_mark_super()", ret);
73                         break;
74                 }
75         }
76
77         bch2_replicas_gc_end(c, ret);
78         mutex_unlock(&c->replicas_gc_lock);
79         return ret;
80 }
81
/*
 * Evacuate btree metadata from @ca by rewriting every btree node that still
 * has a pointer to it; the rewrite allocates the new copy elsewhere.
 * Returns 0 on success or a negative error.
 */
static int bch2_dev_metadata_migrate(struct bch_fs *c, struct bch_dev *ca,
				     int flags)
{
	struct btree_iter iter;
	struct btree *b;
	int ret = 0;
	unsigned id;

	/* Nothing to do if the device holds no btree data: */
	if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_BTREE)))
		return 0;

	/*
	 * Run under replicas gc so that, once no node references @ca, stale
	 * replicas entries for it can be dropped by bch2_replicas_gc_end():
	 */
	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);

	/* Walk every node of every btree: */
	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);

			/* Node has no pointer to @ca — leave it alone: */
			if (!bch2_extent_has_device(e, ca->dev_idx))
				continue;

			/*
			 * Rewrite the node; seq presumably guards against the
			 * node having been freed/reused under us — TODO confirm:
			 */
			ret = bch2_btree_node_rewrite(c, &iter, b->data->keys.seq, 0);
			if (ret) {
				bch2_btree_iter_unlock(&iter);
				goto err;
			}
		}
		/* Unlock may itself report an iterator error: */
		ret = bch2_btree_iter_unlock(&iter);
		if (ret)
			goto err;
	}
err:
	/* ret tells replicas gc whether the walk completed cleanly: */
	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);
	return ret;
}
118
119 int bch2_dev_data_migrate(struct bch_fs *c, struct bch_dev *ca, int flags)
120 {
121         BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW &&
122                bch2_dev_is_online(ca));
123
124         return bch2_dev_usrdata_migrate(c, ca, flags) ?:
125                 bch2_dev_metadata_migrate(c, ca, flags);
126 }
127
128 static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s_extent e,
129                          unsigned dev_idx, int flags, bool metadata)
130 {
131         unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
132         unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
133         unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
134         unsigned nr_good;
135
136         bch2_extent_drop_device(e, dev_idx);
137
138         nr_good = bch2_extent_nr_good_ptrs(c, e.c);
139         if ((!nr_good && !(flags & lost)) ||
140             (nr_good < replicas && !(flags & degraded)))
141                 return -EINVAL;
142
143         return 0;
144 }
145
/*
 * Drop all user-data extent pointers to @dev_idx (without moving the data),
 * rewriting each affected extent in place.  Fails with -EINVAL (via
 * drop_dev_ptrs()) unless @flags permits the resulting data loss/degradation.
 */
static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
	struct bkey_s_c k;
	struct bkey_s_extent e;
	BKEY_PADDED(key) tmp;
	struct btree_iter iter;
	int ret = 0;

	/*
	 * Under replicas gc: every key we keep gets its replicas entry
	 * re-marked, so entries referencing @dev_idx can be dropped at the end:
	 */
	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
			     POS_MIN, BTREE_ITER_PREFETCH);

	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(ret = btree_iter_err(k))) {
		/* Keys without a pointer to @dev_idx just get re-marked: */
		if (!bkey_extent_is_data(k.k) ||
		    !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
			ret = bch2_check_mark_super(c, BCH_DATA_USER,
						    bch2_bkey_devs(k));
			if (ret)
				break;
			bch2_btree_iter_next(&iter);
			continue;
		}

		/* Build the modified key in a stack copy: */
		bkey_reassemble(&tmp.key, k);
		e = bkey_i_to_s_extent(&tmp.key);

		ret = drop_dev_ptrs(c, e, dev_idx, flags, false);
		if (ret)
			break;

		/*
		 * If the new extent no longer has any pointers, bch2_extent_normalize()
		 * will do the appropriate thing with it (turning it into a
		 * KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
		 */
		bch2_extent_normalize(c, e.s);

		ret = bch2_check_mark_super(c, BCH_DATA_USER,
				bch2_bkey_devs(bkey_i_to_s_c(&tmp.key)));
		if (ret)
			break;

		/* Point the iterator at the start of the key we're replacing: */
		iter.pos = bkey_start_pos(&tmp.key.k);

		ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
					   BTREE_INSERT_ATOMIC|
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&iter, &tmp.key));

		/*
		 * don't want to leave ret == -EINTR, since if we raced and
		 * something else overwrote the key we could spuriously return
		 * -EINTR below:
		 */
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;
	}

	bch2_btree_iter_unlock(&iter);

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}
216
/*
 * Drop all btree-node pointers to @dev_idx (without rewriting the nodes),
 * updating each affected node's key in place.  Refuses
 * BCH_FORCE_IF_METADATA_LOST — dropping the last copy of a btree node isn't
 * handled yet.
 */
static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
	struct btree_iter iter;
	struct closure cl;
	struct btree *b;
	unsigned id;
	int ret;

	/* don't handle this yet: */
	if (flags & BCH_FORCE_IF_METADATA_LOST)
		return -EINVAL;

	closure_init_stack(&cl);

	/* Under replicas gc, as in the other drop/migrate paths: */
	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);

	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
			struct bkey_i_extent *new_key;
retry:
			if (!bch2_extent_has_device(bkey_i_to_s_c_extent(&b->key),
						    dev_idx)) {
				/*
				 * Node untouched — drop back to read locks and
				 * just re-mark its replicas entry:
				 */
				bch2_btree_iter_set_locks_want(&iter, 0);

				ret = bch2_check_mark_super(c, BCH_DATA_BTREE,
						bch2_bkey_devs(bkey_i_to_s_c(&b->key)));
				if (ret)
					goto err;
			} else {
				/* Build the modified node key in a stack copy: */
				bkey_copy(&tmp.k, &b->key);
				new_key = bkey_i_to_extent(&tmp.k);

				ret = drop_dev_ptrs(c, extent_i_to_s(new_key),
						    dev_idx, flags, true);
				if (ret)
					goto err;

				/*
				 * Upgrade to intent locks on all levels; on
				 * failure the iterator was presumably unlocked,
				 * so re-fetch the node and retry:
				 */
				if (!bch2_btree_iter_set_locks_want(&iter, U8_MAX)) {
					b = bch2_btree_iter_peek_node(&iter);
					goto retry;
				}

				ret = bch2_btree_node_update_key(c, &iter, b, new_key);
				if (ret == -EINTR) {
					/* Raced; re-fetch the node and retry: */
					b = bch2_btree_iter_peek_node(&iter);
					goto retry;
				}
				if (ret)
					goto err;
			}
		}
		bch2_btree_iter_unlock(&iter);
	}

	ret = 0;
out:
	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
err:
	bch2_btree_iter_unlock(&iter);
	goto out;
}
283
/*
 * Drop all pointers to @dev_idx — user data first, then btree metadata —
 * stopping at the first error.
 */
int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
	int ret = bch2_dev_usrdata_drop(c, dev_idx, flags);

	return ret ?: bch2_dev_metadata_drop(c, dev_idx, flags);
}