#include "bcachefs.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "io.h"
#include "move.h"
#include "super-io.h"
#include "keylist.h"

#include <linux/ioprio.h>

#include <trace/events/bcachefs.h>

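/*
 * Find the pointer in @e, if any, that points to the same bucket (same
 * device, generation and bucket offset) as @ptr:
 */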
static struct bch_extent_ptr *bkey_find_ptr(struct bch_fs *c,
                                            struct bkey_s_extent e,
                                            struct bch_extent_ptr ptr)
{
        struct bch_extent_ptr *ptr2;
        unsigned bucket_bits = c->devs[ptr.dev]->bucket_bits;

        extent_for_each_ptr(e, ptr2)
                if (ptr2->dev == ptr.dev &&
                    ptr2->gen == ptr.gen &&
                    (ptr2->offset >> bucket_bits) ==
                    (ptr.offset >> bucket_bits))
                        return ptr2;

        return NULL;
}

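/*
 * For a move, match against the specific pointer being evacuated; otherwise
 * match against any pointer from the key the data was originally read from:
 */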
static struct bch_extent_ptr *bch2_migrate_matching_ptr(struct migrate_write *m,
                                                        struct bkey_s_extent e)
{
        const struct bch_extent_ptr *ptr;
        struct bch_extent_ptr *ret = NULL;

        if (m->move)
                ret = bkey_find_ptr(m->op.c, e, m->move_ptr);
        else
                extent_for_each_ptr(bkey_i_to_s_c_extent(&m->key), ptr)
                        if ((ret = bkey_find_ptr(m->op.c, e, *ptr)))
                                break;

        return ret;
}

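/*
 * Index update hook for migrate writes: for each newly written key, find the
 * existing extent it belongs to, drop the pointer being moved (for moves),
 * append the new pointers and update the extents btree; keys whose extent no
 * longer matches (it was overwritten while the data was being copied) are
 * skipped:
 */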
static int bch2_migrate_index_update(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct migrate_write *m =
                container_of(op, struct migrate_write, op);
        struct keylist *keys = &op->insert_keys;
        struct btree_iter iter;
        int ret = 0;

        bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
                             bkey_start_pos(&bch2_keylist_front(keys)->k),
                             BTREE_ITER_INTENT);

        while (1) {
                struct bkey_s_extent insert =
                        bkey_i_to_s_extent(bch2_keylist_front(keys));
                struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
                struct bch_extent_ptr *ptr;
                struct bkey_s_extent e;
                BKEY_PADDED(k) new;

                if (!k.k) {
                        ret = bch2_btree_iter_unlock(&iter);
                        break;
                }

                if (!bkey_extent_is_data(k.k))
                        goto nomatch;

                bkey_reassemble(&new.k, k);
                bch2_cut_front(iter.pos, &new.k);
                bch2_cut_back(insert.k->p, &new.k.k);
                e = bkey_i_to_s_extent(&new.k);

                /* hack - promotes can race: */
                if (m->promote)
                        extent_for_each_ptr(insert, ptr)
                                if (bch2_extent_has_device(e.c, ptr->dev))
                                        goto nomatch;

                ptr = bch2_migrate_matching_ptr(m, e);
                if (ptr) {
                        int nr_new_dirty = bch2_extent_nr_dirty_ptrs(insert.s_c);
                        unsigned insert_flags =
                                BTREE_INSERT_ATOMIC|
                                BTREE_INSERT_NOFAIL;

                        /* copygc uses btree node reserve: */
                        if (m->move)
                                insert_flags |= BTREE_INSERT_USE_RESERVE;

                        if (m->move) {
                                nr_new_dirty -= !ptr->cached;
                                __bch2_extent_drop_ptr(e, ptr);
                        }

                        BUG_ON(nr_new_dirty < 0);

                        memcpy_u64s(extent_entry_last(e),
                                    insert.v,
                                    bkey_val_u64s(insert.k));
                        e.k->u64s += bkey_val_u64s(insert.k);

                        bch2_extent_narrow_crcs(e);
                        bch2_extent_drop_redundant_crcs(e);
                        bch2_extent_normalize(c, e.s);
                        bch2_extent_mark_replicas_cached(c, e, nr_new_dirty);

                        ret = bch2_btree_insert_at(c, &op->res,
                                        NULL, op_journal_seq(op),
                                        insert_flags,
                                        BTREE_INSERT_ENTRY(&iter, &new.k));
                        if (ret && ret != -EINTR)
                                break;
                } else {
nomatch:
                        bch2_btree_iter_advance_pos(&iter);
                }

                while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
                        bch2_keylist_pop_front(keys);
                        if (bch2_keylist_empty(keys))
                                goto out;
                }

                bch2_cut_front(iter.pos, bch2_keylist_front(keys));
        }
out:
        bch2_btree_iter_unlock(&iter);
        return ret;
}

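/*
 * Initialize the write half of a migration for @k: the write is marked
 * cached if the existing data (or the pointer being moved) was cached, moves
 * allocate from the moving GC reserve, and completion goes through
 * bch2_migrate_index_update():
 */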
void bch2_migrate_write_init(struct bch_fs *c,
                             struct migrate_write *m,
                             struct write_point *wp,
                             struct bkey_s_c k,
                             const struct bch_extent_ptr *move_ptr,
                             unsigned flags)
{
        bkey_reassemble(&m->key, k);

        m->promote = false;
        m->move = move_ptr != NULL;
        if (move_ptr)
                m->move_ptr = *move_ptr;

        if (bkey_extent_is_cached(k.k) ||
            (move_ptr && move_ptr->cached))
                flags |= BCH_WRITE_CACHED;

        bch2_write_op_init(&m->op, c, (struct disk_reservation) { 0 }, wp,
                           bkey_start_pos(k.k), NULL, flags);

        if (m->move)
                m->op.alloc_reserve = RESERVE_MOVINGGC;

        m->op.nonce             = extent_current_nonce(bkey_s_c_to_extent(k));
        m->op.nr_replicas       = 1;
        m->op.index_update_fn   = bch2_migrate_index_update;
}

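/* Set up one of the two bios (read side or write side) backing a moving_io: */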
static void migrate_bio_init(struct moving_io *io, struct bio *bio,
                             unsigned sectors)
{
        bio_init(bio, io->bi_inline_vecs,
                 DIV_ROUND_UP(sectors, PAGE_SECTORS));
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = sectors << 9;
        bio->bi_private         = &io->cl;
        bch2_bio_map(bio, NULL);
}

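/*
 * Free a moving_io: return its sectors to the context's in flight count,
 * wake up anyone waiting on that count, and release the data pages:
 */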
static void moving_io_free(struct moving_io *io)
{
        struct moving_context *ctxt = io->ctxt;
        struct bio_vec *bv;
        int i;

        atomic_sub(io->write.key.k.size, &ctxt->sectors_in_flight);
        wake_up(&ctxt->wait);

        bio_for_each_segment_all(bv, &io->write.op.wbio.bio, i)
                if (bv->bv_page)
                        __free_page(bv->bv_page);
        kfree(io);
}

static void moving_error(struct moving_context *ctxt, unsigned flag)
{
        atomic_inc(&ctxt->error_count);
        //atomic_or(flag, &ctxt->error_flags);
}

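/* Write side completion: record any error, then free the moving_io: */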
static void moving_write_done(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);

        if (io->write.op.error)
                moving_error(io->ctxt, MOVING_FLAG_WRITE);

        //if (io->replace.failures)
        //      trace_copy_collision(q, &io->key.k);

        moving_io_free(io);
}

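/* Issue the write half of a move; moving_write_done() runs when it completes: */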
static void write_moving(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bch_write_op *op = &io->write.op;

        closure_call(&op->cl, bch2_write, NULL, &io->cl);
        closure_return_with_destructor(&io->cl, moving_write_done);
}

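/*
 * Reads may complete out of order, but writes are issued in the order the
 * reads were started: return the oldest read if it has completed, NULL
 * otherwise:
 */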
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
        struct moving_io *io =
                list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

        return io && io->read_completed ? io : NULL;
}

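/* Read side completion: mark this io ready to be written out and wake the mover: */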
static void read_moving_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct moving_context *ctxt = io->ctxt;

        trace_move_read_done(&io->write.key.k);

        if (bio->bi_error)
                moving_error(io->ctxt, MOVING_FLAG_READ);

        io->read_completed = true;
        if (next_pending_write(ctxt))
                wake_up(&ctxt->wait);

        closure_put(&ctxt->cl);
}

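/*
 * Start moving the data for extent @k: pick a replica to read from, allocate
 * a moving_io and its data pages, and submit the read; the write is issued
 * from do_pending_writes() once the read completes:
 */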
int bch2_data_move(struct bch_fs *c,
                   struct moving_context *ctxt,
                   struct write_point *wp,
                   struct bkey_s_c k,
                   const struct bch_extent_ptr *move_ptr)
{
        struct extent_pick_ptr pick;
        struct moving_io *io;

        bch2_extent_pick_ptr(c, k, &ctxt->avoid, &pick);
        if (IS_ERR_OR_NULL(pick.ca))
                return pick.ca ? PTR_ERR(pick.ca) : 0;

        io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) *
                     DIV_ROUND_UP(k.k->size, PAGE_SECTORS), GFP_KERNEL);
        if (!io)
                return -ENOMEM;

        io->ctxt = ctxt;

        migrate_bio_init(io, &io->rbio.bio, k.k->size);

        bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
        io->rbio.bio.bi_iter.bi_sector  = bkey_start_offset(k.k);
        io->rbio.bio.bi_end_io          = read_moving_endio;

        if (bio_alloc_pages(&io->rbio.bio, GFP_KERNEL)) {
                kfree(io);
                return -ENOMEM;
        }

        migrate_bio_init(io, &io->write.op.wbio.bio, k.k->size);

        bch2_migrate_write_init(c, &io->write, wp, k, move_ptr, 0);

        trace_move_read(&io->write.key.k);

        ctxt->keys_moved++;
        ctxt->sectors_moved += k.k->size;
        if (ctxt->rate)
                bch2_ratelimit_increment(ctxt->rate, k.k->size);

        atomic_add(k.k->size, &ctxt->sectors_in_flight);
        list_add_tail(&io->list, &ctxt->reads);

        /*
         * dropped by read_moving_endio() - guards against use after free of
         * ctxt when doing wakeup
         */
        closure_get(&io->ctxt->cl);
        bch2_read_extent(c, &io->rbio, k, &pick, 0);
        return 0;
}

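/* Issue writes for all reads at the head of the list that have completed: */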
static void do_pending_writes(struct moving_context *ctxt)
{
        struct moving_io *io;

        while ((io = next_pending_write(ctxt))) {
                list_del(&io->list);

                if (io->rbio.bio.bi_error) {
                        moving_io_free(io);
                        continue;
                }

                trace_move_write(&io->write.key.k);
                closure_call(&io->cl, write_moving, NULL, &ctxt->cl);
        }
}

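/*
 * Wait for @_cond to become true, issuing any pending writes each time we
 * wake up in the meantime:
 */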
#define move_ctxt_wait_event(_ctxt, _cond)                      \
do {                                                            \
        do_pending_writes(_ctxt);                               \
                                                                \
        if (_cond)                                              \
                break;                                          \
        __wait_event((_ctxt)->wait,                             \
                     next_pending_write(_ctxt) || (_cond));     \
} while (1)

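/*
 * Throttle the caller: wait until sectors_in_flight drops below the limit,
 * then honour the rate limit, if one was configured:
 */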
int bch2_move_ctxt_wait(struct moving_context *ctxt)
{
        move_ctxt_wait_event(ctxt,
                             atomic_read(&ctxt->sectors_in_flight) <
                             ctxt->max_sectors_in_flight);

        return ctxt->rate
                ? bch2_ratelimit_wait_freezable_stoppable(ctxt->rate)
                : 0;
}

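/* Wait for in flight i/o to either make progress or drain completely: */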
void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
        unsigned sectors_pending = atomic_read(&ctxt->sectors_in_flight);

        move_ctxt_wait_event(ctxt,
                !atomic_read(&ctxt->sectors_in_flight) ||
                atomic_read(&ctxt->sectors_in_flight) != sectors_pending);
}

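/* Flush all in flight moves and tear down the context: */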
void bch2_move_ctxt_exit(struct moving_context *ctxt)
{
        move_ctxt_wait_event(ctxt, !atomic_read(&ctxt->sectors_in_flight));
        closure_sync(&ctxt->cl);

        EBUG_ON(!list_empty(&ctxt->reads));
        EBUG_ON(atomic_read(&ctxt->sectors_in_flight));
}

void bch2_move_ctxt_init(struct moving_context *ctxt,
                         struct bch_ratelimit *rate,
                         unsigned max_sectors_in_flight)
{
        memset(ctxt, 0, sizeof(*ctxt));
        closure_init_stack(&ctxt->cl);

        ctxt->rate = rate;
        ctxt->max_sectors_in_flight = max_sectors_in_flight;

        INIT_LIST_HEAD(&ctxt->reads);
        init_waitqueue_head(&ctxt->wait);
}