#include "btree_update.h"

#include <linux/ioprio.h>

#include <trace/events/bcachefs.h>
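
/*
 * Find the pointer in extent @e that points into the same bucket as @ptr:
 * same device, same generation, and an offset that lands in the same
 * bucket once shifted down by that device's bucket_bits.
 */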
static struct bch_extent_ptr *bkey_find_ptr(struct bch_fs *c,
					    struct bkey_s_extent e,
					    struct bch_extent_ptr ptr)
{
	struct bch_extent_ptr *ptr2;
	unsigned bucket_bits = c->devs[ptr.dev]->bucket_bits;

	extent_for_each_ptr(e, ptr2)
		if (ptr2->dev == ptr.dev &&
		    ptr2->gen == ptr.gen &&
		    (ptr2->offset >> bucket_bits) ==
		    (ptr.offset >> bucket_bits))
			return ptr2;

	return NULL;
}
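
/*
 * Pick the pointer in the existing extent @e that this migration should
 * replace: the specific pointer being moved (m->move_ptr) if this is a
 * move, otherwise any pointer from the original key (m->key) that is
 * still present.
 */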
static struct bch_extent_ptr *bch2_migrate_matching_ptr(struct migrate_write *m,
							struct bkey_s_extent e)
{
	const struct bch_extent_ptr *ptr;
	struct bch_extent_ptr *ret = NULL;
	if (m->move)
		ret = bkey_find_ptr(m->op.c, e, m->move_ptr);
	else
		extent_for_each_ptr(bkey_i_to_s_c_extent(&m->key), ptr)
			if ((ret = bkey_find_ptr(m->op.c, e, *ptr)))
				break;

	return ret;
}
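
/*
 * Index update hook for a migration write: after the data has been
 * written to its new location, walk the existing extents overlapping the
 * keys in op->insert_keys, splice the new pointers into each extent,
 * drop the pointer that was migrated away, and write the result back to
 * the extents btree.
 */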
static int bch2_migrate_index_update(struct bch_write_op *op)
	struct bch_fs *c = op->c;
	struct migrate_write *m =
		container_of(op, struct migrate_write, op);
	struct keylist *keys = &op->insert_keys;
	struct btree_iter iter;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),

	struct bkey_s_extent insert =
		bkey_i_to_s_extent(bch2_keylist_front(keys));
	struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
	struct bch_extent_ptr *ptr;
	struct bkey_s_extent e;

	ret = bch2_btree_iter_unlock(&iter);

	if (!bkey_extent_is_data(k.k))

	bkey_reassemble(&new.k, k);
	bch2_cut_front(iter.pos, &new.k);
	bch2_cut_back(insert.k->p, &new.k.k);
	e = bkey_i_to_s_extent(&new.k);

	/* hack - promotes can race: */
	extent_for_each_ptr(insert, ptr)
		if (bch2_extent_has_device(e.c, ptr->dev))

	ptr = bch2_migrate_matching_ptr(m, e);

	int nr_new_dirty = bch2_extent_nr_dirty_ptrs(insert.s_c);
	unsigned insert_flags =

	/* copygc uses btree node reserve: */
	insert_flags |= BTREE_INSERT_USE_RESERVE;

	nr_new_dirty -= !ptr->cached;
	__bch2_extent_drop_ptr(e, ptr);

	BUG_ON(nr_new_dirty < 0);

	memcpy_u64s(extent_entry_last(e),
		    bkey_val_u64s(insert.k));
	e.k->u64s += bkey_val_u64s(insert.k);

	bch2_extent_narrow_crcs(e);
	bch2_extent_drop_redundant_crcs(e);
	bch2_extent_normalize(c, e.s);
	bch2_extent_mark_replicas_cached(c, e, nr_new_dirty);

	ret = bch2_btree_insert_at(c, &op->res,
				   NULL, op_journal_seq(op),
				   BTREE_INSERT_ENTRY(&iter, &new.k));
	if (ret && ret != -EINTR)

	bch2_btree_iter_advance_pos(&iter);

	while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
		bch2_keylist_pop_front(keys);
		if (bch2_keylist_empty(keys))

	bch2_cut_front(iter.pos, bch2_keylist_front(keys));

	bch2_btree_iter_unlock(&iter);
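
/*
 * Set up a migrate_write: remember the key being rewritten and the
 * pointer being moved, mark the write cached if the source data was
 * cached, and point op->index_update_fn at bch2_migrate_index_update so
 * the btree is updated when the data write completes.
 */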
void bch2_migrate_write_init(struct bch_fs *c,
			     struct migrate_write *m,
			     struct write_point *wp,
			     struct bkey_s_c k,
			     const struct bch_extent_ptr *move_ptr,
			     unsigned flags)
{
	bkey_reassemble(&m->key, k);

	m->move = move_ptr != NULL;
	if (move_ptr)
		m->move_ptr = *move_ptr;

	if (bkey_extent_is_cached(k.k) ||
	    (move_ptr && move_ptr->cached))
		flags |= BCH_WRITE_CACHED;

	bch2_write_op_init(&m->op, c, (struct disk_reservation) { 0 }, wp,
			   bkey_start_pos(k.k), NULL, flags);

	if (m->move)
		m->op.alloc_reserve = RESERVE_MOVINGGC;

	m->op.nonce = extent_current_nonce(bkey_s_c_to_extent(k));
	m->op.nr_replicas = 1;
	m->op.index_update_fn = bch2_migrate_index_update;
}
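
/*
 * Shared bio setup for the read and write halves of a move: size the bio
 * for @sectors, give it idle I/O priority, and route completion through
 * the moving_io's closure.
 */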
static void migrate_bio_init(struct moving_io *io, struct bio *bio,
			     unsigned sectors)
{
	bio_init(bio, io->bi_inline_vecs,
		 DIV_ROUND_UP(sectors, PAGE_SECTORS));
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size = sectors << 9;
	bio->bi_private = &io->cl;
	bch2_bio_map(bio, NULL);
}
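
/*
 * Release a moving_io: account its sectors as no longer in flight, wake
 * anyone throttled on that count, and free the bounce pages attached to
 * the write bio.
 */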
static void moving_io_free(struct moving_io *io)
{
	struct moving_context *ctxt = io->ctxt;
	struct bio_vec *bv;
	int i;

	atomic_sub(io->write.key.k.size, &ctxt->sectors_in_flight);
	wake_up(&ctxt->wait);

	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, i)
		if (bv->bv_page)
			__free_page(bv->bv_page);
	kfree(io);
}
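
/*
 * Record a failed read or write; only the count is kept for now, the
 * per-type error flags are still commented out.
 */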
static void moving_error(struct moving_context *ctxt, unsigned flag)
{
	atomic_inc(&ctxt->error_count);
	//atomic_or(flag, &ctxt->error_flags);
}
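
/*
 * Write-side completion: note any write error, then drop the moving_io.
 */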
static void moving_write_done(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	if (io->write.op.error)
		moving_error(io->ctxt, MOVING_FLAG_WRITE);

	//if (io->replace.failures)
	//	trace_copy_collision(q, &io->key.k);

	moving_io_free(io);
}
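
/*
 * Issue the write half of a move once the read has completed; the
 * closure runs moving_write_done() when the write finishes.
 */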
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bch_write_op *op = &io->write.op;

	closure_call(&op->cl, bch2_write, NULL, &io->cl);
	closure_return_with_destructor(&io->cl, moving_write_done);
}
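
/*
 * Writes are issued in the order the reads were started: only the
 * moving_io at the head of ctxt->reads is eligible, and only once its
 * read has completed.
 */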
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
{
	struct moving_io *io =
		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);

	return io && io->read_completed ? io : NULL;
}
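
/*
 * Read-side completion: record errors, mark the io as ready to be
 * written out, and poke anyone waiting in move_ctxt_wait_event(); the
 * closure ref taken in bch2_data_move() is dropped here.
 */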
static void read_moving_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct moving_context *ctxt = io->ctxt;

	trace_move_read_done(&io->write.key.k);

	if (bio->bi_error)
		moving_error(io->ctxt, MOVING_FLAG_READ);

	io->read_completed = true;
	if (next_pending_write(ctxt))
		wake_up(&ctxt->wait);

	closure_put(&ctxt->cl);
}
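
/*
 * Kick off moving a single extent: pick a pointer to read from, allocate
 * a moving_io with bounce pages, set up the migration write, account the
 * extent as in flight, and submit the read.  The write is issued later,
 * from do_pending_writes(), once the read completes.
 */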
int bch2_data_move(struct bch_fs *c,
		   struct moving_context *ctxt,
		   struct write_point *wp,
		   struct bkey_s_c k,
		   const struct bch_extent_ptr *move_ptr)
{
	struct extent_pick_ptr pick;
	struct moving_io *io;

	bch2_extent_pick_ptr(c, k, &ctxt->avoid, &pick);
	if (IS_ERR_OR_NULL(pick.ca))
		return pick.ca ? PTR_ERR(pick.ca) : 0;

	io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) *
		     DIV_ROUND_UP(k.k->size, PAGE_SECTORS), GFP_KERNEL);
	if (!io)
		return -ENOMEM;

	io->ctxt = ctxt;

	migrate_bio_init(io, &io->rbio.bio, k.k->size);

	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
	io->rbio.bio.bi_end_io = read_moving_endio;

	if (bio_alloc_pages(&io->rbio.bio, GFP_KERNEL)) {
		kfree(io);
		return -ENOMEM;
	}

	migrate_bio_init(io, &io->write.op.wbio.bio, k.k->size);

	bch2_migrate_write_init(c, &io->write, wp, k, move_ptr, 0);

	trace_move_read(&io->write.key.k);

	ctxt->sectors_moved += k.k->size;
	if (ctxt->rate)
		bch2_ratelimit_increment(ctxt->rate, k.k->size);

	atomic_add(k.k->size, &ctxt->sectors_in_flight);
	list_add_tail(&io->list, &ctxt->reads);

	/*
	 * dropped by read_moving_endio() - guards against use after free of
	 * ctxt when doing wakeup
	 */
	closure_get(&io->ctxt->cl);
	bch2_read_extent(c, &io->rbio, k, &pick, 0);

	return 0;
}
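
/*
 * Issue writes for all reads at the head of the queue that have
 * completed; reads that failed are freed without being written.
 */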
static void do_pending_writes(struct moving_context *ctxt)
{
	struct moving_io *io;

	while ((io = next_pending_write(ctxt))) {
		list_del(&io->list);
		if (io->rbio.bio.bi_error) {
			moving_io_free(io);
			continue;
		}
		trace_move_write(&io->write.key.k);
		closure_call(&io->cl, write_moving, NULL, &ctxt->cl);
	}
}
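
/*
 * Flush any writes whose reads have completed, then sleep until either
 * the condition becomes true or another write becomes pending.
 */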
#define move_ctxt_wait_event(_ctxt, _cond)			\
do {								\
	do_pending_writes(_ctxt);				\
	if (_cond)						\
		break;						\
	__wait_event((_ctxt)->wait,				\
		     next_pending_write(_ctxt) || (_cond));	\
} while (1)
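
/*
 * Throttle the caller: wait until the number of sectors in flight drops
 * below the configured maximum, then obey the rate limiter if one was
 * given.
 */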
int bch2_move_ctxt_wait(struct moving_context *ctxt)
{
	move_ctxt_wait_event(ctxt,
			     atomic_read(&ctxt->sectors_in_flight) <
			     ctxt->max_sectors_in_flight);

	return ctxt->rate
		? bch2_ratelimit_wait_freezable_stoppable(ctxt->rate)
		: 0;
}
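
/*
 * Wait for at least one in-flight move to complete (or for all of them
 * to drain).
 */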
void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
	unsigned sectors_pending = atomic_read(&ctxt->sectors_in_flight);

	move_ctxt_wait_event(ctxt,
		!atomic_read(&ctxt->sectors_in_flight) ||
		atomic_read(&ctxt->sectors_in_flight) != sectors_pending);
}
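
/*
 * Tear down a moving_context: wait for every outstanding read and write,
 * then check nothing was left behind.
 */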
void bch2_move_ctxt_exit(struct moving_context *ctxt)
{
	move_ctxt_wait_event(ctxt, !atomic_read(&ctxt->sectors_in_flight));
	closure_sync(&ctxt->cl);

	EBUG_ON(!list_empty(&ctxt->reads));
	EBUG_ON(atomic_read(&ctxt->sectors_in_flight));
}
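
/*
 * Initialize a moving_context with an optional rate limiter and a cap on
 * the number of sectors that may be in flight at once.
 */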
void bch2_move_ctxt_init(struct moving_context *ctxt,
			 struct bch_ratelimit *rate,
			 unsigned max_sectors_in_flight)
{
	memset(ctxt, 0, sizeof(*ctxt));
	closure_init_stack(&ctxt->cl);

	ctxt->rate = rate;
	ctxt->max_sectors_in_flight = max_sectors_in_flight;

	INIT_LIST_HEAD(&ctxt->reads);
	init_waitqueue_head(&ctxt->wait);
}
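
/*
 * Illustrative sketch of how a caller might drive this machinery.  The
 * key-iteration helper (for_each_extent_to_move), write point, rate
 * limiter and in-flight cap are placeholders supplied by the caller,
 * not things defined in this file:
 *
 *	struct moving_context ctxt;
 *
 *	bch2_move_ctxt_init(&ctxt, &rate, max_sectors_in_flight);
 *
 *	for_each_extent_to_move(k) {
 *		if (bch2_move_ctxt_wait(&ctxt))
 *			break;
 *		if (bch2_data_move(c, &ctxt, wp, k, move_ptr))
 *			break;
 *	}
 *
 *	bch2_move_ctxt_exit(&ctxt);
 */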