// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "subvolume.h"
#include "trace.h"

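/*
 * Tracepoint helpers: only build the (fairly expensive) printbuf
 * representation of the keys involved when the corresponding tracepoint is
 * actually enabled.
 */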
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
        if (trace_move_extent_finish_enabled()) {
                struct printbuf buf = PRINTBUF;

                bch2_bkey_val_to_text(&buf, c, k);
                trace_move_extent_finish(c, buf.buf);
                printbuf_exit(&buf);
        }
}

static void trace_move_extent_fail2(struct data_update *m,
                         struct bkey_s_c new,
                         struct bkey_s_c wrote,
                         struct bkey_i *insert,
                         const char *msg)
{
        struct bch_fs *c = m->op.c;
        struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
        const union bch_extent_entry *entry;
        struct bch_extent_ptr *ptr;
        struct extent_ptr_decoded p;
        struct printbuf buf = PRINTBUF;
        unsigned i, rewrites_found = 0;

        if (!trace_move_extent_fail_enabled())
                return;

        prt_str(&buf, msg);

        if (insert) {
                i = 0;
                bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
                        if (((1U << i) & m->data_opts.rewrite_ptrs) &&
                            (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
                            !ptr->cached)
                                rewrites_found |= 1U << i;
                        i++;
                }
        }

        prt_printf(&buf, "\nrewrite ptrs:   %u%u%u%u",
                   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
                   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
                   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
                   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

        prt_printf(&buf, "\nrewrites found: %u%u%u%u",
                   (rewrites_found & (1 << 0)) != 0,
                   (rewrites_found & (1 << 1)) != 0,
                   (rewrites_found & (1 << 2)) != 0,
                   (rewrites_found & (1 << 3)) != 0);

        prt_str(&buf, "\nold:    ");
        bch2_bkey_val_to_text(&buf, c, old);

        prt_str(&buf, "\nnew:    ");
        bch2_bkey_val_to_text(&buf, c, new);

        prt_str(&buf, "\nwrote:  ");
        bch2_bkey_val_to_text(&buf, c, wrote);

        if (insert) {
                prt_str(&buf, "\ninsert: ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
        }

        trace_move_extent_fail(c, buf.buf);
        printbuf_exit(&buf);
}

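/*
 * Called after the write for a data update completes: walk the keylist of
 * newly written extents and, for each one, update the original extent in the
 * btree - dropping the pointers we rewrote and splicing in pointers to the
 * new copies - while checking whether we raced with other updates to the
 * same extent.
 */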
static int __bch2_data_update_index_update(struct btree_trans *trans,
                                           struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_iter iter;
        struct data_update *m =
                container_of(op, struct data_update, op);
        struct keylist *keys = &op->insert_keys;
        struct bkey_buf _new, _insert;
        int ret = 0;

        bch2_bkey_buf_init(&_new);
        bch2_bkey_buf_init(&_insert);
        bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

        bch2_trans_iter_init(trans, &iter, m->btree_id,
                             bkey_start_pos(&bch2_keylist_front(keys)->k),
                             BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

        while (1) {
                struct bkey_s_c k;
                struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
                struct bkey_i *insert = NULL;
                struct bkey_i_extent *new;
                const union bch_extent_entry *entry_c;
                union bch_extent_entry *entry;
                struct extent_ptr_decoded p;
                struct bch_extent_ptr *ptr;
                const struct bch_extent_ptr *ptr_c;
                struct bpos next_pos;
                bool should_check_enospc;
                s64 i_sectors_delta = 0, disk_sectors_delta = 0;
                unsigned rewrites_found = 0, durability, i;

                bch2_trans_begin(trans);

                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        goto err;

                new = bkey_i_to_extent(bch2_keylist_front(keys));

                if (!bch2_extents_match(k, old)) {
                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
                                                NULL, "no match:");
                        goto nowork;
                }

                bkey_reassemble(_insert.k, k);
                insert = _insert.k;

                bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
                new = bkey_i_to_extent(_new.k);
                bch2_cut_front(iter.pos, &new->k_i);

                bch2_cut_front(iter.pos,        insert);
                bch2_cut_back(new->k.p,         insert);
                bch2_cut_back(insert->k.p,      &new->k_i);

                /*
                 * @old: extent that we read from
                 * @insert: key that we're going to update, initialized from
                 * extent currently in btree - same as @old unless we raced with
                 * other updates
                 * @new: extent with new pointers that we'll be adding to @insert
                 *
                 * First, drop rewrite_ptrs from @new:
                 */
                i = 0;
                bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
                        if (((1U << i) & m->data_opts.rewrite_ptrs) &&
                            (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
                            !ptr->cached) {
                                bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
                                /*
                                 * See comment below:
                                bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
                                */
                                rewrites_found |= 1U << i;
                        }
                        i++;
                }

                if (m->data_opts.rewrite_ptrs &&
                    !rewrites_found &&
                    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
                        goto nowork;
                }

                /*
                 * A replica that we just wrote might conflict with a replica
                 * that we want to keep, due to racing with another move:
                 */
restart_drop_conflicting_replicas:
                extent_for_each_ptr(extent_i_to_s(new), ptr)
                        if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
                            !ptr_c->cached) {
                                bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
                                goto restart_drop_conflicting_replicas;
                        }

                if (!bkey_val_u64s(&new->k)) {
                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
                        goto nowork;
                }

                /* Now, drop pointers that conflict with what we just wrote: */
                extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
                        if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
                                bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

                durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
                        bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

                /* Now, drop excess replicas: */
restart_drop_extra_replicas:
                bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
                        unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

                        if (!p.ptr.cached &&
                            durability - ptr_durability >= m->op.opts.data_replicas) {
                                durability -= ptr_durability;
                                bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
                                /*
                                 * Currently, we're dropping unneeded replicas
                                 * instead of marking them as cached, since
                                 * cached data in stripe buckets prevents them
                                 * from being reused:
                                bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
                                */
                                goto restart_drop_extra_replicas;
                        }
                }

                /* Finally, add the pointers we just wrote: */
                extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
                        bch2_extent_ptr_decoded_append(insert, &p);

                bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
                bch2_extent_normalize(c, bkey_i_to_s(insert));

                ret = bch2_sum_sector_overwrites(trans, &iter, insert,
                                                 &should_check_enospc,
                                                 &i_sectors_delta,
                                                 &disk_sectors_delta);
                if (ret)
                        goto err;

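                /*
                 * If the key we're about to insert needs more on disk space
                 * than is left in the write op's disk reservation, top the
                 * reservation up here:
                 */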
                if (disk_sectors_delta > (s64) op->res.sectors) {
                        ret = bch2_disk_reservation_add(c, &op->res,
                                                disk_sectors_delta - op->res.sectors,
                                                !should_check_enospc
                                                ? BCH_DISK_RESERVATION_NOFAIL : 0);
                        if (ret)
                                goto out;
                }

                next_pos = insert->k.p;

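                /*
                 * The extent we're updating may be visible in other snapshots;
                 * insert whiteouts where needed so this update doesn't change
                 * what those snapshots see:
                 */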
                ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
                                                k.k->p, bkey_start_pos(&insert->k)) ?:
                        bch2_insert_snapshot_whiteouts(trans, m->btree_id,
                                                k.k->p, insert->k.p);
                if (ret)
                        goto err;

                ret   = bch2_trans_update(trans, &iter, insert,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
                        bch2_trans_commit(trans, &op->res,
                                NULL,
                                BTREE_INSERT_NOCHECK_RW|
                                BTREE_INSERT_NOFAIL|
                                m->data_opts.btree_insert_flags);
                if (!ret) {
                        bch2_btree_iter_set_pos(&iter, next_pos);

                        this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
                        trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
                }
err:
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
                if (ret)
                        break;
next:
                while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
                        bch2_keylist_pop_front(keys);
                        if (bch2_keylist_empty(keys))
                                goto out;
                }
                continue;
nowork:
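                /*
                 * We raced with another update and there's nothing for us to
                 * do here: account the race and skip past the existing key.
                 */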
                if (m->ctxt && m->ctxt->stats) {
                        BUG_ON(k.k->p.offset <= iter.pos.offset);
                        atomic64_inc(&m->ctxt->stats->keys_raced);
                        atomic64_add(k.k->p.offset - iter.pos.offset,
                                     &m->ctxt->stats->sectors_raced);
                }

                this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);

                bch2_btree_iter_advance(&iter);
                goto next;
        }
out:
        bch2_trans_iter_exit(trans, &iter);
        bch2_bkey_buf_exit(&_insert, c);
        bch2_bkey_buf_exit(&_new, c);
        BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
        return ret;
}

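/*
 * Run the index update for a data update in its own btree transaction:
 */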
int bch2_data_update_index_update(struct bch_write_op *op)
{
        return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

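/*
 * Called once the read of the existing data completes: the write op was set
 * up with BCH_WRITE_DATA_ENCODED, so hand it the crc describing the data we
 * read (checksum/compression state) and kick off the write.
 */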
void bch2_data_update_read_done(struct data_update *m,
                                struct bch_extent_crc_unpacked crc)
{
        /* write bio must own pages: */
        BUG_ON(!m->op.wbio.bio.bi_vcnt);

        m->op.crc = crc;
        m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

        closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

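/*
 * Release everything taken in bch2_data_update_init(): nocow bucket locks,
 * per device refs, the disk reservation, and the pages in the write bio.
 */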
void bch2_data_update_exit(struct data_update *update)
{
        struct bch_fs *c = update->op.c;
        struct bkey_ptrs_c ptrs =
                bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
        const struct bch_extent_ptr *ptr;

        bkey_for_each_ptr(ptrs, ptr) {
                if (c->opts.nocow_enabled)
                        bch2_bucket_nocow_unlock(&c->nocow_locks,
                                                 PTR_BUCKET_POS(c, ptr), 0);
                percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
        }

        bch2_bkey_buf_exit(&update->k, c);
        bch2_disk_reservation_put(c, &update->op.res);
        bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

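/*
 * Unwritten extents have no data to read or copy: allocate new space, append
 * unwritten pointers and do the index update directly, without going through
 * the normal read/write path.
 */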
void bch2_update_unwritten_extent(struct btree_trans *trans,
                                  struct data_update *update)
{
        struct bch_fs *c = update->op.c;
        struct bio *bio = &update->op.wbio.bio;
        struct bkey_i_extent *e;
        struct write_point *wp;
        struct bch_extent_ptr *ptr;
        struct closure cl;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        closure_init_stack(&cl);
        bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

        while (bio_sectors(bio)) {
                unsigned sectors = bio_sectors(bio);

                bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
                                     BTREE_ITER_SLOTS);
                ret = lockrestart_do(trans, ({
                        k = bch2_btree_iter_peek_slot(&iter);
                        bkey_err(k);
                }));
                bch2_trans_iter_exit(trans, &iter);

                if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
                        break;

                e = bkey_extent_init(update->op.insert_keys.top);
                e->k.p = update->op.pos;

                ret = bch2_alloc_sectors_start_trans(trans,
                                update->op.target,
                                false,
                                update->op.write_point,
                                &update->op.devs_have,
                                update->op.nr_replicas,
                                update->op.nr_replicas,
                                update->op.watermark,
                                0, &cl, &wp);
                if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
                        bch2_trans_unlock(trans);
                        closure_sync(&cl);
                        continue;
                }

                if (ret)
                        return;

                sectors = min(sectors, wp->sectors_free);

                bch2_key_resize(&e->k, sectors);

                bch2_open_bucket_get(c, wp, &update->op.open_buckets);
                bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
                bch2_alloc_sectors_done(c, wp);

                bio_advance(bio, sectors << 9);
                update->op.pos.offset += sectors;

                extent_for_each_ptr(extent_i_to_s(e), ptr)
                        ptr->unwritten = true;
                bch2_keylist_push(&update->op.insert_keys);

                ret = __bch2_data_update_index_update(trans, &update->op);

                bch2_open_buckets_put(c, &update->op.open_buckets);

                if (ret)
                        break;
        }

        if (closure_nr_remaining(&cl) != 1) {
                bch2_trans_unlock(trans);
                closure_sync(&cl);
        }
}

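/*
 * Set up a data update: initialize the write op from @io_opts/@data_opts,
 * take a ref on each device the extent points to, take nocow locks on the
 * buckets we'll be reading from (when nocow is enabled), and reserve disk
 * space for any extra replicas being added.
 *
 * On success the caller owns the resources, which are released by
 * bch2_data_update_exit(); unwritten extents are flagged for special
 * handling via -BCH_ERR_unwritten_extent_update.
 */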
int bch2_data_update_init(struct btree_trans *trans,
                          struct moving_context *ctxt,
                          struct data_update *m,
                          struct write_point_specifier wp,
                          struct bch_io_opts io_opts,
                          struct data_update_opts data_opts,
                          enum btree_id btree_id,
                          struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        const struct bch_extent_ptr *ptr;
        unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
        unsigned ptrs_locked = 0;
        int ret;

        bch2_bkey_buf_init(&m->k);
        bch2_bkey_buf_reassemble(&m->k, c, k);
        m->btree_id     = btree_id;
        m->data_opts    = data_opts;

        bch2_write_op_init(&m->op, c, io_opts);
        m->op.pos       = bkey_start_pos(k.k);
        m->op.version   = k.k->version;
        m->op.target    = data_opts.target;
        m->op.write_point = wp;
        m->op.nr_replicas = 0;
        m->op.flags     |= BCH_WRITE_PAGES_STABLE|
                BCH_WRITE_PAGES_OWNED|
                BCH_WRITE_DATA_ENCODED|
                BCH_WRITE_MOVE|
                m->data_opts.write_flags;
        m->op.compression_opt   = io_opts.background_compression ?: io_opts.compression;
        m->op.watermark         = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

        bkey_for_each_ptr(ptrs, ptr)
                percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

        i = 0;
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                bool locked;

                if (((1U << i) & m->data_opts.rewrite_ptrs)) {
                        BUG_ON(p.ptr.cached);

                        if (crc_is_compressed(p.crc))
                                reserve_sectors += k.k->size;

                        m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
                } else if (!p.ptr.cached) {
                        bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
                }

                /*
                 * op->csum_type is normally initialized from the fs/file's
                 * current options - but if an extent is encrypted, we require
                 * that it stays encrypted:
                 */
                if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
                        m->op.nonce     = p.crc.nonce + p.crc.offset;
                        m->op.csum_type = p.crc.csum_type;
                }

                if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
                        m->op.incompressible = true;

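                /*
                 * Take the nocow lock on the bucket this pointer lives in, so
                 * a nocow write can't race with us moving the data out from
                 * under it. With a moving_context, try the lock and wait for
                 * in flight reads to drain before blocking on it:
                 */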
                if (c->opts.nocow_enabled) {
                        if (ctxt) {
                                move_ctxt_wait_event(ctxt, trans,
                                                (locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
                                                                          PTR_BUCKET_POS(c, &p.ptr), 0)) ||
                                                !atomic_read(&ctxt->read_sectors));

                                if (!locked)
                                        bch2_bucket_nocow_lock(&c->nocow_locks,
                                                               PTR_BUCKET_POS(c, &p.ptr), 0);
                        } else {
                                if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
                                                               PTR_BUCKET_POS(c, &p.ptr), 0)) {
                                        ret = -BCH_ERR_nocow_lock_blocked;
                                        goto err;
                                }
                        }
                        ptrs_locked |= (1U << i);
                }

                i++;
        }

        if (reserve_sectors) {
                ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
                                m->data_opts.extra_replicas
                                ? 0
                                : BCH_DISK_RESERVATION_NOFAIL);
                if (ret)
                        goto err;
        }

        m->op.nr_replicas += m->data_opts.extra_replicas;
        m->op.nr_replicas_required = m->op.nr_replicas;

        BUG_ON(!m->op.nr_replicas);

        /* Special handling required: */
        if (bkey_extent_is_unwritten(k))
                return -BCH_ERR_unwritten_extent_update;
        return 0;
err:
        i = 0;
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if ((1U << i) & ptrs_locked)
                        bch2_bucket_nocow_unlock(&c->nocow_locks,
                                                 PTR_BUCKET_POS(c, &p.ptr), 0);
                percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
                i++;
        }

        bch2_bkey_buf_exit(&m->k, c);
        bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
        return ret;
}

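/*
 * Cached pointers don't need to be rewritten to preserve durability - they
 * can simply be dropped - so turn rewrite requests on cached pointers into
 * kill requests:
 */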
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const struct bch_extent_ptr *ptr;
        unsigned i = 0;

        bkey_for_each_ptr(ptrs, ptr) {
                if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
                        opts->kill_ptrs |= 1U << i;
                        opts->rewrite_ptrs ^= 1U << i;
                }

                i++;
        }
}