// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "extents.h"
#include "io.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "subvolume.h"

#include <trace/events/bcachefs.h>

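/*
 * When the position of the key being rewritten changes, keys visible in
 * ancestor snapshots at the old position need whiteouts at the new position:
 * for every ancestor snapshot with a key at @old_pos, insert a whiteout at
 * @new_pos if that slot is still empty.
 */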
static int insert_snapshot_whiteouts(struct btree_trans *trans,
                                     enum btree_id id,
                                     struct bpos old_pos,
                                     struct bpos new_pos)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter, iter2;
        struct bkey_s_c k, k2;
        snapshot_id_list s;
        struct bkey_i *update;
        int ret;

        if (!btree_type_has_snapshots(id))
                return 0;

        darray_init(&s);

        if (!bch2_snapshot_has_children(c, old_pos.snapshot))
                return 0;

        bch2_trans_iter_init(trans, &iter, id, old_pos,
                             BTREE_ITER_NOT_EXTENTS|
                             BTREE_ITER_ALL_SNAPSHOTS);
        while (1) {
                k = bch2_btree_iter_prev(&iter);
                ret = bkey_err(k);
                if (ret)
                        break;

                if (!k.k)
                        break;

                if (!bkey_eq(old_pos, k.k->p))
                        break;

                if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot) &&
                    !snapshot_list_has_ancestor(c, &s, k.k->p.snapshot)) {
                        struct bpos whiteout_pos = new_pos;

                        whiteout_pos.snapshot = k.k->p.snapshot;

                        bch2_trans_iter_init(trans, &iter2, id, whiteout_pos,
                                             BTREE_ITER_NOT_EXTENTS|
                                             BTREE_ITER_INTENT);
                        k2 = bch2_btree_iter_peek_slot(&iter2);
                        ret = bkey_err(k2);

                        if (!ret && k2.k->type == KEY_TYPE_deleted) {
                                update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
                                ret = PTR_ERR_OR_ZERO(update);
                                if (ret)
                                        break;

                                bkey_init(&update->k);
                                update->k.p             = whiteout_pos;
                                update->k.type          = KEY_TYPE_whiteout;

                                ret = bch2_trans_update(trans, &iter2, update,
                                                        BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
                        }
                        bch2_trans_iter_exit(trans, &iter2);

                        if (ret)
                                break;

                        ret = snapshot_list_add(c, &s, k.k->p.snapshot);
                        if (ret)
                                break;
                }
        }
        bch2_trans_iter_exit(trans, &iter);
        darray_exit(&s);

        return ret;
}

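/* Mark every pointer to @dev in @k as a cached pointer: */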
static void bch2_bkey_mark_dev_cached(struct bkey_s k, unsigned dev)
{
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
        struct bch_extent_ptr *ptr;

        bkey_for_each_ptr(ptrs, ptr)
                if (ptr->dev == dev)
                        ptr->cached = true;
}

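/*
 * Index update for a completed data update: for each key on the insert
 * keylist, drop or mark cached the pointers being rewritten in the existing
 * extent and append the newly written pointers, retrying on transaction
 * restart and skipping ranges where the extent no longer matches what was
 * read (i.e. we raced with another update).
 */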
static int __bch2_data_update_index_update(struct btree_trans *trans,
                                           struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_iter iter;
        struct data_update *m =
                container_of(op, struct data_update, op);
        struct keylist *keys = &op->insert_keys;
        struct bkey_buf _new, _insert;
        int ret = 0;

        bch2_bkey_buf_init(&_new);
        bch2_bkey_buf_init(&_insert);
        bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

        bch2_trans_iter_init(trans, &iter, m->btree_id,
                             bkey_start_pos(&bch2_keylist_front(keys)->k),
                             BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

        while (1) {
                struct bkey_s_c k;
                struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
                struct bkey_i *insert;
                struct bkey_i_extent *new;
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;
                struct bpos next_pos;
                bool did_work = false;
                bool should_check_enospc;
                s64 i_sectors_delta = 0, disk_sectors_delta = 0;
                unsigned i;

                bch2_trans_begin(trans);

                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        goto err;

                new = bkey_i_to_extent(bch2_keylist_front(keys));

                if (!bch2_extents_match(k, old))
                        goto nomatch;

                bkey_reassemble(_insert.k, k);
                insert = _insert.k;

                bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
                new = bkey_i_to_extent(_new.k);
                bch2_cut_front(iter.pos, &new->k_i);

                bch2_cut_front(iter.pos,        insert);
                bch2_cut_back(new->k.p,         insert);
                bch2_cut_back(insert->k.p,      &new->k_i);

                /*
                 * @old: extent that we read from
                 * @insert: key that we're going to update, initialized from
                 * extent currently in btree - same as @old unless we raced with
                 * other updates
                 * @new: extent with new pointers that we'll be adding to @insert
                 *
                 * First, drop rewrite_ptrs from @new:
                 */
                i = 0;
                bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
                        if (((1U << i) & m->data_opts.rewrite_ptrs) &&
                            bch2_extent_has_ptr(old, p, bkey_i_to_s_c(insert))) {
                                /*
                                 * If we're going to be adding a pointer to the
                                 * same device, we have to drop the old one -
                                 * otherwise, we can just mark it cached:
                                 */
                                if (bch2_bkey_has_device(bkey_i_to_s_c(&new->k_i), p.ptr.dev))
                                        bch2_bkey_drop_device_noerror(bkey_i_to_s(insert), p.ptr.dev);
                                else
                                        bch2_bkey_mark_dev_cached(bkey_i_to_s(insert), p.ptr.dev);
                        }
                        i++;
                }

                /* Add new ptrs: */
                extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
                        const struct bch_extent_ptr *existing_ptr =
                                bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev);

                        if (existing_ptr && existing_ptr->cached) {
                                /*
                                 * We're replacing a cached pointer with a non
                                 * cached pointer:
                                 */
                                bch2_bkey_drop_device_noerror(bkey_i_to_s(insert),
                                                              existing_ptr->dev);
                        } else if (existing_ptr) {
                                /*
                                 * raced with another move op? extent already
                                 * has a pointer to the device we just wrote
                                 * data to
                                 */
                                continue;
                        }

                        bch2_extent_ptr_decoded_append(insert, &p);
                        did_work = true;
                }

                if (!did_work)
                        goto nomatch;

                bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
                bch2_extent_normalize(c, bkey_i_to_s(insert));

                ret = bch2_sum_sector_overwrites(trans, &iter, insert,
                                                 &should_check_enospc,
                                                 &i_sectors_delta,
                                                 &disk_sectors_delta);
                if (ret)
                        goto err;

                if (disk_sectors_delta > (s64) op->res.sectors) {
                        ret = bch2_disk_reservation_add(c, &op->res,
                                                disk_sectors_delta - op->res.sectors,
                                                !should_check_enospc
                                                ? BCH_DISK_RESERVATION_NOFAIL : 0);
                        if (ret)
                                goto out;
                }

                next_pos = insert->k.p;

                if (!bkey_eq(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
                        ret = insert_snapshot_whiteouts(trans, m->btree_id, k.k->p,
                                                        bkey_start_pos(&insert->k));
                        if (ret)
                                goto err;
                }

                if (!bkey_eq(insert->k.p, k.k->p)) {
                        ret = insert_snapshot_whiteouts(trans, m->btree_id,
                                                        k.k->p, insert->k.p);
                        if (ret)
                                goto err;
                }

                ret   = bch2_trans_update(trans, &iter, insert,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
                        bch2_trans_commit(trans, &op->res,
                                NULL,
                                BTREE_INSERT_NOFAIL|
                                m->data_opts.btree_insert_flags);
                if (!ret) {
                        bch2_btree_iter_set_pos(&iter, next_pos);

                        this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
                        trace_move_extent_finish(&new->k);
                }
err:
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
                if (ret)
                        break;
next:
                while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
                        bch2_keylist_pop_front(keys);
                        if (bch2_keylist_empty(keys))
                                goto out;
                }
                continue;
nomatch:
                if (m->ctxt) {
                        BUG_ON(k.k->p.offset <= iter.pos.offset);
                        atomic64_inc(&m->ctxt->stats->keys_raced);
                        atomic64_add(k.k->p.offset - iter.pos.offset,
                                     &m->ctxt->stats->sectors_raced);
                }

                this_cpu_add(c->counters[BCH_COUNTER_move_extent_fail], new->k.size);
                trace_move_extent_fail(&new->k);

                bch2_btree_iter_advance(&iter);
                goto next;
        }
out:
        bch2_trans_iter_exit(trans, &iter);
        bch2_bkey_buf_exit(&_insert, c);
        bch2_bkey_buf_exit(&_new, c);
        BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
        return ret;
}

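/* Wrapper that runs __bch2_data_update_index_update() in its own btree transaction: */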
int bch2_data_update_index_update(struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_trans trans;
        int ret;

        bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
        ret = __bch2_data_update_index_update(&trans, op);
        bch2_trans_exit(&trans);

        return ret;
}

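/*
 * Called when the read of the data being moved completes: hand the checksum
 * state over to the write op and submit the write.
 */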
void bch2_data_update_read_done(struct data_update *m,
                                struct bch_extent_crc_unpacked crc)
{
        /* write bio must own pages: */
        BUG_ON(!m->op.wbio.bio.bi_vcnt);

        m->op.crc = crc;
        m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

        closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

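/*
 * Release everything taken by bch2_data_update_init(): nocow bucket locks,
 * device refs, the disk reservation and the write bio's pages.
 */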
void bch2_data_update_exit(struct data_update *update)
{
        struct bch_fs *c = update->op.c;
        struct bkey_ptrs_c ptrs =
                bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
        const struct bch_extent_ptr *ptr;

        bkey_for_each_ptr(ptrs, ptr) {
                bch2_bucket_nocow_unlock(&c->nocow_locks,
                                         PTR_BUCKET_POS(c, ptr), 0);
                percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
        }

        bch2_bkey_buf_exit(&update->k, c);
        bch2_disk_reservation_put(c, &update->op.res);
        bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

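/*
 * Unwritten extents have no data to read and rewrite: instead, allocate space
 * at the destination, append unwritten pointers and do the index update
 * directly, one allocation at a time until the whole extent has been covered.
 */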
void bch2_update_unwritten_extent(struct btree_trans *trans,
                                  struct data_update *update)
{
        struct bch_fs *c = update->op.c;
        struct bio *bio = &update->op.wbio.bio;
        struct bkey_i_extent *e;
        struct write_point *wp;
        struct bch_extent_ptr *ptr;
        struct closure cl;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        closure_init_stack(&cl);
        bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

        while (bio_sectors(bio)) {
                unsigned sectors = bio_sectors(bio);

                bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
                                     BTREE_ITER_SLOTS);
                ret = lockrestart_do(trans, ({
                        k = bch2_btree_iter_peek_slot(&iter);
                        bkey_err(k);
                }));
                bch2_trans_iter_exit(trans, &iter);

                if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
                        break;

                e = bkey_extent_init(update->op.insert_keys.top);
                e->k.p = update->op.pos;

                ret = bch2_alloc_sectors_start_trans(trans,
                                update->op.target,
                                false,
                                update->op.write_point,
                                &update->op.devs_have,
                                update->op.nr_replicas,
                                update->op.nr_replicas,
                                update->op.alloc_reserve,
                                0, &cl, &wp);
                if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
                        bch2_trans_unlock(trans);
                        closure_sync(&cl);
                        continue;
                }

                if (ret)
                        return;

                sectors = min(sectors, wp->sectors_free);

                bch2_key_resize(&e->k, sectors);

                bch2_open_bucket_get(c, wp, &update->op.open_buckets);
                bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
                bch2_alloc_sectors_done(c, wp);

                bio_advance(bio, sectors << 9);
                update->op.pos.offset += sectors;

                extent_for_each_ptr(extent_i_to_s(e), ptr)
                        ptr->unwritten = true;
                bch2_keylist_push(&update->op.insert_keys);

                ret = __bch2_data_update_index_update(trans, &update->op);

                bch2_open_buckets_put(c, &update->op.open_buckets);

                if (ret)
                        break;
        }

        if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) {
                bch2_trans_unlock(trans);
                closure_sync(&cl);
        }
}

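/*
 * Set up a data update: take a ref on each device the extent points to, take
 * nocow locks on the buckets, reserve space for any extra replicas and
 * initialize the write op. Unwritten extents return
 * -BCH_ERR_unwritten_extent_update, as they need the special handling in
 * bch2_update_unwritten_extent().
 */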
int bch2_data_update_init(struct btree_trans *trans,
                          struct moving_context *ctxt,
                          struct data_update *m,
                          struct write_point_specifier wp,
                          struct bch_io_opts io_opts,
                          struct data_update_opts data_opts,
                          enum btree_id btree_id,
                          struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        const struct bch_extent_ptr *ptr;
        unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
        unsigned int ptrs_locked = 0;
        int ret;

        bch2_bkey_buf_init(&m->k);
        bch2_bkey_buf_reassemble(&m->k, c, k);
        m->btree_id     = btree_id;
        m->data_opts    = data_opts;

        bch2_write_op_init(&m->op, c, io_opts);
        m->op.pos       = bkey_start_pos(k.k);
        m->op.version   = k.k->version;
        m->op.target    = data_opts.target;
        m->op.write_point = wp;
        m->op.flags     |= BCH_WRITE_PAGES_STABLE|
                BCH_WRITE_PAGES_OWNED|
                BCH_WRITE_DATA_ENCODED|
                BCH_WRITE_MOVE|
                m->data_opts.write_flags;
        m->op.compression_type =
                bch2_compression_opt_to_type[io_opts.background_compression ?:
                                             io_opts.compression];
        if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
                m->op.alloc_reserve = RESERVE_movinggc;

        bkey_for_each_ptr(ptrs, ptr)
                percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

        i = 0;
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                bool locked;

                if (((1U << i) & m->data_opts.rewrite_ptrs) &&
                    p.ptr.cached)
                        BUG();

                if (!((1U << i) & m->data_opts.rewrite_ptrs) &&
                    !p.ptr.cached)
                        bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);

                if (((1U << i) & m->data_opts.rewrite_ptrs) &&
                    crc_is_compressed(p.crc))
                        reserve_sectors += k.k->size;

                /*
                 * op->csum_type is normally initialized from the fs/file's
                 * current options - but if an extent is encrypted, we require
                 * that it stays encrypted:
                 */
                if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
                        m->op.nonce     = p.crc.nonce + p.crc.offset;
                        m->op.csum_type = p.crc.csum_type;
                }

                if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
                        m->op.incompressible = true;

                if (ctxt) {
                        move_ctxt_wait_event(ctxt, trans,
                                        (locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
                                                                  PTR_BUCKET_POS(c, &p.ptr), 0)) ||
                                        !atomic_read(&ctxt->read_sectors));

                        if (!locked)
                                bch2_bucket_nocow_lock(&c->nocow_locks,
                                                       PTR_BUCKET_POS(c, &p.ptr), 0);
                } else {
                        if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
                                                       PTR_BUCKET_POS(c, &p.ptr), 0)) {
                                ret = -BCH_ERR_nocow_lock_blocked;
                                goto err;
                        }
                }
                ptrs_locked |= (1U << i);
                i++;
        }

        if (reserve_sectors) {
                ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
                                m->data_opts.extra_replicas
                                ? 0
                                : BCH_DISK_RESERVATION_NOFAIL);
                if (ret)
                        goto err;
        }

        m->op.nr_replicas = m->op.nr_replicas_required =
                hweight32(m->data_opts.rewrite_ptrs) + m->data_opts.extra_replicas;

        BUG_ON(!m->op.nr_replicas);

        /* Special handling required: */
        if (bkey_extent_is_unwritten(k))
                return -BCH_ERR_unwritten_extent_update;
        return 0;
err:
        i = 0;
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if ((1U << i) & ptrs_locked)
                        bch2_bucket_nocow_unlock(&c->nocow_locks,
                                                 PTR_BUCKET_POS(c, &p.ptr), 0);
                percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
                i++;
        }

        bch2_bkey_buf_exit(&m->k, c);
        bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
        return ret;
}

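/*
 * Cached pointers aren't rewritten, only dropped: move any cached pointers in
 * @opts->rewrite_ptrs over to @opts->kill_ptrs.
 */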
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const struct bch_extent_ptr *ptr;
        unsigned i = 0;

        bkey_for_each_ptr(ptrs, ptr) {
                if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
                        opts->kill_ptrs |= 1U << i;
                        opts->rewrite_ptrs ^= 1U << i;
                }

                i++;
        }
}