// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "extents.h"
#include "extent_update.h"

/*
 * This counts the number of iterators to the alloc & ec btrees we'll need
 * for inserting/removing this extent:
 */
static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	unsigned ret = 0;

	bkey_extent_entry_for_each(ptrs, entry) {
		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ret++;
		}
	}

	return ret;
}

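/*
 * Count the btree iterators that inserting or overwriting @k will require,
 * adding to *nr_iters: once the total reaches @max_iters, *end is clamped
 * (to the end of @k, or partway through it for reflink pointers) and 1 is
 * returned so the caller stops there. A negative error is returned if
 * walking the reflink btree fails.
 */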
static int count_iters_for_insert(struct btree_trans *trans,
				  struct bkey_s_c k,
				  unsigned offset,
				  struct bpos *end,
				  unsigned *nr_iters,
				  unsigned max_iters,
				  bool overwrite)
{
	int ret = 0;

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		*nr_iters += bch2_bkey_nr_alloc_ptrs(k);

		if (*nr_iters >= max_iters) {
			*end = bpos_min(*end, k.k->p);
			ret = 1;
		}

		break;
	case KEY_TYPE_reflink_p: {
		struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
		u64 idx = le64_to_cpu(p.v->idx);
		unsigned sectors = bpos_min(*end, p.k->p).offset -
			bkey_start_offset(p.k);
		struct btree_iter *iter;
		struct bkey_s_c r_k;

		for_each_btree_key(trans, iter,
				   BTREE_ID_REFLINK, POS(0, idx + offset),
				   BTREE_ITER_SLOTS, r_k, ret) {
			if (bkey_cmp(bkey_start_pos(r_k.k),
				     POS(0, idx + sectors)) >= 0)
				break;

			*nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);

			if (*nr_iters >= max_iters) {
				struct bpos pos = bkey_start_pos(k.k);
				pos.offset += r_k.k->p.offset - idx;

				*end = bpos_min(*end, pos);
				ret = 1;
				break;
			}
		}

		bch2_trans_iter_put(trans, iter);
		break;
	}
	}

	return ret;
}

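/* Cap the iterators one extent update may consume at a third of BTREE_ITER_MAX: */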
#define EXTENT_ITERS_MAX	(BTREE_ITER_MAX / 3)

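/*
 * Compute *end, the farthest position up to which @insert can be applied in
 * one atomic btree update: start from the end of @insert (clamped to the end
 * of the btree node), then walk the existing keys it overwrites and pull *end
 * back whenever the iterator budget (EXTENT_ITERS_MAX) would be exceeded.
 */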
int bch2_extent_atomic_end(struct btree_iter *iter,
			   struct bkey_i *insert,
			   struct bpos *end)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b;
	struct btree_node_iter	node_iter;
	struct bkey_packed	*_k;
	unsigned		nr_iters = 0;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	b = iter->l[0].b;
	node_iter = iter->l[0].iter;

	BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);

	*end = bpos_min(insert->k.p, b->key.k.p);

	ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
				     &nr_iters, EXTENT_ITERS_MAX / 2, false);
	if (ret < 0)
		return ret;

	while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
						      KEY_TYPE_discard))) {
		struct bkey	unpacked;
		struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
		unsigned offset = 0;

		if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
			break;

		if (bkey_cmp(bkey_start_pos(&insert->k),
			     bkey_start_pos(k.k)) > 0)
			offset = bkey_start_offset(&insert->k) -
				bkey_start_offset(k.k);

		ret = count_iters_for_insert(trans, k, offset, end,
					&nr_iters, EXTENT_ITERS_MAX, true);
		if (ret)
			break;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return ret < 0 ? ret : 0;
}

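/* Trim @k so it can be inserted with a single atomic btree update: */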
int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct bpos end;
	int ret;

	ret = bch2_extent_atomic_end(iter, k, &end);
	if (ret)
		return ret;

	bch2_cut_back(end, k);
	return 0;
}

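/*
 * Returns true if @k can already be inserted in one atomic update (its
 * atomic end equals its end position), false otherwise, or a negative error.
 */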
int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct bpos end;
	int ret;

	ret = bch2_extent_atomic_end(iter, k, &end);
	if (ret)
		return ret;

	return !bkey_cmp(end, k->k.p);
}

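/*
 * Check that @insert can be committed: add the space overwritten keys will
 * need (whiteouts, and splitting an existing extent down the middle) to
 * *u64s, and take a disk reservation if the insert will split a compressed
 * extent; returns BTREE_INSERT_ENOSPC if that reservation can't be made.
 */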
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *trans,
		       struct btree_insert_entry *insert,
		       unsigned *u64s)
{
	struct btree_iter_level *l = &insert->iter->l[0];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *_k;
	struct bkey unpacked;
	int sectors;

	while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
						      KEY_TYPE_discard))) {
		struct bkey_s_c k = bkey_disassemble(l->b, _k, &unpacked);
		enum bch_extent_overlap overlap =
			bch2_extent_overlap(&insert->k->k, k.k);

		if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
			break;

		if (bkey_written(l->b, _k) &&
		    overlap != BCH_EXTENT_OVERLAP_ALL)
			*u64s += _k->u64s;

		/* account for having to split existing extent: */
		if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
			*u64s += _k->u64s;

		if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
		    (sectors = bch2_bkey_sectors_compressed(k))) {
			int flags = trans->flags & BTREE_INSERT_NOFAIL
				? BCH_DISK_RESERVATION_NOFAIL : 0;

			switch (bch2_disk_reservation_add(trans->c,
					trans->disk_res,
					sectors, flags)) {
			case 0:
				break;
			case -ENOSPC:
				return BTREE_INSERT_ENOSPC;
			default:
				BUG();
			}
		}

		if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
		    overlap == BCH_EXTENT_OVERLAP_MIDDLE)
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
	}

	return BTREE_INSERT_OK;
}

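/*
 * Debug-only check (CONFIG_BCACHEFS_DEBUG + expensive_debug_checks): assert
 * that @insert doesn't overlap the keys immediately before and after the
 * insert position.
 */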
static void verify_extent_nonoverlapping(struct bch_fs *c,
					 struct btree *b,
					 struct btree_node_iter *_iter,
					 struct bkey_i *insert)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_node_iter iter;
	struct bkey_packed *k;
	struct bkey uk;

	if (!expensive_debug_checks(c))
		return;

	iter = *_iter;
	k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
	BUG_ON(k &&
	       (uk = bkey_unpack_key(b, k),
		bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));

	iter = *_iter;
	k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_discard);
#if 0
	BUG_ON(k &&
	       (uk = bkey_unpack_key(b, k),
		bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
#else
	if (k &&
	    (uk = bkey_unpack_key(b, k),
	     bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
		char buf1[100];
		char buf2[100];

		bch2_bkey_to_text(&PBUF(buf1), &insert->k);
		bch2_bkey_to_text(&PBUF(buf2), &uk);

		bch2_dump_btree_node(b);
		panic("insert > next :\n"
		      "insert %s\n"
		      "next   %s\n",
		      buf1, buf2);
	}
#endif

#endif
}

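/*
 * Insert @insert into the last bset of the leaf node at the current iterator
 * position, and fix up the node iterators to account for the new key.
 */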
static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
			       struct bkey_i *insert)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_packed *k =
		bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));

	BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));

	EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
	verify_extent_nonoverlapping(c, l->b, &l->iter, insert);

	if (debug_check_bkeys(c))
		bch2_bkey_debugcheck(c, l->b, bkey_i_to_s_c(insert));

	bch2_bset_insert(l->b, &l->iter, k, insert, 0);
	bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
}

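/*
 * Drop the existing key @_k: if it's in the last (still writable) bset it can
 * be deleted outright; otherwise it's overwritten in place with a zero-size
 * deleted key.
 */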
static void
extent_drop(struct bch_fs *c, struct btree_iter *iter,
	    struct bkey_packed *_k, struct bkey_s k)
{
	struct btree_iter_level *l = &iter->l[0];

	if (!bkey_whiteout(k.k))
		btree_account_key_drop(l->b, _k);

	k.k->size = 0;
	k.k->type = KEY_TYPE_deleted;
	k.k->needs_whiteout = false;

	if (_k >= btree_bset_last(l->b)->start) {
		unsigned u64s = _k->u64s;

		bch2_bset_delete(l->b, _k, _k->u64s);
		bch2_btree_node_iter_fix(iter, l->b, &l->iter, _k, u64s, 0);
	} else {
		extent_save(l->b, _k, k.k);
		bch2_btree_iter_fix_key_modified(iter, l->b, _k);
	}
}

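/*
 * Resolve the overlap between the existing key @k and the key being inserted:
 * trim @k's front, trim its back, drop it entirely, or split it in two around
 * @insert. Written (read-only) keys are copied, trimmed and reinserted;
 * unwritten keys are modified in place.
 */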
static void
extent_squash(struct bch_fs *c, struct btree_iter *iter,
	      struct bkey_i *insert,
	      struct bkey_packed *_k, struct bkey_s k,
	      enum bch_extent_overlap overlap)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_on_stack tmp, split;

	bkey_on_stack_init(&tmp);
	bkey_on_stack_init(&split);

	switch (overlap) {
	case BCH_EXTENT_OVERLAP_FRONT:
		if (bkey_written(l->b, _k)) {
			bkey_on_stack_reassemble(&tmp, c, k.s_c);
			bch2_cut_front(insert->k.p, tmp.k);

			extent_drop(c, iter, _k, k);
			extent_bset_insert(c, iter, tmp.k);
		} else {
			btree_keys_account_val_delta(l->b, _k,
				bch2_cut_front_s(insert->k.p, k));

			extent_save(l->b, _k, k.k);
			/*
			 * No need to call bset_fix_invalidated_key, start of
			 * extent changed but extents are indexed by where they
			 * end
			 */
			bch2_btree_iter_fix_key_modified(iter, l->b, _k);
		}
		break;
	case BCH_EXTENT_OVERLAP_BACK:
		if (bkey_written(l->b, _k)) {
			bkey_on_stack_reassemble(&tmp, c, k.s_c);
			bch2_cut_back(bkey_start_pos(&insert->k), tmp.k);

			extent_drop(c, iter, _k, k);
			extent_bset_insert(c, iter, tmp.k);
		} else {
			btree_keys_account_val_delta(l->b, _k,
				bch2_cut_back_s(bkey_start_pos(&insert->k), k));
			extent_save(l->b, _k, k.k);

			bch2_bset_fix_invalidated_key(l->b, _k);
			bch2_btree_node_iter_fix(iter, l->b, &l->iter,
						 _k, _k->u64s, _k->u64s);
		}
		break;
	case BCH_EXTENT_OVERLAP_ALL:
		extent_drop(c, iter, _k, k);
		break;
	case BCH_EXTENT_OVERLAP_MIDDLE:
		bkey_on_stack_reassemble(&split, c, k.s_c);
		bch2_cut_back(bkey_start_pos(&insert->k), split.k);

		if (bkey_written(l->b, _k)) {
			bkey_on_stack_reassemble(&tmp, c, k.s_c);
			bch2_cut_front(insert->k.p, tmp.k);

			extent_drop(c, iter, _k, k);
			extent_bset_insert(c, iter, tmp.k);
		} else {
			btree_keys_account_val_delta(l->b, _k,
				bch2_cut_front_s(insert->k.p, k));

			extent_save(l->b, _k, k.k);
			bch2_btree_iter_fix_key_modified(iter, l->b, _k);
		}

		extent_bset_insert(c, iter, split.k);
		break;
	}

	bkey_on_stack_exit(&split, c);
	bkey_on_stack_exit(&tmp, c);
}

/**
 * bch2_insert_fixup_extent - insert a new extent and deal with overlaps
 *
 * this may result in not actually doing the insert, or inserting some subset
 * of the insert key. For cmpxchg operations this is where that logic lives.
 *
 * All subsets of @insert that need to be inserted are inserted via
 * extent_bset_insert() and journalled via bch2_btree_journal_key(). If @b or
 * @res fills up, only a prefix of @insert is inserted, and @iter->pos is set
 * to the end of the prefix that actually got inserted.
 *
 * BSET INVARIANTS: this function is responsible for maintaining all the
 * invariants for bsets of extents in memory. things get really hairy with 0
 * size extents
 *
 * within one bset:
 *
 * bkey_start_pos(bkey_next(k)) >= k
 * or bkey_start_offset(bkey_next(k)) >= k->offset
 *
 * i.e. strict ordering, no overlapping extents.
 *
 * multiple bsets (i.e. full btree node):
 *
 * ∀ k, j
 *   k.size != 0 ∧ j.size != 0 →
 *     ¬ (k > bkey_start_pos(j) ∧ k < j)
 *
 * i.e. no two overlapping keys _of nonzero size_
 *
 * We can't realistically maintain this invariant for zero size keys because of
 * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
 * there may be another 0 size key between them in another bset, and it will
 * thus overlap with the merged key.
 *
 * In addition, the end of iter->pos indicates how much has been processed.
 * If the end of iter->pos is not the same as the end of insert, then
 * key insertion needs to continue/be retried.
 */
void bch2_insert_fixup_extent(struct btree_trans *trans,
			      struct btree_insert_entry *insert_entry)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter	= insert_entry->iter;
	struct bkey_i *insert	= insert_entry->k;
	struct btree_iter_level *l = &iter->l[0];
	struct btree_node_iter node_iter = l->iter;
	bool do_update		= !bkey_whiteout(&insert->k);
	struct bkey_packed *_k;
	struct bkey unpacked;

	EBUG_ON(iter->level);
	EBUG_ON(!insert->k.size);
	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));

	while ((_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
						      KEY_TYPE_discard))) {
		struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
		enum bch_extent_overlap overlap =
			bch2_extent_overlap(&insert->k, k.k);

		if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
			break;

		if (!bkey_whiteout(k.k))
			do_update = true;

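		/*
		 * A whiteout that so far only overlaps other whiteouts
		 * doesn't need to be inserted yet; just trim it and advance
		 * iter->pos past the region that's already deleted:
		 */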
		if (!do_update) {
			struct bpos cur_end = bpos_min(insert->k.p, k.k->p);

			bch2_cut_front(cur_end, insert);
			bch2_btree_iter_set_pos_same_leaf(iter, cur_end);
		} else {
			insert->k.needs_whiteout |= k.k->needs_whiteout;
			extent_squash(c, iter, insert, _k, k, overlap);
		}

		node_iter = l->iter;

		if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
		    overlap == BCH_EXTENT_OVERLAP_MIDDLE)
			break;
	}

	l->iter = node_iter;
	bch2_btree_iter_set_pos_same_leaf(iter, insert->k.p);

	if (do_update) {
		if (insert->k.type == KEY_TYPE_deleted)
			insert->k.type = KEY_TYPE_discard;

		extent_bset_insert(c, iter, insert);
		bch2_btree_journal_key(trans, iter, insert);
	}

	bch2_cut_front(insert->k.p, insert);
}