// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_on_stack.h"
#include "bkey_sort.h"
#include "bset.h"
#include "extents.h"

/* too many iterators, need to clean this up */

/* btree_node_iter_large: */

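/*
 * Heap-based merge iterator: one (key offset, end offset) pair per source
 * bset, kept as a binary heap so that the root is always the smallest
 * remaining key. Consuming the root and re-sifting (or dropping the set once
 * it is exhausted) yields the keys from all the bsets in sorted order.
 *
 * Note that btree_node_iter_cmp_heap() deliberately ignores its @h argument
 * and picks up @b from the enclosing scope.
 */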
#define btree_node_iter_cmp_heap(h, _l, _r) btree_node_iter_cmp(b, _l, _r)

static inline bool
bch2_btree_node_iter_large_end(struct btree_node_iter_large *iter)
{
        return !iter->used;
}

static inline struct bkey_packed *
bch2_btree_node_iter_large_peek_all(struct btree_node_iter_large *iter,
                                    struct btree *b)
{
        return bch2_btree_node_iter_large_end(iter)
                ? NULL
                : __btree_node_offset_to_key(b, iter->data->k);
}

static void
bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
                                   struct btree *b)
{
        iter->data->k += __btree_node_offset_to_key(b, iter->data->k)->u64s;

        EBUG_ON(!iter->used);
        EBUG_ON(iter->data->k > iter->data->end);

        if (iter->data->k == iter->data->end)
                heap_del(iter, 0, btree_node_iter_cmp_heap, NULL);
        else
                heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL);
}

static inline struct bkey_packed *
bch2_btree_node_iter_large_next_all(struct btree_node_iter_large *iter,
                                    struct btree *b)
{
        struct bkey_packed *ret = bch2_btree_node_iter_large_peek_all(iter, b);

        if (ret)
                bch2_btree_node_iter_large_advance(iter, b);

        return ret;
}

void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
                                     struct btree *b,
                                     const struct bkey_packed *k,
                                     const struct bkey_packed *end)
{
        if (k != end) {
                struct btree_node_iter_set n =
                        ((struct btree_node_iter_set) {
                                 __btree_node_key_to_offset(b, k),
                                 __btree_node_key_to_offset(b, end)
                         });

                __heap_add(iter, n, btree_node_iter_cmp_heap, NULL);
        }
}

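/*
 * Advance set @i to its next live key, skipping zero-u64s (pad) keys; when the
 * set is exhausted it is dropped by overwriting it with the last set in the
 * heap array. This doesn't re-establish heap order on its own - callers follow
 * it with a sift (see extent_sort_next() and bch2_key_sort_fix_overlapping()).
 */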
static void sort_key_next(struct btree_node_iter_large *iter,
                          struct btree *b,
                          struct btree_node_iter_set *i)
{
        i->k += __btree_node_offset_to_key(b, i->k)->u64s;

        while (i->k != i->end &&
               !__btree_node_offset_to_key(b, i->k)->u64s)
                i->k++;

        if (i->k == i->end)
                *i = iter->data[--iter->used];
}

/* regular sort_iters */

typedef int (*sort_cmp_fn)(struct btree *,
                           struct bkey_packed *,
                           struct bkey_packed *);

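/*
 * A sort_iter is a small array of (key, end) ranges kept fully sorted by the
 * current key of each range: __sort_iter_sift() bubbles entry @from forward
 * until it is ordered with respect to its successors (a single insertion-sort
 * pass), and sort_iter_sort() establishes the invariant by sifting every
 * entry, back to front.
 */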
static inline void __sort_iter_sift(struct sort_iter *iter,
                                    unsigned from,
                                    sort_cmp_fn cmp)
{
        unsigned i;

        for (i = from;
             i + 1 < iter->used &&
             cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
             i++)
                swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
        __sort_iter_sift(iter, 0, cmp);
}

static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
        unsigned i = iter->used;

        while (i--)
                __sort_iter_sift(iter, i, cmp);
}

static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
        return iter->used ? iter->data->k : NULL;
}

static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
        iter->data->k = bkey_next_skip_noops(iter->data->k, iter->data->end);

        BUG_ON(iter->data->k > iter->data->end);

        if (iter->data->k == iter->data->end)
                array_remove_item(iter->data, iter->used, 0);
        else
                sort_iter_sift(iter, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
                                                 sort_cmp_fn cmp)
{
        struct bkey_packed *ret = sort_iter_peek(iter);

        if (ret)
                sort_iter_advance(iter, cmp);

        return ret;
}

/*
 * Returns a positive value if l > r; when the keys themselves compare equal,
 * the tie is broken by offset within the node, so the older key (lower
 * offset) sorts first.
 *
 * Necessary for bch2_key_sort_fix_overlapping() - if multiple bsets contain
 * keys that compare equal, the older duplicates must surface (and be dropped
 * by should_drop_next_key()) before the newest one is emitted.
 */
#define key_sort_cmp(h, l, r)                                           \
({                                                                      \
        bkey_cmp_packed(b,                                              \
                        __btree_node_offset_to_key(b, (l).k),           \
                        __btree_node_offset_to_key(b, (r).k))           \
                                                                        \
        ?: (l).k - (r).k;                                               \
})

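/*
 * The next key to be emitted is iter->data[0] (the heap root); the candidate
 * duplicate to check it against is whichever of the root's two children sorts
 * first, hence the r[0]/r[1] comparison below when there are more than two
 * sets left.
 */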
static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
                                        struct btree *b)
{
        struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
        struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);

        if (bkey_whiteout(k))
                return true;

        if (iter->used < 2)
                return false;

        if (iter->used > 2 &&
            key_sort_cmp(iter, r[0], r[1]) >= 0)
                r++;

        /*
         * key_sort_cmp() ensures that when keys compare equal the older key
         * comes first; so if l->k compares equal to r->k then l->k is older and
         * should be dropped.
         */
        return !bkey_cmp_packed(b,
                                __btree_node_offset_to_key(b, l->k),
                                __btree_node_offset_to_key(b, r->k));
}

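/*
 * Merge the keys from all the bsets pushed onto @iter into @dst in sorted
 * order, dropping whiteouts and, when multiple bsets contain keys that compare
 * equal, keeping only the newest version (the older duplicates are dropped by
 * should_drop_next_key()).
 */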
struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
                                        struct btree *b,
                                        struct btree_node_iter_large *iter)
{
        struct bkey_packed *out = dst->start;
        struct btree_nr_keys nr;

        memset(&nr, 0, sizeof(nr));

        heap_resort(iter, key_sort_cmp, NULL);

        while (!bch2_btree_node_iter_large_end(iter)) {
                if (!should_drop_next_key(iter, b)) {
                        struct bkey_packed *k =
                                __btree_node_offset_to_key(b, iter->data->k);

                        bkey_copy(out, k);
                        btree_keys_account_key_add(&nr, 0, out);
                        out = bkey_next(out);
                }

                sort_key_next(iter, b, iter->data);
                heap_sift_down(iter, 0, key_sort_cmp, NULL);
        }

        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
        return nr;
}

/*
 * If keys compare equal, compare by pointer order:
 *
 * Necessary for bch2_extent_sort_fix_overlapping() - if there are multiple
 * keys that compare equal in different sets, we have to process them newest
 * to oldest.
 */
#define extent_sort_cmp(h, l, r)                                        \
({                                                                      \
        struct bkey _ul = bkey_unpack_key(b,                            \
                                __btree_node_offset_to_key(b, (l).k));  \
        struct bkey _ur = bkey_unpack_key(b,                            \
                                __btree_node_offset_to_key(b, (r).k));  \
                                                                        \
        bkey_cmp(bkey_start_pos(&_ul),                                  \
                 bkey_start_pos(&_ur)) ?: (r).k - (l).k;                \
})
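
/*
 * Note the tie-break direction: (r).k - (l).k puts the key at the higher
 * offset within the node - i.e. the newer one, since bsets are appended in
 * order - first among keys with equal start positions, which is the opposite
 * of key_sort_cmp() above.
 */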

static inline void extent_sort_sift(struct btree_node_iter_large *iter,
                                    struct btree *b, size_t i)
{
        heap_sift_down(iter, i, extent_sort_cmp, NULL);
}

static inline void extent_sort_next(struct btree_node_iter_large *iter,
                                    struct btree *b,
                                    struct btree_node_iter_set *i)
{
        sort_key_next(iter, b, i);
        heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
}

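/*
 * @prev points at the key most recently appended to the output, kept in
 * unpacked (bkey_i) form so it can still be merged with the next key. On the
 * first call *prev is NULL and is simply pointed at @start; on subsequent
 * calls the previous key is packed in place (when it fits the format) and
 * *prev advances past it.
 */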
static void extent_sort_advance_prev(struct bkey_format *f,
                                     struct btree_nr_keys *nr,
                                     struct bkey_packed *start,
                                     struct bkey_packed **prev)
{
        if (*prev) {
                bch2_bkey_pack(*prev, (void *) *prev, f);

                btree_keys_account_key_add(nr, 0, *prev);
                *prev = bkey_next(*prev);
        } else {
                *prev = start;
        }
}

static void extent_sort_append(struct bch_fs *c,
                               struct bkey_format *f,
                               struct btree_nr_keys *nr,
                               struct bkey_packed *start,
                               struct bkey_packed **prev,
                               struct bkey_s k)
{
        if (bkey_whiteout(k.k))
                return;

        /*
         * prev is always unpacked, for key merging - until right before we
         * advance it:
         */

        if (*prev &&
            bch2_bkey_merge(c, bkey_i_to_s((void *) *prev), k) ==
            BCH_MERGE_MERGE)
                return;

        extent_sort_advance_prev(f, nr, start, prev);

        bkey_reassemble((void *) *prev, k.s_c);
}

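/*
 * Merge the extents from all the bsets pushed onto @iter into @dst, resolving
 * overlaps in favour of the newer key. Illustrative sketch (offsets made up):
 * if an older bset contains an extent covering 0-16 and a newer bset one
 * covering 4-8, this is the "r wins, split l" case below: the old key is split
 * into 0-4 (emitted via @split) and 8-16 (cut in front), and the output ends
 * up as 0-4, 4-8, 8-16 with no overlaps.
 */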
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
                                        struct bset *dst,
                                        struct btree *b,
                                        struct btree_node_iter_large *iter)
{
        struct bkey_format *f = &b->format;
        struct btree_node_iter_set *_l = iter->data, *_r;
        struct bkey_packed *prev = NULL, *lk, *rk;
        struct bkey l_unpacked, r_unpacked;
        struct bkey_s l, r;
        struct btree_nr_keys nr;
        struct bkey_on_stack split;

        memset(&nr, 0, sizeof(nr));
        bkey_on_stack_init(&split);

        heap_resort(iter, extent_sort_cmp, NULL);

        while (!bch2_btree_node_iter_large_end(iter)) {
                lk = __btree_node_offset_to_key(b, _l->k);
                l = __bkey_disassemble(b, lk, &l_unpacked);

                if (iter->used == 1) {
                        extent_sort_append(c, f, &nr, dst->start, &prev, l);
                        extent_sort_next(iter, b, _l);
                        continue;
                }

                _r = iter->data + 1;
                if (iter->used > 2 &&
                    extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
                        _r++;

                rk = __btree_node_offset_to_key(b, _r->k);
                r = __bkey_disassemble(b, rk, &r_unpacked);

                /* If current key and next key don't overlap, just append */
                if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
                        extent_sort_append(c, f, &nr, dst->start, &prev, l);
                        extent_sort_next(iter, b, _l);
                        continue;
                }

                /* Skip 0 size keys */
                if (!r.k->size) {
                        extent_sort_next(iter, b, _r);
                        continue;
                }

                /*
                 * overlap: keep the newer key and trim the older key so they
                 * don't overlap. comparing pointers tells us which one is
                 * newer, since the bsets are appended one after the other.
                 */

                /* can't happen because of comparison func */
                BUG_ON(_l->k < _r->k &&
                       !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));

                if (_l->k > _r->k) {
                        /* l wins, trim r */
                        if (bkey_cmp(l.k->p, r.k->p) >= 0) {
                                sort_key_next(iter, b, _r);
                        } else {
                                bch2_cut_front_s(l.k->p, r);
                                extent_save(b, rk, r.k);
                        }

                        extent_sort_sift(iter, b, _r - iter->data);
                } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
                        bkey_on_stack_realloc(&split, c, l.k->u64s);

                        /*
                         * r wins, but it overlaps in the middle of l - split l:
                         */
                        bkey_reassemble(split.k, l.s_c);
                        bch2_cut_back(bkey_start_pos(r.k), split.k);

                        bch2_cut_front_s(r.k->p, l);
                        extent_save(b, lk, l.k);

                        extent_sort_sift(iter, b, 0);

                        extent_sort_append(c, f, &nr, dst->start,
                                           &prev, bkey_i_to_s(split.k));
                } else {
                        bch2_cut_back_s(bkey_start_pos(r.k), l);
                        extent_save(b, lk, l.k);
                }
        }

        extent_sort_advance_prev(f, &nr, dst->start, &prev);

        dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);

        bkey_on_stack_exit(&split, c);
        return nr;
}

/* Sort + repack in a new format: */
struct btree_nr_keys
bch2_sort_repack(struct bset *dst, struct btree *src,
                 struct btree_node_iter *src_iter,
                 struct bkey_format *out_f,
                 bool filter_whiteouts)
{
        struct bkey_format *in_f = &src->format;
        struct bkey_packed *in, *out = vstruct_last(dst);
        struct btree_nr_keys nr;

        memset(&nr, 0, sizeof(nr));

        while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
                if (filter_whiteouts && bkey_whiteout(in))
                        continue;

                if (bch2_bkey_transform(out_f, out, bkey_packed(in)
                                       ? in_f : &bch2_bkey_format_current, in))
                        out->format = KEY_FORMAT_LOCAL_BTREE;
                else
                        bch2_bkey_unpack(src, (void *) out, in);

                btree_keys_account_key_add(&nr, 0, out);
                out = bkey_next(out);
        }

        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
        return nr;
}
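
/*
 * Note on bch2_sort_repack() above: bch2_bkey_transform() repacks directly
 * from the source format (the source node's format for packed keys,
 * bch2_bkey_format_current for unpacked ones) into @out_f, and only when that
 * fails - i.e. the key doesn't fit the new format - do we fall back to
 * emitting the key unpacked via bch2_bkey_unpack().
 */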

/* Sort, repack, and merge: */
struct btree_nr_keys
bch2_sort_repack_merge(struct bch_fs *c,
                       struct bset *dst, struct btree *src,
                       struct btree_node_iter *iter,
                       struct bkey_format *out_f,
                       bool filter_whiteouts)
{
        struct bkey_packed *prev = NULL, *k_packed;
        struct bkey_s k;
        struct btree_nr_keys nr;
        struct bkey unpacked;

        memset(&nr, 0, sizeof(nr));

        while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
                if (filter_whiteouts && bkey_whiteout(k_packed))
                        continue;

                k = __bkey_disassemble(src, k_packed, &unpacked);

                if (filter_whiteouts &&
                    bch2_bkey_normalize(c, k))
                        continue;

                extent_sort_append(c, out_f, &nr, vstruct_last(dst), &prev, k);
        }

        extent_sort_advance_prev(out_f, &nr, vstruct_last(dst), &prev);

        dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);
        return nr;
}

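/*
 * Among keys that compare equal, whiteouts sort before non-whiteouts: that way
 * bch2_sort_keys() below sees the whiteout first, can transfer its
 * needs_whiteout flag to the next equal key, and drop the whiteout itself.
 */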
static inline int sort_keys_cmp(struct btree *b,
                                struct bkey_packed *l,
                                struct bkey_packed *r)
{
        return bkey_cmp_packed(b, l, r) ?:
                (int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
                (int) l->needs_whiteout - (int) r->needs_whiteout;
}

unsigned bch2_sort_keys(struct bkey_packed *dst,
                        struct sort_iter *iter,
                        bool filter_whiteouts)
{
        const struct bkey_format *f = &iter->b->format;
        struct bkey_packed *in, *next, *out = dst;

        sort_iter_sort(iter, sort_keys_cmp);

        while ((in = sort_iter_next(iter, sort_keys_cmp))) {
                if (bkey_whiteout(in) &&
                    (filter_whiteouts || !in->needs_whiteout))
                        continue;

                if (bkey_whiteout(in) &&
                    (next = sort_iter_peek(iter)) &&
                    !bkey_cmp_packed(iter->b, in, next)) {
                        BUG_ON(in->needs_whiteout &&
                               next->needs_whiteout);
                        /*
                         * XXX racy, called with read lock from write path
                         *
                         * leads to spurious BUG_ON() in bkey_unpack_key() in
                         * debug mode
                         */
                        next->needs_whiteout |= in->needs_whiteout;
                        continue;
                }

                if (bkey_whiteout(in)) {
                        memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
                        set_bkeyp_val_u64s(f, out, 0);
                } else {
                        bkey_copy(out, in);
                }
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}

static inline int sort_extents_cmp(struct btree *b,
                                   struct bkey_packed *l,
                                   struct bkey_packed *r)
{
        return bkey_cmp_packed(b, l, r) ?:
                (int) bkey_deleted(l) - (int) bkey_deleted(r);
}

unsigned bch2_sort_extents(struct bkey_packed *dst,
                           struct sort_iter *iter,
                           bool filter_whiteouts)
{
        struct bkey_packed *in, *out = dst;

        sort_iter_sort(iter, sort_extents_cmp);

        while ((in = sort_iter_next(iter, sort_extents_cmp))) {
                if (bkey_deleted(in))
                        continue;

                if (bkey_whiteout(in) &&
                    (filter_whiteouts || !in->needs_whiteout))
                        continue;

                bkey_copy(out, in);
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}

static inline int sort_key_whiteouts_cmp(struct btree *b,
                                         struct bkey_packed *l,
                                         struct bkey_packed *r)
{
        return bkey_cmp_packed(b, l, r);
}

unsigned bch2_sort_key_whiteouts(struct bkey_packed *dst,
                                 struct sort_iter *iter)
{
        struct bkey_packed *in, *out = dst;

        sort_iter_sort(iter, sort_key_whiteouts_cmp);

        while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
                bkey_copy(out, in);
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}

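/*
 * Extent whiteouts are compared by start position only, so equal and
 * overlapping ranges end up adjacent: bch2_sort_extent_whiteouts() below then
 * coalesces runs of adjacent/overlapping whiteouts into a single discard key,
 * limited by how large a size/offset the format can encode when the key was
 * packed.
 */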
static inline int sort_extent_whiteouts_cmp(struct btree *b,
                                            struct bkey_packed *l,
                                            struct bkey_packed *r)
{
        struct bkey ul = bkey_unpack_key(b, l);
        struct bkey ur = bkey_unpack_key(b, r);

        return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}

unsigned bch2_sort_extent_whiteouts(struct bkey_packed *dst,
                                    struct sort_iter *iter)
{
        const struct bkey_format *f = &iter->b->format;
        struct bkey_packed *in, *out = dst;
        struct bkey_i l, r;
        bool prev = false, l_packed = false;
        u64 max_packed_size     = bkey_field_max(f, BKEY_FIELD_SIZE);
        u64 max_packed_offset   = bkey_field_max(f, BKEY_FIELD_OFFSET);
        u64 new_size;

        max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

        sort_iter_sort(iter, sort_extent_whiteouts_cmp);

        while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
                if (bkey_deleted(in))
                        continue;

                EBUG_ON(bkeyp_val_u64s(f, in));
                EBUG_ON(in->type != KEY_TYPE_discard);

                r.k = bkey_unpack_key(iter->b, in);

                if (prev &&
                    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
                        if (bkey_cmp(l.k.p, r.k.p) >= 0)
                                continue;

                        new_size = l_packed
                                ? min(max_packed_size, max_packed_offset -
                                      bkey_start_offset(&l.k))
                                : KEY_SIZE_MAX;

                        new_size = min(new_size, r.k.p.offset -
                                       bkey_start_offset(&l.k));

                        BUG_ON(new_size < l.k.size);

                        bch2_key_resize(&l.k, new_size);

                        if (bkey_cmp(l.k.p, r.k.p) >= 0)
                                continue;

                        bch2_cut_front(l.k.p, &r);
                }

                if (prev) {
                        if (!bch2_bkey_pack(out, &l, f)) {
                                BUG_ON(l_packed);
                                bkey_copy(out, &l);
                        }
                        out = bkey_next(out);
                }

                l = r;
                prev = true;
                l_packed = bkey_packed(in);
        }

        if (prev) {
                if (!bch2_bkey_pack(out, &l, f)) {
                        BUG_ON(l_packed);
                        bkey_copy(out, &l);
                }
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}