// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_on_stack.h"
#include "bkey_sort.h"
#include "bset.h"
#include "extents.h"

/* too many iterators, need to clean this up */

/* btree_node_iter_large: */

#define btree_node_iter_cmp_heap(h, _l, _r) btree_node_iter_cmp(b, _l, _r)
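
/*
 * A btree_node_iter_large is a heap of btree_node_iter_sets, one per bset
 * being merged: each set is a [k, end) range of offsets into the node, and
 * the heap is ordered so that the set whose current key sorts first is on
 * top.
 */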

static inline bool
bch2_btree_node_iter_large_end(struct btree_node_iter_large *iter)
{
	return !iter->used;
}

static inline struct bkey_packed *
bch2_btree_node_iter_large_peek_all(struct btree_node_iter_large *iter,
				    struct btree *b)
{
	return bch2_btree_node_iter_large_end(iter)
		? NULL
		: __btree_node_offset_to_key(b, iter->data->k);
}

static void
bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
				   struct btree *b)
{
	iter->data->k += __btree_node_offset_to_key(b, iter->data->k)->u64s;

	EBUG_ON(!iter->used);
	EBUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		heap_del(iter, 0, btree_node_iter_cmp_heap, NULL);
	else
		heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL);
}

static inline struct bkey_packed *
bch2_btree_node_iter_large_next_all(struct btree_node_iter_large *iter,
				    struct btree *b)
{
	struct bkey_packed *ret = bch2_btree_node_iter_large_peek_all(iter, b);

	if (ret)
		bch2_btree_node_iter_large_advance(iter, b);

	return ret;
}

void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
				     struct btree *b,
				     const struct bkey_packed *k,
				     const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set n =
			((struct btree_node_iter_set) {
				 __btree_node_key_to_offset(b, k),
				 __btree_node_key_to_offset(b, end)
			 });

		__heap_add(iter, n, btree_node_iter_cmp_heap, NULL);
	}
}
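
/*
 * Illustrative sketch of how a caller might feed a node's bsets into the
 * iterator before sorting (hypothetical pattern; the real callers live in
 * the btree read path):
 *
 *	for_each_bset(b, t)
 *		bch2_btree_node_iter_large_push(iter, b,
 *					btree_bkey_first(b, t),
 *					btree_bkey_last(b, t));
 */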

static void sort_key_next(struct btree_node_iter_large *iter,
			  struct btree *b,
			  struct btree_node_iter_set *i)
{
	i->k += __btree_node_offset_to_key(b, i->k)->u64s;

	while (i->k != i->end &&
	       !__btree_node_offset_to_key(b, i->k)->u64s)
		i->k++;

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}
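
/*
 * Note: when a set is exhausted, sort_key_next() just swaps in the last heap
 * element without restoring the heap invariant - callers are expected to
 * follow up with heap_sift_down().
 */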

/* regular sort_iters */

typedef int (*sort_cmp_fn)(struct btree *,
			   struct bkey_packed *,
			   struct bkey_packed *);
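
/*
 * sort_iter is the small-scale counterpart to btree_node_iter_large: a short
 * array of sets kept fully sorted by sifting, which is cheap enough given
 * that only a handful of bsets are ever merged at once.
 */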

static inline void __sort_iter_sift(struct sort_iter *iter,
				    unsigned from,
				    sort_cmp_fn cmp)
{
	unsigned i;

	for (i = from;
	     i + 1 < iter->used &&
	     cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
	     i++)
		swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
	__sort_iter_sift(iter, 0, cmp);
}

static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
	unsigned i = iter->used;

	while (i--)
		__sort_iter_sift(iter, i, cmp);
}
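
/*
 * This is effectively an insertion sort - quadratic in the worst case, but
 * iter->used is bounded by the number of bsets in a node, so in practice
 * it's nearly free.
 */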

static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
	return iter->used ? iter->data->k : NULL;
}

static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
	iter->data->k = bkey_next_skip_noops(iter->data->k, iter->data->end);

	BUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		array_remove_item(iter->data, iter->used, 0);
	else
		sort_iter_sift(iter, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
						 sort_cmp_fn cmp)
{
	struct bkey_packed *ret = sort_iter_peek(iter);

	if (ret)
		sort_iter_advance(iter, cmp);

	return ret;
}

/*
 * Returns a positive value if l > r; when two keys compare equal, the older
 * key (the one at the lower offset) sorts first.
 *
 * Necessary for should_drop_next_key() - when multiple keys compare equal in
 * different sets, the older duplicates surface first so they can be dropped
 * in favor of the newest version.
 */
#define key_sort_cmp(h, l, r)						\
({									\
	bkey_cmp_packed(b,						\
			__btree_node_offset_to_key(b, (l).k),		\
			__btree_node_offset_to_key(b, (r).k))		\
	?: (l).k - (r).k;						\
})

static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
					struct btree *b)
{
	struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
	struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);

	if (bkey_whiteout(k))
		return true;

	if (iter->used < 2)
		return false;

	if (iter->used > 2 &&
	    key_sort_cmp(iter, r[0], r[1]) >= 0)
		r++;

	/*
	 * key_sort_cmp() ensures that when keys compare equal the older key
	 * comes first; so if l->k compares equal to r->k then l->k is older
	 * and should be dropped:
	 */
	return !bkey_cmp_packed(b,
				__btree_node_offset_to_key(b, l->k),
				__btree_node_offset_to_key(b, r->k));
}
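
/*
 * Illustrative example: if an old bset and a newer bset each contain a key
 * at the same position, the older copy reaches the top of the heap first,
 * should_drop_next_key() sees that it compares equal to the key behind it,
 * and it is dropped - so only the newest version is copied to the output.
 */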

struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
						   struct btree *b,
						   struct btree_node_iter_large *iter)
{
	struct bkey_packed *out = dst->start;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, key_sort_cmp, NULL);

	while (!bch2_btree_node_iter_large_end(iter)) {
		if (!should_drop_next_key(iter, b)) {
			struct bkey_packed *k =
				__btree_node_offset_to_key(b, iter->data->k);

			bkey_copy(out, k);
			btree_keys_account_key_add(&nr, 0, out);
			out = bkey_next(out);
		}

		sort_key_next(iter, b, iter->data);
		heap_sift_down(iter, 0, key_sort_cmp, NULL);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

/*
 * If keys compare equal (same start position), compare by pointer order,
 * newest first:
 *
 * Necessary for bch2_extent_sort_fix_overlapping() - if there are multiple
 * keys that compare equal in different sets, we have to process them newest
 * to oldest.
 */
#define extent_sort_cmp(h, l, r)					\
({									\
	struct bkey _ul = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (l).k));	\
	struct bkey _ur = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (r).k));	\
									\
	bkey_cmp(bkey_start_pos(&_ul),					\
		 bkey_start_pos(&_ur)) ?: (r).k - (l).k;		\
})
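
/*
 * Note that the tie-break runs the other way from key_sort_cmp(): on equal
 * start positions the newer key (higher offset) sorts first here.
 */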

static inline void extent_sort_sift(struct btree_node_iter_large *iter,
				    struct btree *b, size_t i)
{
	heap_sift_down(iter, i, extent_sort_cmp, NULL);
}

static inline void extent_sort_next(struct btree_node_iter_large *iter,
				    struct btree *b,
				    struct btree_node_iter_set *i)
{
	sort_key_next(iter, b, i);
	heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
}

static void extent_sort_advance_prev(struct bkey_format *f,
				     struct btree_nr_keys *nr,
				     struct bkey_packed *start,
				     struct bkey_packed **prev)
{
	if (*prev) {
		bch2_bkey_pack(*prev, (void *) *prev, f);

		btree_keys_account_key_add(nr, 0, *prev);
		*prev = bkey_next(*prev);
	} else {
		*prev = start;
	}
}

static void extent_sort_append(struct bch_fs *c,
			       struct bkey_format *f,
			       struct btree_nr_keys *nr,
			       struct bkey_packed *start,
			       struct bkey_packed **prev,
			       struct bkey_s k)
{
	if (bkey_whiteout(k.k))
		return;

	/*
	 * prev is always unpacked, for key merging - until right before we
	 * advance it:
	 */

	if (*prev &&
	    bch2_bkey_merge(c, bkey_i_to_s((void *) *prev), k) ==
	    BCH_MERGE_MERGE)
		return;

	extent_sort_advance_prev(f, nr, start, prev);

	bkey_reassemble((void *) *prev, k.s_c);
}
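
/*
 * Illustrative example: given an older extent covering [0, 100) and a newer
 * one covering [50, 150), the newer key wins and the older key is trimmed
 * back to [0, 50); if the newer key instead sat entirely inside the older
 * one, the older key would be split around it.
 */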

struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
					struct bset *dst,
					struct btree *b,
					struct btree_node_iter_large *iter)
{
	struct bkey_format *f = &b->format;
	struct btree_node_iter_set *_l = iter->data, *_r;
	struct bkey_packed *prev = NULL, *lk, *rk;
	struct bkey l_unpacked, r_unpacked;
	struct bkey_s l, r;
	struct btree_nr_keys nr;
	struct bkey_on_stack split;

	memset(&nr, 0, sizeof(nr));
	bkey_on_stack_init(&split);

	heap_resort(iter, extent_sort_cmp, NULL);

	while (!bch2_btree_node_iter_large_end(iter)) {
		lk = __btree_node_offset_to_key(b, _l->k);
		l = __bkey_disassemble(b, lk, &l_unpacked);

		if (iter->used == 1) {
			extent_sort_append(c, f, &nr, dst->start, &prev, l);
			extent_sort_next(iter, b, _l);
			continue;
		}

		_r = iter->data + 1;
		if (iter->used > 2 &&
		    extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
			_r++;

		rk = __btree_node_offset_to_key(b, _r->k);
		r = __bkey_disassemble(b, rk, &r_unpacked);

		/* If current key and next key don't overlap, just append */
		if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
			extent_sort_append(c, f, &nr, dst->start, &prev, l);
			extent_sort_next(iter, b, _l);
			continue;
		}

		/* Skip 0 size keys */
		if (!r.k->size) {
			extent_sort_next(iter, b, _r);
			continue;
		}

		/*
		 * overlap: keep the newer key and trim the older key so they
		 * don't overlap. comparing pointers tells us which one is
		 * newer, since the bsets are appended one after the other.
		 */

		/* can't happen because of comparison func */
		BUG_ON(_l->k < _r->k &&
		       !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));

		if (_l->k > _r->k) {
			/* l wins, trim r */
			if (bkey_cmp(l.k->p, r.k->p) >= 0) {
				sort_key_next(iter, b, _r);
			} else {
				bch2_cut_front_s(l.k->p, r);
				extent_save(b, rk, r.k);
			}

			extent_sort_sift(iter, b, _r - iter->data);
		} else if (bkey_cmp(l.k->p, r.k->p) > 0) {
			bkey_on_stack_realloc(&split, c, l.k->u64s);

			/*
			 * r wins, but it overlaps in the middle of l - split l:
			 */
			bkey_reassemble(split.k, l.s_c);
			bch2_cut_back(bkey_start_pos(r.k), split.k);

			bch2_cut_front_s(r.k->p, l);
			extent_save(b, lk, l.k);

			extent_sort_sift(iter, b, 0);

			extent_sort_append(c, f, &nr, dst->start,
					   &prev, bkey_i_to_s(split.k));
		} else {
			bch2_cut_back_s(bkey_start_pos(r.k), l);
			extent_save(b, lk, l.k);
		}
	}

	extent_sort_advance_prev(f, &nr, dst->start, &prev);

	dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);

	bkey_on_stack_exit(&split, c);
	return nr;
}

/* Sort + repack in a new format: */
struct btree_nr_keys
bch2_sort_repack(struct bset *dst, struct btree *src,
		 struct btree_node_iter *src_iter,
		 struct bkey_format *out_f,
		 bool filter_whiteouts)
{
	struct bkey_format *in_f = &src->format;
	struct bkey_packed *in, *out = vstruct_last(dst);
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
		if (filter_whiteouts && bkey_whiteout(in))
			continue;

		if (bch2_bkey_transform(out_f, out, bkey_packed(in)
				       ? in_f : &bch2_bkey_format_current, in))
			out->format = KEY_FORMAT_LOCAL_BTREE;
		else
			bch2_bkey_unpack(src, (void *) out, in);

		btree_keys_account_key_add(&nr, 0, out);
		out = bkey_next(out);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

/* Sort, repack, and merge: */
struct btree_nr_keys
bch2_sort_repack_merge(struct bch_fs *c,
		       struct bset *dst, struct btree *src,
		       struct btree_node_iter *iter,
		       struct bkey_format *out_f,
		       bool filter_whiteouts)
{
	struct bkey_packed *prev = NULL, *k_packed;
	struct bkey_s k;
	struct btree_nr_keys nr;
	struct bkey unpacked;

	memset(&nr, 0, sizeof(nr));

	while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
		if (filter_whiteouts && bkey_whiteout(k_packed))
			continue;

		k = __bkey_disassemble(src, k_packed, &unpacked);

		if (filter_whiteouts &&
		    bch2_bkey_normalize(c, k))
			continue;

		extent_sort_append(c, out_f, &nr, vstruct_last(dst), &prev, k);
	}

	extent_sort_advance_prev(out_f, &nr, vstruct_last(dst), &prev);

	dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);
	return nr;
}
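
/*
 * Keys that compare equal are ordered so that whiteouts sort before live
 * keys, and whiteouts that don't need to be written out sort before those
 * that do - bch2_sort_keys() below relies on this ordering to drop and
 * coalesce them.
 */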

static inline int sort_keys_cmp(struct btree *b,
				struct bkey_packed *l,
				struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
		(int) l->needs_whiteout - (int) r->needs_whiteout;
}

unsigned bch2_sort_keys(struct bkey_packed *dst,
			struct sort_iter *iter,
			bool filter_whiteouts)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *next, *out = dst;

	sort_iter_sort(iter, sort_keys_cmp);

	while ((in = sort_iter_next(iter, sort_keys_cmp))) {
		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		if (bkey_whiteout(in) &&
		    (next = sort_iter_peek(iter)) &&
		    !bkey_cmp_packed(iter->b, in, next)) {
			BUG_ON(in->needs_whiteout &&
			       next->needs_whiteout);
			/*
			 * XXX racy, called with read lock from write path
			 *
			 * leads to spurious BUG_ON() in bkey_unpack_key() in
			 * debug mode
			 */
			next->needs_whiteout |= in->needs_whiteout;
			continue;
		}

		if (bkey_whiteout(in)) {
			memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
			set_bkeyp_val_u64s(f, out, 0);
		} else {
			bkey_copy(out, in);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static inline int sort_extents_cmp(struct btree *b,
				   struct bkey_packed *l,
				   struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(l) - (int) bkey_deleted(r);
}
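
/*
 * Deleted keys sort after live keys at the same position, so
 * bch2_sort_extents() below can simply skip them.
 */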

unsigned bch2_sort_extents(struct bkey_packed *dst,
			   struct sort_iter *iter,
			   bool filter_whiteouts)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_extents_cmp);

	while ((in = sort_iter_next(iter, sort_extents_cmp))) {
		if (bkey_deleted(in))
			continue;

		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static inline int sort_key_whiteouts_cmp(struct btree *b,
					 struct bkey_packed *l,
					 struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r);
}

unsigned bch2_sort_key_whiteouts(struct bkey_packed *dst,
				 struct sort_iter *iter)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_key_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static inline int sort_extent_whiteouts_cmp(struct btree *b,
					    struct bkey_packed *l,
					    struct bkey_packed *r)
{
	struct bkey ul = bkey_unpack_key(b, l);
	struct bkey ur = bkey_unpack_key(b, r);

	return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}
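
/*
 * Illustrative example: two discard whiteouts covering [0, 64) and [32, 128)
 * arrive in start-position order; the first is resized to end where the
 * second ends (as far as the packed format can represent), and the second is
 * trimmed or dropped, so the pair collapses into a single whiteout.
 */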

unsigned bch2_sort_extent_whiteouts(struct bkey_packed *dst,
				    struct sort_iter *iter)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *out = dst;
	struct bkey_i l, r;
	bool prev = false, l_packed = false;
	u64 max_packed_size	= bkey_field_max(f, BKEY_FIELD_SIZE);
	u64 max_packed_offset	= bkey_field_max(f, BKEY_FIELD_OFFSET);
	u64 new_size;

	max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

	sort_iter_sort(iter, sort_extent_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
		if (bkey_deleted(in))
			continue;

		EBUG_ON(bkeyp_val_u64s(f, in));
		EBUG_ON(in->type != KEY_TYPE_discard);

		r.k = bkey_unpack_key(iter->b, in);

		if (prev &&
		    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			new_size = l_packed
				? min(max_packed_size, max_packed_offset -
				      bkey_start_offset(&l.k))
				: KEY_SIZE_MAX;

			new_size = min(new_size, r.k.p.offset -
				       bkey_start_offset(&l.k));

			BUG_ON(new_size < l.k.size);

			bch2_key_resize(&l.k, new_size);

			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			bch2_cut_front(l.k.p, &r);
		}

		if (prev) {
			if (!bch2_bkey_pack(out, &l, f)) {
				BUG_ON(l_packed);
				bkey_copy(out, &l);
			}
			out = bkey_next(out);
		}

		l = r;
		prev = true;
		l_packed = bkey_packed(in);
	}

	if (prev) {
		if (!bch2_bkey_pack(out, &l, f)) {
			BUG_ON(l_packed);
			bkey_copy(out, &l);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}