// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_sort.h"
#include "bset.h"
#include "extents.h"
/* too many iterators, need to clean this up */

/* btree_node_iter_large: */
#define btree_node_iter_cmp_heap(h, _l, _r) btree_node_iter_cmp(b, _l, _r)
static inline bool
bch2_btree_node_iter_large_end(struct btree_node_iter_large *iter)
{
	return !iter->used;
}
static inline struct bkey_packed *
bch2_btree_node_iter_large_peek_all(struct btree_node_iter_large *iter,
				    struct btree *b)
{
	return bch2_btree_node_iter_large_end(iter)
		? NULL
		: __btree_node_offset_to_key(b, iter->data->k);
}
static inline void
bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
				   struct btree *b)
{
	iter->data->k += __btree_node_offset_to_key(b, iter->data->k)->u64s;

	EBUG_ON(!iter->used);
	EBUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		heap_del(iter, 0, btree_node_iter_cmp_heap, NULL);
	else
		heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL);
}
static inline struct bkey_packed *
bch2_btree_node_iter_large_next_all(struct btree_node_iter_large *iter,
				    struct btree *b)
{
	struct bkey_packed *ret = bch2_btree_node_iter_large_peek_all(iter, b);

	if (ret)
		bch2_btree_node_iter_large_advance(iter, b);

	return ret;
}
void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
				     struct btree *b,
				     const struct bkey_packed *k,
				     const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set n =
			((struct btree_node_iter_set) {
				 __btree_node_key_to_offset(b, k),
				 __btree_node_key_to_offset(b, end)
			 });

		__heap_add(iter, n, btree_node_iter_cmp_heap, NULL);
	}
}
static void sort_key_next(struct btree_node_iter_large *iter,
			  struct btree *b,
			  struct btree_node_iter_set *i)
{
	i->k += __btree_node_offset_to_key(b, i->k)->u64s;

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}
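
/*
 * Note (added commentary): each btree_node_iter_set in the heap is a
 * half-open range [k, end) of u64 offsets into the node, one per bset being
 * merged.  The heap is ordered by each set's current key, so the smallest key
 * across all bsets is always at iter->data[0]; when a set is exhausted,
 * sort_key_next() above replaces it with the last set and shrinks the heap.
 */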
/* regular sort_iters */
typedef int (*sort_cmp_fn)(struct btree *,
			   struct bkey_packed *,
			   struct bkey_packed *);
static inline void __sort_iter_sift(struct sort_iter *iter,
				    unsigned from,
				    sort_cmp_fn cmp)
{
	unsigned i;

	for (i = from;
	     i + 1 < iter->used &&
	     cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
	     i++)
		swap(iter->data[i], iter->data[i + 1]);
}
static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
	__sort_iter_sift(iter, 0, cmp);
}
/* insertion sort from the back: establishes sorted order across all sets */
static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
	unsigned i = iter->used;

	while (i--)
		__sort_iter_sift(iter, i, cmp);
}
static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
	return iter->used ? iter->data->k : NULL;
}
static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
	iter->data->k = bkey_next(iter->data->k);

	BUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		array_remove_item(iter->data, iter->used, 0);
	else
		sort_iter_sift(iter, cmp);
}
static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
						 sort_cmp_fn cmp)
{
	struct bkey_packed *ret = sort_iter_peek(iter);

	if (ret)
		sort_iter_advance(iter, cmp);

	return ret;
}
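
/*
 * Typical usage of the sort_iter machinery (an illustrative sketch, not part
 * of the original file):
 *
 *	sort_iter_sort(iter, cmp);
 *	while ((k = sort_iter_next(iter, cmp))) {
 *		// keys arrive in cmp order, smallest first, merged
 *		// across all of the iterator's sets
 *	}
 */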
/*
 * Returns a positive value if l sorts after r - unless l == r, in which case
 * it returns positive if l is newer than r, so that keys which compare equal
 * pop off the heap oldest first:
 *
 * Necessary for should_drop_next_key() - if there are multiple keys that
 * compare equal in different sets, the older duplicates have to be dropped so
 * that only the newest version survives.
 */
#define key_sort_cmp(h, l, r)						\
({									\
	bkey_cmp_packed(b,						\
			__btree_node_offset_to_key(b, (l).k),		\
			__btree_node_offset_to_key(b, (r).k))		\
									\
	?: (l).k - (r).k;						\
})
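
/*
 * Example (added commentary, not from the original source): if bset 0 and
 * bset 1 both contain a version of the key at the same position, the copy in
 * bset 0 sits at a lower node offset and so pops off the heap first;
 * should_drop_next_key() then sees it compare equal to the newer copy still
 * in the heap and drops it, so only the newest version reaches the output.
 */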
static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
					struct btree *b)
{
	struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
	struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);

	if (bkey_whiteout(k))
		return true;

	if (iter->used < 2)
		return false;

	if (iter->used > 2 &&
	    key_sort_cmp(iter, r[0], r[1]) >= 0)
		r++;

	/*
	 * key_sort_cmp() ensures that when keys compare equal the older key
	 * comes first; so if l->k compares equal to r->k then l->k is older
	 * and should be dropped.
	 */
	return !bkey_cmp_packed(b,
				__btree_node_offset_to_key(b, l->k),
				__btree_node_offset_to_key(b, r->k));
}
struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
						   struct btree *b,
						   struct btree_node_iter_large *iter)
{
	struct bkey_packed *out = dst->start;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, key_sort_cmp, NULL);

	while (!bch2_btree_node_iter_large_end(iter)) {
		if (!should_drop_next_key(iter, b)) {
			struct bkey_packed *k =
				__btree_node_offset_to_key(b, iter->data->k);

			bkey_copy(out, k);
			btree_keys_account_key_add(&nr, 0, out);
			out = bkey_next(out);
		}

		sort_key_next(iter, b, iter->data);
		heap_sift_down(iter, 0, key_sort_cmp, NULL);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}
/*
 * If keys compare equal, compare by pointer order:
 *
 * Necessary for bch2_extent_sort_fix_overlapping() - if there are multiple
 * keys that compare equal in different sets, we have to process them newest
 * to oldest.
 */
#define extent_sort_cmp(h, l, r)					\
({									\
	struct bkey _ul = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (l).k));	\
	struct bkey _ur = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (r).k));	\
									\
	bkey_cmp(bkey_start_pos(&_ul),					\
		 bkey_start_pos(&_ur)) ?: (r).k - (l).k;		\
})
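
/*
 * Note (added commentary): the tiebreak here is (r).k - (l).k, the reverse of
 * key_sort_cmp() - among extents with equal start positions the newest key
 * (highest node offset) pops first, which is what guarantees that when two
 * extents start at the same position the heap top is the newer one (see the
 * BUG_ON in bch2_extent_sort_fix_overlapping() below).
 */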
static inline void extent_sort_sift(struct btree_node_iter_large *iter,
				    struct btree *b, size_t i)
{
	heap_sift_down(iter, i, extent_sort_cmp, NULL);
}
static inline void extent_sort_next(struct btree_node_iter_large *iter,
				    struct btree *b,
				    struct btree_node_iter_set *i)
{
	sort_key_next(iter, b, i);
	heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
}
static void extent_sort_advance_prev(struct bkey_format *f,
				     struct btree_nr_keys *nr,
				     struct bkey_packed *start,
				     struct bkey_packed **prev)
{
	if (*prev) {
		bch2_bkey_pack(*prev, (void *) *prev, f);

		btree_keys_account_key_add(nr, 0, *prev);
		*prev = bkey_next(*prev);
	} else {
		*prev = start;
	}
}
static void extent_sort_append(struct bch_fs *c,
			       struct bkey_format *f,
			       struct btree_nr_keys *nr,
			       struct bkey_packed *start,
			       struct bkey_packed **prev,
			       struct bkey_s k)
{
	if (bkey_whiteout(k.k))
		return;

	/*
	 * prev is always unpacked, for key merging - until right before we
	 * advance it:
	 */

	if (*prev &&
	    bch2_bkey_merge(c, bkey_i_to_s((void *) *prev), k) ==
	    BCH_MERGE_MERGE)
		return;

	extent_sort_advance_prev(f, nr, start, prev);

	bkey_reassemble((void *) *prev, k.s_c);
}
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
					struct bset *dst,
					struct btree *b,
					struct btree_node_iter_large *iter)
{
	struct bkey_format *f = &b->format;
	struct btree_node_iter_set *_l = iter->data, *_r;
	struct bkey_packed *prev = NULL, *lk, *rk;
	struct bkey l_unpacked, r_unpacked;
	struct bkey_s l, r;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, extent_sort_cmp, NULL);

	while (!bch2_btree_node_iter_large_end(iter)) {
		lk = __btree_node_offset_to_key(b, _l->k);
		l = __bkey_disassemble(b, lk, &l_unpacked);

		if (iter->used == 1) {
			extent_sort_append(c, f, &nr, dst->start, &prev, l);
			extent_sort_next(iter, b, _l);
			continue;
		}

		_r = iter->data + 1;
		if (iter->used > 2 &&
		    extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
			_r++;

		rk = __btree_node_offset_to_key(b, _r->k);
		r = __bkey_disassemble(b, rk, &r_unpacked);

		/* If current key and next key don't overlap, just append */
		if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
			extent_sort_append(c, f, &nr, dst->start, &prev, l);
			extent_sort_next(iter, b, _l);
			continue;
		}

		/* Skip 0 size keys */
		if (!r.k->size) {
			extent_sort_next(iter, b, _r);
			continue;
		}

		/*
		 * overlap: keep the newer key and trim the older key so they
		 * don't overlap. comparing pointers tells us which one is
		 * newer, since the bsets are appended one after the other.
		 */

		/* can't happen because of comparison func */
		BUG_ON(_l->k < _r->k &&
		       !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));

		if (_l->k > _r->k) {
			/* l wins, trim r */
			if (bkey_cmp(l.k->p, r.k->p) >= 0) {
				sort_key_next(iter, b, _r);
			} else {
				__bch2_cut_front(l.k->p, r);
				extent_save(b, rk, r.k);
			}

			extent_sort_sift(iter, b, _r - iter->data);
		} else if (bkey_cmp(l.k->p, r.k->p) > 0) {
			BKEY_PADDED(k) tmp;

			/*
			 * r wins, but it overlaps in the middle of l - split l:
			 */
			bkey_reassemble(&tmp.k, l.s_c);
			bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);

			__bch2_cut_front(r.k->p, l);
			extent_save(b, lk, l.k);

			extent_sort_sift(iter, b, 0);

			extent_sort_append(c, f, &nr, dst->start,
					   &prev, bkey_i_to_s(&tmp.k));
		} else {
			/* r wins and covers the end of l - cut l back: */
			bch2_cut_back(bkey_start_pos(r.k), l.k);
			extent_save(b, lk, l.k);
		}
	}

	extent_sort_advance_prev(f, &nr, dst->start, &prev);

	dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);
	return nr;
}
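
/*
 * Overlap cases resolved above, sketched with offsets (added commentary; l is
 * the heap top, which never starts after r):
 *
 *	l newer, covers all of r:	l [0,20), r [5,15)  ->  r dropped
 *	l newer, covers r's head:	l [0,10), r [5,15)  ->  r becomes [10,15)
 *	r newer, covers l's tail:	l [0,10), r [5,15)  ->  l becomes [0,5)
 *	r newer, inside l:		l [0,20), r [5,15)  ->  l split into
 *					[0,5) and [15,20)
 */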
/* Sort + repack in a new format: */
struct btree_nr_keys
bch2_sort_repack(struct bset *dst, struct btree *src,
		 struct btree_node_iter *src_iter,
		 struct bkey_format *out_f,
		 bool filter_whiteouts)
{
	struct bkey_format *in_f = &src->format;
	struct bkey_packed *in, *out = vstruct_last(dst);
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
		if (filter_whiteouts && bkey_whiteout(in))
			continue;

		if (bch2_bkey_transform(out_f, out, bkey_packed(in)
					? in_f : &bch2_bkey_format_current, in))
			out->format = KEY_FORMAT_LOCAL_BTREE;
		else
			bch2_bkey_unpack(src, (void *) out, in);

		btree_keys_account_key_add(&nr, 0, out);
		out = bkey_next(out);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}
/* Sort, repack, and merge: */
struct btree_nr_keys
bch2_sort_repack_merge(struct bch_fs *c,
		       struct bset *dst, struct btree *src,
		       struct btree_node_iter *iter,
		       struct bkey_format *out_f,
		       bool filter_whiteouts)
{
	struct bkey_packed *prev = NULL, *k_packed;
	struct bkey_s k;
	struct btree_nr_keys nr;
	BKEY_PADDED(k) tmp;

	memset(&nr, 0, sizeof(nr));

	while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
		if (filter_whiteouts && bkey_whiteout(k_packed))
			continue;

		EBUG_ON(bkeyp_val_u64s(&src->format, k_packed) >
			BKEY_EXTENT_VAL_U64s_MAX);

		bch2_bkey_unpack(src, &tmp.k, k_packed);
		k = bkey_i_to_s(&tmp.k);

		if (filter_whiteouts &&
		    bch2_bkey_normalize(c, k))
			continue;

		extent_sort_append(c, out_f, &nr, vstruct_last(dst), &prev, k);
	}

	extent_sort_advance_prev(out_f, &nr, vstruct_last(dst), &prev);

	dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);
	return nr;
}
static inline int sort_keys_cmp(struct btree *b,
				struct bkey_packed *l,
				struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
		(int) l->needs_whiteout - (int) r->needs_whiteout;
}
unsigned bch2_sort_keys(struct bkey_packed *dst,
			struct sort_iter *iter,
			bool filter_whiteouts)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *next, *out = dst;

	sort_iter_sort(iter, sort_keys_cmp);

	while ((in = sort_iter_next(iter, sort_keys_cmp))) {
		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		if (bkey_whiteout(in) &&
		    (next = sort_iter_peek(iter)) &&
		    !bkey_cmp_packed(iter->b, in, next)) {
			BUG_ON(in->needs_whiteout &&
			       next->needs_whiteout);
			/*
			 * XXX racy, called with read lock from write path
			 *
			 * leads to spurious BUG_ON() in bkey_unpack_key() in
			 * debug mode
			 */
			next->needs_whiteout |= in->needs_whiteout;
			continue;
		}

		if (bkey_whiteout(in)) {
			memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
			set_bkeyp_val_u64s(f, out, 0);
		} else {
			bkey_copy(out, in);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
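
/*
 * Note (added commentary): sort_keys_cmp() sorts whiteouts before live keys
 * at the same position, so in the loop above a whiteout followed by an equal
 * key is redundant - the following key supersedes it - and only its
 * needs_whiteout flag has to be carried over before it is dropped.
 */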
static inline int sort_extents_cmp(struct btree *b,
				   struct bkey_packed *l,
				   struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(l) - (int) bkey_deleted(r);
}
unsigned bch2_sort_extents(struct bkey_packed *dst,
			   struct sort_iter *iter,
			   bool filter_whiteouts)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_extents_cmp);

	while ((in = sort_iter_next(iter, sort_extents_cmp))) {
		if (bkey_deleted(in))
			continue;

		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
static inline int sort_key_whiteouts_cmp(struct btree *b,
					 struct bkey_packed *l,
					 struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r);
}
unsigned bch2_sort_key_whiteouts(struct bkey_packed *dst,
				 struct sort_iter *iter)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_key_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
static inline int sort_extent_whiteouts_cmp(struct btree *b,
					    struct bkey_packed *l,
					    struct bkey_packed *r)
{
	struct bkey ul = bkey_unpack_key(b, l);
	struct bkey ur = bkey_unpack_key(b, r);

	return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}
unsigned bch2_sort_extent_whiteouts(struct bkey_packed *dst,
				    struct sort_iter *iter)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *out = dst;
	struct bkey_i l, r;
	bool prev = false, l_packed = false;
	u64 max_packed_size	= bkey_field_max(f, BKEY_FIELD_SIZE);
	u64 max_packed_offset	= bkey_field_max(f, BKEY_FIELD_OFFSET);
	u64 new_size;

	max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

	sort_iter_sort(iter, sort_extent_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
		if (bkey_deleted(in))
			continue;

		EBUG_ON(bkeyp_val_u64s(f, in));
		EBUG_ON(in->type != KEY_TYPE_discard);

		r.k = bkey_unpack_key(iter->b, in);

		if (prev &&
		    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			new_size = l_packed
				? min(max_packed_size, max_packed_offset -
				      bkey_start_offset(&l.k))
				: KEY_SIZE_MAX;

			new_size = min(new_size, r.k.p.offset -
				       bkey_start_offset(&l.k));

			BUG_ON(new_size < l.k.size);

			bch2_key_resize(&l.k, new_size);

			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			bch2_cut_front(l.k.p, &r);
		}

		if (prev) {
			if (!bch2_bkey_pack(out, &l, f)) {
				memcpy_u64s(out, &l, bkey_u64s(&l.k));
				out->format = KEY_FORMAT_LOCAL_BTREE;
			}
			out = bkey_next(out);
		}

		l = r;
		prev = true;
		l_packed = bkey_packed(in);
	}

	if (prev) {
		if (!bch2_bkey_pack(out, &l, f)) {
			memcpy_u64s(out, &l, bkey_u64s(&l.k));
			out->format = KEY_FORMAT_LOCAL_BTREE;
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
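
/*
 * Worked example (added commentary, details depend on the packed format):
 * given discard whiteouts [0,5) and [3,10), the merge path above resizes the
 * first to cover [0,10) - clamped to what BKEY_FIELD_SIZE/BKEY_FIELD_OFFSET
 * can represent - and the second is then dropped (or cut to the uncovered
 * remainder), so overlapping extent whiteouts are emitted as one key.
 */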