// SPDX-License-Identifier: GPL-2.0
/* headers assumed for the types and helpers used below: */
#include "bcachefs.h"
#include "bkey_sort.h"
#include "bset.h"
#include "extents.h"

/* too many iterators, need to clean this up */

/* btree_node_iter_large: */

#define btree_node_iter_cmp_heap(h, _l, _r) btree_node_iter_cmp(b, _l, _r)

static inline bool
bch2_btree_node_iter_large_end(struct btree_node_iter_large *iter)
{
	return !iter->used;
}

static inline struct bkey_packed *
bch2_btree_node_iter_large_peek_all(struct btree_node_iter_large *iter,
				    struct btree *b)
{
	return bch2_btree_node_iter_large_end(iter)
		? NULL
		: __btree_node_offset_to_key(b, iter->data->k);
}

static void
bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
				   struct btree *b)
{
	iter->data->k += __btree_node_offset_to_key(b, iter->data->k)->u64s;

	EBUG_ON(!iter->used);
	EBUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		heap_del(iter, 0, btree_node_iter_cmp_heap, NULL);
	else
		heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL);
}

static inline struct bkey_packed *
bch2_btree_node_iter_large_next_all(struct btree_node_iter_large *iter,
				    struct btree *b)
{
	struct bkey_packed *ret = bch2_btree_node_iter_large_peek_all(iter, b);

	if (ret)
		bch2_btree_node_iter_large_advance(iter, b);

	return ret;
}

void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
				     struct btree *b,
				     const struct bkey_packed *k,
				     const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set n =
			((struct btree_node_iter_set) {
				 __btree_node_key_to_offset(b, k),
				 __btree_node_key_to_offset(b, end)
			 });

		__heap_add(iter, n, btree_node_iter_cmp_heap, NULL);
	}
}

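/*
 * Typical usage of the large iterator (illustrative sketch; the real
 * caller is the btree node read path, and for_each_merge_bset() here is a
 * hypothetical helper):
 *
 *	struct btree_node_iter_large iter;
 *	struct bkey_packed *k;
 *
 *	memset(&iter, 0, sizeof(iter));
 *	for_each_merge_bset(b, i)
 *		bch2_btree_node_iter_large_push(&iter, b, i->start,
 *						vstruct_last(i));
 *
 *	while ((k = bch2_btree_node_iter_large_next_all(&iter, b)))
 *		...;
 *
 * push() adds each nonempty range to the heap; next_all() then repeatedly
 * returns the smallest key across all pushed ranges.
 */
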
static void sort_key_next(struct btree_node_iter_large *iter,
			  struct btree *b,
			  struct btree_node_iter_set *i)
{
	i->k += __btree_node_offset_to_key(b, i->k)->u64s;

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}

/* regular sort_iters */

typedef int (*sort_cmp_fn)(struct btree *,
			   struct bkey_packed *,
			   struct bkey_packed *);

static inline void __sort_iter_sift(struct sort_iter *iter,
				    unsigned from,
				    sort_cmp_fn cmp)
{
	unsigned i;

	for (i = from;
	     i + 1 < iter->used &&
	     cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
	     i++)
		swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
	__sort_iter_sift(iter, 0, cmp);
}

static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
	unsigned i = iter->used;

	while (i--)
		__sort_iter_sift(iter, i, cmp);
}

static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
	return iter->used ? iter->data->k : NULL;
}

static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
	iter->data->k = bkey_next(iter->data->k);

	BUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		array_remove_item(iter->data, iter->used, 0);
	else
		sort_iter_sift(iter, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
						 sort_cmp_fn cmp)
{
	struct bkey_packed *ret = sort_iter_peek(iter);

	if (ret)
		sort_iter_advance(iter, cmp);

	return ret;
}

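/*
 * Usage sketch for the regular sort_iter (illustrative; sort_iter_init()
 * and sort_iter_add() live in the bset code):
 *
 *	struct sort_iter sort;
 *
 *	sort_iter_init(&sort, b);
 *	for_each_bset(b, t)
 *		sort_iter_add(&sort, btree_bkey_first(b, t),
 *			      btree_bkey_last(b, t));
 *	u64s = bch2_sort_keys(dst, &sort, false);
 *
 * sort_iter_sort() establishes the invariant that data[0] holds the
 * smallest key; sort_iter_next() pops it and restores the invariant with
 * a single insertion-sort pass, which is cheap because each range is
 * itself already sorted.
 */
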
/*
 * Returns a positive value if l sorts after r - unless the keys compare
 * equal, in which case the key at the lower offset (the older one, since
 * bsets are appended in write order) sorts first.
 *
 * Necessary for bch2_key_sort_fix_overlapping() - if there are multiple
 * keys that compare equal in different sets, should_drop_next_key() below
 * relies on seeing the older duplicates first, so it can drop them.
 */
#define key_sort_cmp(h, l, r)						\
({									\
	bkey_cmp_packed(b,						\
			__btree_node_offset_to_key(b, (l).k),		\
			__btree_node_offset_to_key(b, (r).k))		\
									\
	?: (l).k - (r).k;						\
})

static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
					struct btree *b)
{
	struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
	struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);

	if (bkey_whiteout(k))
		return true;

	if (iter->used < 2)
		return false;

	if (iter->used > 2 &&
	    key_sort_cmp(iter, r[0], r[1]) >= 0)
		r++;

	/*
	 * key_sort_cmp() ensures that when keys compare equal the older key
	 * comes first; so if l->k compares equal to r->k then l->k is older
	 * and should be dropped.
	 */
	return !bkey_cmp_packed(b,
				__btree_node_offset_to_key(b, l->k),
				__btree_node_offset_to_key(b, r->k));
}

struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
						   struct btree *b,
						   struct btree_node_iter_large *iter)
{
	struct bkey_packed *out = dst->start;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, key_sort_cmp, NULL);

	while (!bch2_btree_node_iter_large_end(iter)) {
		if (!should_drop_next_key(iter, b)) {
			struct bkey_packed *k =
				__btree_node_offset_to_key(b, iter->data->k);

			bkey_copy(out, k);
			btree_keys_account_key_add(&nr, 0, out);
			out = bkey_next(out);
		}

		sort_key_next(iter, b, iter->data);
		heap_sift_down(iter, 0, key_sort_cmp, NULL);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

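/*
 * Illustrative caller sketch (hypothetical names; the real caller is the
 * btree node read path): each bset read off disk is pushed as one sorted
 * range, and the merge emits a single deduplicated bset:
 *
 *	for (i = 0; i < nr_bsets; i++)
 *		bch2_btree_node_iter_large_push(&iter, b, bset[i]->start,
 *						vstruct_last(bset[i]));
 *	nr = bch2_key_sort_fix_overlapping(dst, b, &iter);
 */
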
/*
 * If keys compare equal, compare by pointer order:
 *
 * Necessary for sort_fix_overlapping() - if there are multiple keys that
 * compare equal in different sets, we have to process them newest to oldest.
 */
#define extent_sort_cmp(h, l, r)					\
({									\
	struct bkey _ul = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (l).k));	\
	struct bkey _ur = bkey_unpack_key(b,				\
				__btree_node_offset_to_key(b, (r).k));	\
									\
	bkey_cmp(bkey_start_pos(&_ul),					\
		 bkey_start_pos(&_ur)) ?: (r).k - (l).k;		\
})

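/*
 * Note the tiebreak direction relative to key_sort_cmp(): here the key at
 * the higher offset - the newer one, since bsets are appended in write
 * order - sorts first, so the overlap resolution below always sees the
 * newest version of an extent at the top of the heap.
 */
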
static inline void extent_sort_sift(struct btree_node_iter_large *iter,
				    struct btree *b, size_t i)
{
	heap_sift_down(iter, i, extent_sort_cmp, NULL);
}

static inline void extent_sort_next(struct btree_node_iter_large *iter,
				    struct btree *b,
				    struct btree_node_iter_set *i)
{
	sort_key_next(iter, b, i);
	heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
}

static void extent_sort_append(struct bch_fs *c,
			       struct btree *b,
			       struct btree_nr_keys *nr,
			       struct bkey_packed *start,
			       struct bkey_packed **prev,
			       struct bkey_packed *k)
{
	struct bkey_format *f = &b->format;
	BKEY_PADDED(k) tmp;

	if (bkey_whiteout(k))
		return;

	bch2_bkey_unpack(b, &tmp.k, k);

	if (*prev &&
	    bch2_bkey_merge(c, (void *) *prev, &tmp.k))
		return;

	if (*prev) {
		bch2_bkey_pack(*prev, (void *) *prev, f);

		btree_keys_account_key_add(nr, 0, *prev);
		*prev = bkey_next(*prev);
	} else {
		*prev = start;
	}

	bkey_copy(*prev, &tmp.k);
}

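/*
 * *prev buffers the most recently appended key, kept unpacked so that the
 * next key can potentially be merged into it; it's only packed (in place)
 * and accounted once a non-mergeable key arrives, or when the sort
 * finishes.
 */
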
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
					struct bset *dst,
					struct btree *b,
					struct btree_node_iter_large *iter)
{
	struct bkey_format *f = &b->format;
	struct btree_node_iter_set *_l = iter->data, *_r;
	struct bkey_packed *prev = NULL, *out, *lk, *rk;
	struct bkey l_unpacked, r_unpacked;
	struct bkey_s l, r;
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	heap_resort(iter, extent_sort_cmp, NULL);

	while (!bch2_btree_node_iter_large_end(iter)) {
		lk = __btree_node_offset_to_key(b, _l->k);

		if (iter->used == 1) {
			extent_sort_append(c, b, &nr, dst->start, &prev, lk);
			extent_sort_next(iter, b, _l);
			continue;
		}

		_r = iter->data + 1;
		if (iter->used > 2 &&
		    extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
			_r++;

		rk = __btree_node_offset_to_key(b, _r->k);

		l = __bkey_disassemble(b, lk, &l_unpacked);
		r = __bkey_disassemble(b, rk, &r_unpacked);

		/* If current key and next key don't overlap, just append */
		if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
			extent_sort_append(c, b, &nr, dst->start, &prev, lk);
			extent_sort_next(iter, b, _l);
			continue;
		}

		/* Skip 0 size keys */
		if (!r.k->size) {
			extent_sort_next(iter, b, _r);
			continue;
		}

		/*
		 * overlap: keep the newer key and trim the older key so they
		 * don't overlap. comparing pointers tells us which one is
		 * newer, since the bsets are appended one after the other.
		 */

		/* can't happen because of comparison func */
		BUG_ON(_l->k < _r->k &&
		       !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));

		if (_l->k > _r->k) {
			/* l wins, trim r */
			if (bkey_cmp(l.k->p, r.k->p) >= 0) {
				sort_key_next(iter, b, _r);
			} else {
				__bch2_cut_front(l.k->p, r);
				extent_save(b, rk, r.k);
			}

			extent_sort_sift(iter, b, _r - iter->data);
		} else if (bkey_cmp(l.k->p, r.k->p) > 0) {
			BKEY_PADDED(k) tmp;

			/*
			 * r wins, but it overlaps in the middle of l - split l:
			 */
			bkey_reassemble(&tmp.k, l.s_c);
			bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);

			__bch2_cut_front(r.k->p, l);
			extent_save(b, lk, l.k);

			extent_sort_sift(iter, b, 0);

			extent_sort_append(c, b, &nr, dst->start, &prev,
					   bkey_to_packed(&tmp.k));
		} else {
			bch2_cut_back(bkey_start_pos(r.k), l.k);
			extent_save(b, lk, l.k);
		}
	}

	if (prev) {
		bch2_bkey_pack(prev, (void *) prev, f);
		btree_keys_account_key_add(&nr, 0, prev);
		out = bkey_next(prev);
	} else {
		out = dst->start;
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

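/*
 * The three overlap cases above, illustrated (sketch; = marks the newer
 * key, - the older one; l is the top of the heap):
 *
 *  l newer: r is trimmed (or dropped entirely) up to l's end:
 *	l: |=======|             l: |=======|
 *	r:     |------|     ->   r:         |--|
 *
 *  r newer, l ends inside r: l is cut back to r's start:
 *	l: |------|              l: |--|
 *	r:     |======|     ->   r:     |======|
 *
 *  r newer, r in the middle of l: l is split around r:
 *	l: |----------|          l: |--|      |--|
 *	r:     |====|       ->   r:     |====|
 */
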
/* Sort + repack in a new format: */
struct btree_nr_keys
bch2_sort_repack(struct bset *dst, struct btree *src,
		 struct btree_node_iter *src_iter,
		 struct bkey_format *out_f,
		 bool filter_whiteouts)
{
	struct bkey_format *in_f = &src->format;
	struct bkey_packed *in, *out = vstruct_last(dst);
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
		if (filter_whiteouts && bkey_whiteout(in))
			continue;

		if (bch2_bkey_transform(out_f, out, bkey_packed(in)
					? in_f : &bch2_bkey_format_current, in))
			out->format = KEY_FORMAT_LOCAL_BTREE;
		else
			bch2_bkey_unpack(src, (void *) out, in);

		btree_keys_account_key_add(&nr, 0, out);
		out = bkey_next(out);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

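/*
 * As I understand the packing code: bch2_bkey_transform() fails when a
 * key's fields don't fit within the new format's field sizes; such keys
 * are stored unpacked instead, which always succeeds.
 */
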
/* Sort, repack, and merge: */
struct btree_nr_keys
bch2_sort_repack_merge(struct bch_fs *c,
		       struct bset *dst, struct btree *src,
		       struct btree_node_iter *iter,
		       struct bkey_format *out_f,
		       bool filter_whiteouts)
{
	struct bkey_packed *k, *prev = NULL, *out;
	struct btree_nr_keys nr;
	BKEY_PADDED(k) tmp;

	memset(&nr, 0, sizeof(nr));

	while ((k = bch2_btree_node_iter_next_all(iter, src))) {
		if (filter_whiteouts && bkey_whiteout(k))
			continue;

		/*
		 * The filter might modify pointers, so we have to unpack the
		 * key and values to &tmp.k:
		 */
		bch2_bkey_unpack(src, &tmp.k, k);

		if (filter_whiteouts &&
		    bch2_bkey_normalize(c, bkey_i_to_s(&tmp.k)))
			continue;

		/* prev is always unpacked, for key merging: */

		if (prev &&
		    bch2_bkey_merge(c, (void *) prev, &tmp.k) ==
		    BCH_MERGE_MERGE)
			continue;

		/*
		 * the current key becomes the new prev: advance prev, then
		 * copy the current key - but first pack prev (in place):
		 */
		if (prev) {
			bch2_bkey_pack(prev, (void *) prev, out_f);

			btree_keys_account_key_add(&nr, 0, prev);
			prev = bkey_next(prev);
		} else {
			prev = vstruct_last(dst);
		}

		bkey_copy(prev, &tmp.k);
	}

	if (prev) {
		bch2_bkey_pack(prev, (void *) prev, out_f);
		btree_keys_account_key_add(&nr, 0, prev);
		out = bkey_next(prev);
	} else {
		out = vstruct_last(dst);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
	return nr;
}

static inline int sort_keys_cmp(struct btree *b,
				struct bkey_packed *l,
				struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
		(int) l->needs_whiteout - (int) r->needs_whiteout;
}

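/*
 * Tiebreak order, spelled out: among keys at the same position, whiteouts
 * sort before live keys ((int) bkey_whiteout(r) - (int) bkey_whiteout(l)
 * is negative when only l is a whiteout), and keys without needs_whiteout
 * sort before keys with it set. bch2_sort_keys() below relies on this:
 * when it drops a whiteout that's superseded by the next key, it first
 * transfers the needs_whiteout flag forward.
 */
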
unsigned bch2_sort_keys(struct bkey_packed *dst,
			struct sort_iter *iter,
			bool filter_whiteouts)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *next, *out = dst;

	sort_iter_sort(iter, sort_keys_cmp);

	while ((in = sort_iter_next(iter, sort_keys_cmp))) {
		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		if (bkey_whiteout(in) &&
		    (next = sort_iter_peek(iter)) &&
		    !bkey_cmp_packed(iter->b, in, next)) {
			BUG_ON(in->needs_whiteout &&
			       next->needs_whiteout);
			/*
			 * XXX racy, called with read lock from write path
			 *
			 * leads to spurious BUG_ON() in bkey_unpack_key() in
			 * debug mode
			 */
			next->needs_whiteout |= in->needs_whiteout;
			continue;
		}

		if (bkey_whiteout(in)) {
			memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
			set_bkeyp_val_u64s(f, out, 0);
		} else {
			bkey_copy(out, in);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static inline int sort_extents_cmp(struct btree *b,
				   struct bkey_packed *l,
				   struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(l) - (int) bkey_deleted(r);
}

unsigned bch2_sort_extents(struct bkey_packed *dst,
			   struct sort_iter *iter,
			   bool filter_whiteouts)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_extents_cmp);

	while ((in = sort_iter_next(iter, sort_extents_cmp))) {
		if (bkey_deleted(in))
			continue;

		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static inline int sort_key_whiteouts_cmp(struct btree *b,
					 struct bkey_packed *l,
					 struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r);
}

unsigned bch2_sort_key_whiteouts(struct bkey_packed *dst,
				 struct sort_iter *iter)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_key_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}

static inline int sort_extent_whiteouts_cmp(struct btree *b,
					    struct bkey_packed *l,
					    struct bkey_packed *r)
{
	struct bkey ul = bkey_unpack_key(b, l);
	struct bkey ur = bkey_unpack_key(b, r);

	return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}

unsigned bch2_sort_extent_whiteouts(struct bkey_packed *dst,
				    struct sort_iter *iter)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *out = dst;
	struct bkey_i l, r;
	bool prev = false, l_packed = false;
	u64 max_packed_size	= bkey_field_max(f, BKEY_FIELD_SIZE);
	u64 max_packed_offset	= bkey_field_max(f, BKEY_FIELD_OFFSET);
	u64 new_size;

	max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

	sort_iter_sort(iter, sort_extent_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
		if (bkey_deleted(in))
			continue;

		EBUG_ON(bkeyp_val_u64s(f, in));
		EBUG_ON(in->type != KEY_TYPE_discard);

		r.k = bkey_unpack_key(iter->b, in);

		if (prev &&
		    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			new_size = l_packed
				? min(max_packed_size, max_packed_offset -
				      bkey_start_offset(&l.k))
				: KEY_SIZE_MAX;

			new_size = min(new_size, r.k.p.offset -
				       bkey_start_offset(&l.k));

			BUG_ON(new_size < l.k.size);

			bch2_key_resize(&l.k, new_size);

			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			bch2_cut_front(l.k.p, &r);
		}

		if (prev) {
			if (!bch2_bkey_pack(out, &l, f)) {
				BUG_ON(l_packed);
				bkey_copy(out, &l);
			}
			out = bkey_next(out);
		}

		l = r;
		prev = true;
		l_packed = bkey_packed(in);
	}

	if (prev) {
		if (!bch2_bkey_pack(out, &l, f)) {
			BUG_ON(l_packed);
			bkey_copy(out, &l);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
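
/*
 * Worked example (sketch): given discard whiteouts 10-20 and 15-30, the
 * pass above first tries to resize l (10-20) so it extends to r's end
 * (30); if the format's size/offset limits allow it, r is then fully
 * covered and dropped, otherwise l is extended as far as it can still
 * pack and r's front is cut back to l's new end - either way the output
 * contains no overlapping whiteouts.
 */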