// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_on_stack.h"
#include "bkey_sort.h"
#include "bset.h"
#include "extents.h"

typedef int (*sort_cmp_fn)(struct btree *,
                           struct bkey_packed *,
                           struct bkey_packed *);

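/*
 * A sort_iter merges keys from multiple sorted key sets: the comparison
 * function keeps iter->data[] ordered so that iter->data[0].k is always the
 * next key to be returned.
 */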
static inline bool sort_iter_end(struct sort_iter *iter)
{
        return !iter->used;
}

static inline void __sort_iter_sift(struct sort_iter *iter, unsigned from,
                                    sort_cmp_fn cmp)
{
        unsigned i;

        for (i = from;
             i + 1 < iter->used &&
             cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
             i++)
                swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
        __sort_iter_sift(iter, 0, cmp);
}

static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
        unsigned i = iter->used;

        while (i--)
                __sort_iter_sift(iter, i, cmp);
}

static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
        return !sort_iter_end(iter) ? iter->data->k : NULL;
}

static inline void __sort_iter_advance(struct sort_iter *iter,
                                       unsigned idx, sort_cmp_fn cmp)
{
        struct sort_iter_set *i = iter->data + idx;

        BUG_ON(idx >= iter->used);

        i->k = bkey_next_skip_noops(i->k, i->end);

        BUG_ON(i->k > i->end);

        if (i->k == i->end)
                array_remove_item(iter->data, iter->used, idx);
        else
                __sort_iter_sift(iter, idx, cmp);
}

static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
        __sort_iter_advance(iter, 0, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
                                                 sort_cmp_fn cmp)
{
        struct bkey_packed *ret = sort_iter_peek(iter);

        if (ret)
                sort_iter_advance(iter, cmp);

        return ret;
}

/*
 * If keys compare equal, compare by pointer order:
 */
static inline int key_sort_fix_overlapping_cmp(struct btree *b,
                                               struct bkey_packed *l,
                                               struct bkey_packed *r)
{
        return bkey_cmp_packed(b, l, r) ?:
                cmp_int((unsigned long) l, (unsigned long) r);
}

static inline bool should_drop_next_key(struct sort_iter *iter)
{
        /*
         * key_sort_cmp() ensures that when keys compare equal the older key
         * comes first; so if l->k compares equal to r->k then l->k is older
         * and should be dropped.
         */
        return iter->used >= 2 &&
                !bkey_cmp_packed(iter->b,
                                 iter->data[0].k,
                                 iter->data[1].k);
}

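/*
 * Merge the sorted sets in @iter into @dst, dropping whiteouts and, when
 * several sets contain a key at the same position, keeping only the newest
 * version.
 */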
struct btree_nr_keys
bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
                              struct sort_iter *iter)
{
        struct bkey_packed *out = dst->start;
        struct bkey_packed *k;
        struct btree_nr_keys nr;

        memset(&nr, 0, sizeof(nr));

        sort_iter_sort(iter, key_sort_fix_overlapping_cmp);

        while ((k = sort_iter_peek(iter))) {
                if (!bkey_whiteout(k) &&
                    !should_drop_next_key(iter)) {
                        bkey_copy(out, k);
                        btree_keys_account_key_add(&nr, 0, out);
                        out = bkey_next(out);
                }

                sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
        }

        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
        return nr;
}

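/*
 * The extent sort/merge paths keep *prev pointing at the last key written to
 * the output, still in unpacked form so it can be merged with the next key;
 * this helper packs it, accounts for it and advances *prev past it.
 */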
static void extent_sort_advance_prev(struct bkey_format *f,
                                     struct btree_nr_keys *nr,
                                     struct bkey_packed *start,
                                     struct bkey_packed **prev)
{
        if (*prev) {
                bch2_bkey_pack(*prev, (void *) *prev, f);

                btree_keys_account_key_add(nr, 0, *prev);
                *prev = bkey_next(*prev);
        } else {
                *prev = start;
        }
}

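/*
 * Append @k to the output, merging it into the previous key if the two can
 * be merged; whiteouts are dropped.
 */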
static void extent_sort_append(struct bch_fs *c,
                               struct bkey_format *f,
                               struct btree_nr_keys *nr,
                               struct bkey_packed *start,
                               struct bkey_packed **prev,
                               struct bkey_s k)
{
        if (bkey_whiteout(k.k))
                return;

        /*
         * prev is always unpacked, for key merging - until right before we
         * advance it:
         */

        if (*prev &&
            bch2_bkey_merge(c, bkey_i_to_s((void *) *prev), k) ==
            BCH_MERGE_MERGE)
                return;

        extent_sort_advance_prev(f, nr, start, prev);

        bkey_reassemble((void *) *prev, k.s_c);
}

/* Sort + repack in a new format: */
struct btree_nr_keys
bch2_sort_repack(struct bset *dst, struct btree *src,
                 struct btree_node_iter *src_iter,
                 struct bkey_format *out_f,
                 bool filter_whiteouts)
{
        struct bkey_format *in_f = &src->format;
        struct bkey_packed *in, *out = vstruct_last(dst);
        struct btree_nr_keys nr;

        memset(&nr, 0, sizeof(nr));

        while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
                if (filter_whiteouts && bkey_whiteout(in))
                        continue;

                if (bch2_bkey_transform(out_f, out, bkey_packed(in)
                                        ? in_f : &bch2_bkey_format_current, in))
                        out->format = KEY_FORMAT_LOCAL_BTREE;
                else
                        bch2_bkey_unpack(src, (void *) out, in);

                btree_keys_account_key_add(&nr, 0, out);
                out = bkey_next(out);
        }

        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
        return nr;
}

/* Sort, repack, and merge: */
struct btree_nr_keys
bch2_sort_repack_merge(struct bch_fs *c,
                       struct bset *dst, struct btree *src,
                       struct btree_node_iter *iter,
                       struct bkey_format *out_f,
                       bool filter_whiteouts)
{
        struct bkey_packed *prev = NULL, *k_packed;
        struct bkey_s k;
        struct btree_nr_keys nr;
        struct bkey unpacked;

        memset(&nr, 0, sizeof(nr));

        while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
                if (filter_whiteouts && bkey_whiteout(k_packed))
                        continue;

                k = __bkey_disassemble(src, k_packed, &unpacked);

                if (filter_whiteouts &&
                    bch2_bkey_normalize(c, k))
                        continue;

                extent_sort_append(c, out_f, &nr, vstruct_last(dst), &prev, k);
        }

        extent_sort_advance_prev(out_f, &nr, vstruct_last(dst), &prev);

        dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);
        return nr;
}

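/*
 * When keys compare equal, deleted keys sort before live keys, and a key
 * that doesn't need a whiteout written sorts before one that does;
 * bch2_sort_keys() relies on this ordering to drop redundant whiteouts.
 */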
static inline int sort_keys_cmp(struct btree *b,
                                struct bkey_packed *l,
                                struct bkey_packed *r)
{
        return bkey_cmp_packed(b, l, r) ?:
                (int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
                (int) l->needs_whiteout - (int) r->needs_whiteout;
}

unsigned bch2_sort_keys(struct bkey_packed *dst,
                        struct sort_iter *iter,
                        bool filter_whiteouts)
{
        const struct bkey_format *f = &iter->b->format;
        struct bkey_packed *in, *next, *out = dst;

        sort_iter_sort(iter, sort_keys_cmp);

        while ((in = sort_iter_next(iter, sort_keys_cmp))) {
                if (bkey_whiteout(in) &&
                    (filter_whiteouts || !in->needs_whiteout))
                        continue;

                if (bkey_whiteout(in) &&
                    (next = sort_iter_peek(iter)) &&
                    !bkey_cmp_packed(iter->b, in, next)) {
                        BUG_ON(in->needs_whiteout &&
                               next->needs_whiteout);
                        /*
                         * XXX racy, called with read lock from write path
                         *
                         * leads to spurious BUG_ON() in bkey_unpack_key() in
                         * debug mode
                         */
                        next->needs_whiteout |= in->needs_whiteout;
                        continue;
                }

                if (bkey_whiteout(in)) {
                        memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
                        set_bkeyp_val_u64s(f, out, 0);
                } else {
                        bkey_copy(out, in);
                }
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}

/* Compat code for btree_node_old_extent_overwrite: */

/*
 * If keys compare equal, compare by pointer order:
 *
 * Necessary for sort_fix_overlapping() - if there are multiple keys that
 * compare equal in different sets, we have to process them newest to oldest.
 */
static inline int extent_sort_fix_overlapping_cmp(struct btree *b,
                                                  struct bkey_packed *l,
                                                  struct bkey_packed *r)
{
        struct bkey ul = bkey_unpack_key(b, l);
        struct bkey ur = bkey_unpack_key(b, r);

        return bkey_cmp(bkey_start_pos(&ul),
                        bkey_start_pos(&ur)) ?:
                cmp_int((unsigned long) r, (unsigned long) l);
}

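/*
 * Merge the sorted sets in @iter into @dst, resolving overlapping extents:
 * the newer of two overlapping extents wins, and the older one is trimmed
 * (or split) so that the output contains no overlaps. Only used for nodes
 * written by the old extent overwrite code (see the compat comment above),
 * where extents within a node may still overlap.
 */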
struct btree_nr_keys
bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
                                 struct sort_iter *iter)
{
        struct btree *b = iter->b;
        struct bkey_format *f = &b->format;
        struct sort_iter_set *_l = iter->data, *_r = iter->data + 1;
        struct bkey_packed *prev = NULL;
        struct bkey l_unpacked, r_unpacked;
        struct bkey_s l, r;
        struct btree_nr_keys nr;
        struct bkey_on_stack split;

        memset(&nr, 0, sizeof(nr));
        bkey_on_stack_init(&split);

        sort_iter_sort(iter, extent_sort_fix_overlapping_cmp);

        while (!sort_iter_end(iter)) {
                l = __bkey_disassemble(b, _l->k, &l_unpacked);

                if (iter->used == 1) {
                        extent_sort_append(c, f, &nr, dst->start, &prev, l);
                        sort_iter_advance(iter,
                                          extent_sort_fix_overlapping_cmp);
                        continue;
                }

                r = __bkey_disassemble(b, _r->k, &r_unpacked);

                /* If current key and next key don't overlap, just append */
                if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
                        extent_sort_append(c, f, &nr, dst->start, &prev, l);
                        sort_iter_advance(iter,
                                          extent_sort_fix_overlapping_cmp);
                        continue;
                }

                /* Skip 0 size keys */
                if (!r.k->size) {
                        __sort_iter_advance(iter, 1,
                                            extent_sort_fix_overlapping_cmp);
                        continue;
                }

                /*
                 * overlap: keep the newer key and trim the older key so they
                 * don't overlap. comparing pointers tells us which one is
                 * newer, since the bsets are appended one after the other.
                 */

                /* can't happen because of comparison func */
                BUG_ON(_l->k < _r->k &&
                       !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));

                if (_l->k > _r->k) {
                        /* l wins, trim r */
                        if (bkey_cmp(l.k->p, r.k->p) >= 0) {
                                __sort_iter_advance(iter, 1,
                                         extent_sort_fix_overlapping_cmp);
                        } else {
                                bch2_cut_front_s(l.k->p, r);
                                extent_save(b, _r->k, r.k);
                                __sort_iter_sift(iter, 1,
                                         extent_sort_fix_overlapping_cmp);
                        }
                } else if (bkey_cmp(l.k->p, r.k->p) > 0) {

                        /*
                         * r wins, but it overlaps in the middle of l - split l:
                         */
                        bkey_on_stack_reassemble(&split, c, l.s_c);
                        bch2_cut_back(bkey_start_pos(r.k), split.k);

                        bch2_cut_front_s(r.k->p, l);
                        extent_save(b, _l->k, l.k);

                        __sort_iter_sift(iter, 0,
                                         extent_sort_fix_overlapping_cmp);

                        extent_sort_append(c, f, &nr, dst->start,
                                           &prev, bkey_i_to_s(split.k));
                } else {
                        bch2_cut_back_s(bkey_start_pos(r.k), l);
                        extent_save(b, _l->k, l.k);
                }
        }

        extent_sort_advance_prev(f, &nr, dst->start, &prev);

        dst->u64s = cpu_to_le16((u64 *) prev - dst->_data);

        bkey_on_stack_exit(&split, c);
        return nr;
}

static inline int sort_extents_cmp(struct btree *b,
                                   struct bkey_packed *l,
                                   struct bkey_packed *r)
{
        return bkey_cmp_packed(b, l, r) ?:
                (int) bkey_deleted(l) - (int) bkey_deleted(r);
}

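/*
 * Sort extents from @iter into @dst, dropping deleted keys, and dropping
 * whiteouts if @filter_whiteouts is set or if they don't need to be written
 * out; returns the size of the output in u64s.
 */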
unsigned bch2_sort_extents(struct bkey_packed *dst,
                           struct sort_iter *iter,
                           bool filter_whiteouts)
{
        struct bkey_packed *in, *out = dst;

        sort_iter_sort(iter, sort_extents_cmp);

        while ((in = sort_iter_next(iter, sort_extents_cmp))) {
                if (bkey_deleted(in))
                        continue;

                if (bkey_whiteout(in) &&
                    (filter_whiteouts || !in->needs_whiteout))
                        continue;

                bkey_copy(out, in);
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}

static inline int sort_extent_whiteouts_cmp(struct btree *b,
                                            struct bkey_packed *l,
                                            struct bkey_packed *r)
{
        struct bkey ul = bkey_unpack_key(b, l);
        struct bkey ur = bkey_unpack_key(b, r);

        return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}

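/*
 * Sort extent whiteouts (KEY_TYPE_discard keys with no value) by start
 * position, merging adjacent and overlapping whiteouts into as few keys as
 * the packed format allows; returns the size of the output in u64s.
 */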
unsigned bch2_sort_extent_whiteouts(struct bkey_packed *dst,
                                    struct sort_iter *iter)
{
        const struct bkey_format *f = &iter->b->format;
        struct bkey_packed *in, *out = dst;
        struct bkey_i l, r;
        bool prev = false, l_packed = false;
        u64 max_packed_size     = bkey_field_max(f, BKEY_FIELD_SIZE);
        u64 max_packed_offset   = bkey_field_max(f, BKEY_FIELD_OFFSET);
        u64 new_size;

        max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

        sort_iter_sort(iter, sort_extent_whiteouts_cmp);

        while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
                if (bkey_deleted(in))
                        continue;

                EBUG_ON(bkeyp_val_u64s(f, in));
                EBUG_ON(in->type != KEY_TYPE_discard);

                r.k = bkey_unpack_key(iter->b, in);

                if (prev &&
                    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
                        if (bkey_cmp(l.k.p, r.k.p) >= 0)
                                continue;

                        new_size = l_packed
                                ? min(max_packed_size, max_packed_offset -
                                      bkey_start_offset(&l.k))
                                : KEY_SIZE_MAX;

                        new_size = min(new_size, r.k.p.offset -
                                       bkey_start_offset(&l.k));

                        BUG_ON(new_size < l.k.size);

                        bch2_key_resize(&l.k, new_size);

                        if (bkey_cmp(l.k.p, r.k.p) >= 0)
                                continue;

                        bch2_cut_front(l.k.p, &r);
                }

                if (prev) {
                        if (!bch2_bkey_pack(out, &l, f)) {
                                BUG_ON(l_packed);
                                bkey_copy(out, &l);
                        }
                        out = bkey_next(out);
                }

                l = r;
                prev = true;
                l_packed = bkey_packed(in);
        }

        if (prev) {
                if (!bch2_bkey_pack(out, &l, f)) {
                        BUG_ON(l_packed);
                        bkey_copy(out, &l);
                }
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}