1 // SPDX-License-Identifier: GPL-2.0
/*
 * Comparison callback used by the sort_iter helpers below: compares two
 * packed bkeys within a btree node, returning <0 / 0 / >0.
 *
 * NOTE(review): this excerpt is missing a middle parameter line — callers
 * (e.g. sort_iter_sift) pass two struct bkey_packed * arguments. Confirm
 * against the full file.
 */
8 typedef int (*sort_cmp_fn)(struct btree *,
10 struct bkey_packed *);
/*
 * Returns true when the sort iterator has no sources left to consume.
 *
 * NOTE(review): the function body is missing from this excerpt; callers
 * such as sort_iter_peek() treat it as an emptiness test — presumably it
 * checks iter->used. Confirm against the full file.
 */
12 static inline bool sort_iter_end(struct sort_iter *iter)
/*
 * Restore ordering among the iterator's sources starting at index @from:
 * while a source's current key compares greater (per @cmp) than its
 * successor's, the two entries are swapped — bubbling the entry toward
 * its correct position.
 *
 * NOTE(review): the loop header and variable declarations are missing
 * from this excerpt; only the comparison condition and swap body are
 * visible.
 */
17 static inline void sort_iter_sift(struct sort_iter *iter, unsigned from,
24 cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
26 swap(iter->data[i], iter->data[i + 1]);
/*
 * Fully order the iterator's sources by sifting from the back toward the
 * front; afterwards data[0] holds the source with the smallest current
 * key per @cmp.
 *
 * NOTE(review): the loop statement driving sort_iter_sift() (presumably
 * decrementing @i from iter->used) is missing from this excerpt.
 */
29 static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
31 unsigned i = iter->used;
34 sort_iter_sift(iter, i, cmp);
/*
 * Peek at the current minimum key (the front source's cursor) without
 * consuming it; returns NULL when the iterator is exhausted.
 */
37 static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
39 return !sort_iter_end(iter) ? iter->data->k : NULL;
/*
 * Consume the front source's current key: advance that source's cursor,
 * drop the source from the iterator when it is exhausted, and re-sift
 * position 0 so data[0] again holds the minimum per @cmp.
 */
42 static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
44 struct sort_iter_set *i = iter->data;
/* Step past the current key, skipping any no-op keys. */
48 i->k = bkey_next_skip_noops(i->k, i->end);
/* The cursor must never run past the end of its source. */
50 BUG_ON(i->k > i->end);
/*
 * NOTE(review): the branch structure here is missing from this excerpt —
 * presumably the source is removed only when i->k == i->end, and the
 * sift runs otherwise (or after). Confirm against the full file.
 */
53 array_remove_item(iter->data, iter->used, 0);
55 sort_iter_sift(iter, 0, cmp);
/*
 * Return the current minimum key and advance the iterator past it;
 * returns NULL when the iterator is exhausted.
 *
 * NOTE(review): this excerpt is missing the second parameter line
 * (callers pass a sort_cmp_fn) and the guard/return lines — presumably
 * sort_iter_advance() runs only when @ret is non-NULL, and @ret is
 * returned. Confirm against the full file.
 */
58 static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
61 struct bkey_packed *ret = sort_iter_peek(iter);
64 sort_iter_advance(iter, cmp);
/*
 * Comparison for the overlapping-key merge sort: order by key, and —
 * per the surviving comment fragment below — break ties between equal
 * keys by pointer order.
 */
70 * If keys compare equal, compare by pointer order:
72 static inline int key_sort_fix_overlapping_cmp(struct btree *b,
73 struct bkey_packed *l,
74 struct bkey_packed *r)
76 return bch2_bkey_cmp_packed(b, l, r) ?:
/*
 * Tie-break on the keys' addresses. should_drop_next_key() relies on
 * this placing the older of two equal keys first — presumably older
 * bsets live at lower addresses; confirm against the full file.
 */
77 cmp_int((unsigned long) l, (unsigned long) r);
/*
 * Decide whether the iterator's current (front) key is an older
 * duplicate of the next key and should therefore be dropped rather than
 * emitted.
 */
80 static inline bool should_drop_next_key(struct sort_iter *iter)
83 * key_sort_cmp() ensures that when keys compare equal the older key
84 * comes first; so if l->k compares equal to r->k then l->k is older
85 * and should be dropped.
/*
 * Only meaningful with at least two sources remaining.
 * NOTE(review): the key arguments to the comparison (presumably
 * iter->data[0].k and iter->data[1].k) are missing from this excerpt.
 */
87 return iter->used >= 2 &&
88 !bch2_bkey_cmp_packed(iter->b,
/*
 * Merge-sort keys from multiple sources into @dst, skipping deleted keys
 * and older duplicates of equal keys, while accumulating key-accounting
 * stats in @nr.
 *
 * NOTE(review): the return-type line (presumably struct btree_nr_keys)
 * and the final return statement are missing from this excerpt.
 */
94 bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
95 struct sort_iter *iter)
97 struct bkey_packed *out = dst->start;
98 struct bkey_packed *k;
99 struct btree_nr_keys nr;
101 memset(&nr, 0, sizeof(nr));
/* Order the sources so sort_iter_peek() yields keys in sorted order. */
103 sort_iter_sort(iter, key_sort_fix_overlapping_cmp);
105 while ((k = sort_iter_peek(iter))) {
/* Emit @k only if it is live and not an older duplicate. */
106 if (!bkey_deleted(k) &&
107 !should_drop_next_key(iter)) {
/*
 * NOTE(review): the statement copying @k into @out (e.g. a
 * bkey_copy) is missing from this excerpt — accounting and
 * advancing @out imply a copy happened just above.
 */
109 btree_keys_account_key_add(&nr, 0, out);
110 out = bkey_next(out);
113 sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
/* Record how many u64s were written into the bset (little-endian). */
116 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
/*
 * Append key @k at *out: pack the key header into format @f (falling
 * back to a raw copy of the unpacked header when it cannot be packed),
 * copy the value, account the key in @nr, and advance *out. Deleted
 * keys are skipped entirely.
 *
 * NOTE(review): the final parameter line (presumably struct bkey_s k,
 * given the k.k / k.v accesses below) is missing from this excerpt.
 */
120 static void extent_sort_append(struct bch_fs *c,
121 struct bkey_format *f,
122 struct btree_nr_keys *nr,
123 struct bkey_packed **out,
126 if (!bkey_deleted(k.k)) {
/* Key header can't be packed in @f: store it in unpacked form. */
127 if (!bch2_bkey_pack_key(*out, k.k, f))
128 memcpy_u64s_small(*out, k.k, BKEY_U64s);
/* Copy the value immediately after the (possibly packed) key header. */
130 memcpy_u64s_small(bkeyp_val(f, *out), k.v, bkey_val_u64s(k.k));
132 btree_keys_account_key_add(nr, 0, *out);
133 *out = bkey_next(*out);
137 /* Sort + repack in a new format: */
/*
 * Walk @src in sorted order via @src_iter, transforming each key into
 * format @out_f and appending it to @dst; when the transform fails the
 * key is stored unpacked instead. Whiteouts are optionally filtered.
 *
 * NOTE(review): the return-type line (presumably struct btree_nr_keys),
 * the loop's `continue` after the whiteout filter, and the final return
 * are missing from this excerpt.
 */
139 bch2_sort_repack(struct bset *dst, struct btree *src,
140 struct btree_node_iter *src_iter,
141 struct bkey_format *out_f,
142 bool filter_whiteouts)
144 struct bkey_format *in_f = &src->format;
145 struct bkey_packed *in, *out = vstruct_last(dst);
146 struct btree_nr_keys nr;
148 memset(&nr, 0, sizeof(nr));
150 while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
/* Optionally drop whiteouts (deleted keys). */
151 if (filter_whiteouts && bkey_deleted(in))
/*
 * Transform from the source's format (or the "current" format for
 * already-unpacked keys) into @out_f; on success mark the result
 * as packed, otherwise fall back to storing it unpacked.
 */
154 if (bch2_bkey_transform(out_f, out, bkey_packed(in)
155 ? in_f : &bch2_bkey_format_current, in))
156 out->format = KEY_FORMAT_LOCAL_BTREE;
158 bch2_bkey_unpack(src, (void *) out, in);
160 btree_keys_account_key_add(&nr, 0, out);
161 out = bkey_next(out);
/* Record how many u64s were written into the bset (little-endian). */
164 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
168 /* Sort, repack, and call bch2_bkey_normalize() to drop stale pointers: */
/*
 * Like bch2_sort_repack(), but each key is first copied into a private
 * buffer and normalized (which may drop stale pointers) before being
 * appended to @dst via extent_sort_append().
 *
 * NOTE(review): the return-type line (presumably struct btree_nr_keys),
 * the `struct bkey_buf k;` declaration implied by the k.k accesses, the
 * `continue` statements after the two filters, and the final return are
 * missing from this excerpt.
 */
170 bch2_sort_repack_merge(struct bch_fs *c,
171 struct bset *dst, struct btree *src,
172 struct btree_node_iter *iter,
173 struct bkey_format *out_f,
174 bool filter_whiteouts)
176 struct bkey_packed *out = vstruct_last(dst), *k_packed;
178 struct btree_nr_keys nr;
180 memset(&nr, 0, sizeof(nr));
181 bch2_bkey_buf_init(&k);
183 while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
/* Optionally drop whiteouts (deleted keys). */
184 if (filter_whiteouts && bkey_deleted(k_packed))
189 * bch2_bkey_normalize may modify the key we pass it (dropping
190 * stale pointers) and we don't have a write lock on the src
191 * node; we have to make a copy of the entire key before calling
/* Copy + unpack the key into our private buffer (see comment above). */
194 bch2_bkey_buf_realloc(&k, c, k_packed->u64s + BKEY_U64s);
195 bch2_bkey_unpack(src, k.k, k_packed);
/* Normalize; a true return means the key became empty — skip it. */
197 if (filter_whiteouts &&
198 bch2_bkey_normalize(c, bkey_i_to_s(k.k)))
201 extent_sort_append(c, out_f, &nr, &out, bkey_i_to_s(k.k));
/* Record how many u64s were written, then free the scratch buffer. */
204 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
205 bch2_bkey_buf_exit(&k, c);
/*
 * Comparison for bch2_sort_keys(): order primarily by key; among equal
 * keys, deleted (whiteout) keys sort first, then keys are ordered by
 * their needs_whiteout flag — so the dedup loop in bch2_sort_keys()
 * sees duplicates in a deterministic order.
 */
209 static inline int sort_keys_cmp(struct btree *b,
210 struct bkey_packed *l,
211 struct bkey_packed *r)
213 return bch2_bkey_cmp_packed(b, l, r) ?:
214 (int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
215 (int) l->needs_whiteout - (int) r->needs_whiteout;
/*
 * Merge sorted sources into @dst, collapsing runs of equal keys into a
 * single output key (keeping the last in sort order and OR-ing together
 * their needs_whiteout flags) and optionally dropping whiteouts.
 * Returns the number of u64s written to @dst.
 *
 * NOTE(review): several body lines are missing from this excerpt — the
 * `continue` after the whiteout filter, the copy of live (non-deleted)
 * keys into @out, and the closing braces (the function runs past the
 * end of the visible text).
 */
218 unsigned bch2_sort_keys(struct bkey_packed *dst,
219 struct sort_iter *iter,
220 bool filter_whiteouts)
222 const struct bkey_format *f = &iter->b->format;
223 struct bkey_packed *in, *next, *out = dst;
225 sort_iter_sort(iter, sort_keys_cmp);
227 while ((in = sort_iter_next(iter, sort_keys_cmp))) {
228 bool needs_whiteout = false;
/* Drop whiteouts when filtering, or when none is needed. */
230 if (bkey_deleted(in) &&
231 (filter_whiteouts || !in->needs_whiteout))
/*
 * Collapse a run of keys equal to @in: keep only the last one,
 * accumulating the needs_whiteout flags of those discarded.
 */
234 while ((next = sort_iter_peek(iter)) &&
235 !bch2_bkey_cmp_packed(iter->b, in, next)) {
/* Two equal keys must not both require a whiteout. */
236 BUG_ON(in->needs_whiteout &&
237 next->needs_whiteout);
238 needs_whiteout |= in->needs_whiteout;
239 in = sort_iter_next(iter, sort_keys_cmp);
/* Whiteout: emit just the key header with an empty value. */
242 if (bkey_deleted(in)) {
243 memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
244 set_bkeyp_val_u64s(f, out, 0);
248 out->needs_whiteout |= needs_whiteout;
249 out = bkey_next(out);
252 return (u64 *) out - (u64 *) dst;