1 #ifndef _BCACHEFS_EXTENTS_H
2 #define _BCACHEFS_EXTENTS_H
6 #include "extents_types.h"
10 struct btree_node_iter;
12 struct btree_insert_entry;
13 struct extent_insert_hook;
/*
 * Merge-sort keys from multiple btree node iterators into one bset.
 * The extent variant additionally resolves overlapping extents.
 * NOTE(review): interior parameter lines appear elided in this listing —
 * full signatures not visible here.
 */
17 struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *,
19 struct btree_node_iter *);
20 struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
23 struct btree_node_iter *);
/* bkey_ops vtables: one for interior btree-node pointer keys, one for data extents. */
25 extern const struct bkey_ops bch2_bkey_btree_ops;
26 extern const struct bkey_ops bch2_bkey_extent_ops;
/*
 * Pick which replica's pointer to read from, skipping devices set in the
 * bch_devs_mask (e.g. failed or to-be-avoided devices): one variant for
 * btree node pointers, one for data extents.
 */
28 struct extent_pick_ptr
29 bch2_btree_pick_ptr(struct bch_fs *, const struct btree *,
30 struct bch_devs_mask *avoid);
32 void bch2_extent_pick_ptr(struct bch_fs *, struct bkey_s_c,
33 struct bch_devs_mask *,
34 struct extent_pick_ptr *);
/*
 * Extent insert path hook: handles overlap with existing extents at insert
 * time.  NOTE(review): the return-type line is elided in this listing —
 * confirm against the full header.
 */
37 bch2_insert_fixup_extent(struct btree_insert *,
38 struct btree_insert_entry *);
/* Drop stale/invalid pointers; returns whether the key is still live. */
40 bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
/* NOTE(review): trailing parameters elided in this listing. */
41 void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,
/*
 * Pointer lookup helpers: find a pointer by device index, device group, or
 * target; bch2_extent_drop_device() removes the pointer for a device.
 */
44 const struct bch_extent_ptr *
45 bch2_extent_has_device(struct bkey_s_c_extent, unsigned);
46 bool bch2_extent_drop_device(struct bkey_s_extent, unsigned);
47 const struct bch_extent_ptr *
48 bch2_extent_has_group(struct bch_fs *, struct bkey_s_c_extent, unsigned);
49 const struct bch_extent_ptr *
50 bch2_extent_has_target(struct bch_fs *, struct bkey_s_c_extent, unsigned);
/* Replica/pointer counting and compression queries. */
52 unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent);
53 unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c);
54 unsigned bch2_extent_nr_good_ptrs(struct bch_fs *, struct bkey_s_c_extent);
55 unsigned bch2_extent_is_compressed(struct bkey_s_c);
/* Does the extent contain this pointer (device + offset within the extent)? */
57 bool bch2_extent_matches_ptr(struct bch_fs *, struct bkey_s_c_extent,
58 struct bch_extent_ptr, u64);
/*
 * True for key types that carry data pointers (the visible case label shows
 * BCH_EXTENT_CACHED; presumably BCH_EXTENT as well — switch body largely
 * elided in this listing, TODO confirm).
 */
60 static inline bool bkey_extent_is_data(const struct bkey *k)
64 case BCH_EXTENT_CACHED:
/*
 * True for key types that represent allocated space.  NOTE(review): body
 * largely elided here; likely also covers reservations — confirm.
 */
71 static inline bool bkey_extent_is_allocation(const struct bkey *k)
75 case BCH_EXTENT_CACHED:
/*
 * An extent is "fully allocated" when it represents allocated space and is
 * not compressed (compressed extents may occupy less on-disk space than
 * their logical size).
 */
83 static inline bool bch2_extent_is_fully_allocated(struct bkey_s_c k)
85 return bkey_extent_is_allocation(k.k) &&
86 !bch2_extent_is_compressed(k);
/* A cached extent holds only cached (droppable) replicas. */
89 static inline bool bkey_extent_is_cached(const struct bkey *k)
91 return k->type == BCH_EXTENT_CACHED;
/*
 * Flip a key between the BCH_EXTENT and BCH_EXTENT_CACHED types; only legal
 * on keys that are already one of the two (debug-asserted).
 */
94 static inline void bkey_extent_set_cached(struct bkey *k, bool cached)
96 EBUG_ON(k->type != BCH_EXTENT &&
97 k->type != BCH_EXTENT_CACHED);
99 k->type = cached ? BCH_EXTENT_CACHED : BCH_EXTENT;
/*
 * Entry type is encoded as a one-hot bit in e->type; __ffs() recovers the
 * enum value.  This variant tolerates a zero type field, mapping it to
 * BCH_EXTENT_ENTRY_MAX (i.e. "no/invalid entry").
 */
102 static inline unsigned
103 __extent_entry_type(const union bch_extent_entry *e)
105 return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
/*
 * Strict variant: the type bit must be set and in range (debug-asserted).
 * NOTE(review): the return statement is elided in this listing.
 */
108 static inline enum bch_extent_entry_type
109 extent_entry_type(const union bch_extent_entry *e)
111 int ret = __ffs(e->type);
113 EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);
/*
 * Size in bytes of a (variable-sized) extent entry, keyed off its type.
 * NOTE(review): the default/BUG case and closing braces are elided in this
 * listing.
 */
118 static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
120 switch (extent_entry_type(entry)) {
121 case BCH_EXTENT_ENTRY_crc32:
122 return sizeof(struct bch_extent_crc32);
123 case BCH_EXTENT_ENTRY_crc64:
124 return sizeof(struct bch_extent_crc64);
125 case BCH_EXTENT_ENTRY_crc128:
126 return sizeof(struct bch_extent_crc128);
127 case BCH_EXTENT_ENTRY_ptr:
128 return sizeof(struct bch_extent_ptr);
/* Same, in u64 units — bkey values are sized/indexed in u64s. */
134 static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
136 return extent_entry_bytes(entry) / sizeof(u64);
/* Entry-kind predicates: an entry is either a device pointer or a crc entry. */
139 static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
141 return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
/* Anything that is not a pointer entry is treated as a crc entry. */
144 static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
146 return !extent_entry_is_ptr(e);
/* Overlay of the three on-disk crc entry encodings (32/64/128-bit checksums). */
149 union bch_extent_crc {
151 struct bch_extent_crc32 crc32;
152 struct bch_extent_crc64 crc64;
153 struct bch_extent_crc128 crc128;
156 /* downcast, preserves const */
/*
 * Cast a crc/ptr pointer back up to the generic entry type, preserving
 * constness via __builtin_choose_expr (compile-time checked with type_is()).
 * NOTE(review): the ({ ... }) statement-expression delimiters appear elided
 * in this listing.
 */
157 #define to_entry(_entry) \
159 BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) && \
160 !type_is(_entry, struct bch_extent_ptr *)); \
162 __builtin_choose_expr( \
163 (type_is_exact(_entry, const union bch_extent_crc *) || \
164 type_is_exact(_entry, const struct bch_extent_ptr *)), \
165 (const union bch_extent_entry *) (_entry), \
166 (union bch_extent_entry *) (_entry)); \
/* Unchecked downcast entry -> crc, preserving const. */
169 #define __entry_to_crc(_entry) \
170 __builtin_choose_expr( \
171 type_is_exact(_entry, const union bch_extent_entry *), \
172 (const union bch_extent_crc *) (_entry), \
173 (union bch_extent_crc *) (_entry))
/* Checked downcast entry -> crc: debug-asserts the entry really is a crc. */
175 #define entry_to_crc(_entry) \
177 EBUG_ON((_entry) && !extent_entry_is_crc(_entry)); \
179 __entry_to_crc(_entry); \
/* Checked downcast entry -> ptr, const-preserving, debug-asserted. */
182 #define entry_to_ptr(_entry) \
184 EBUG_ON((_entry) && !extent_entry_is_ptr(_entry)); \
186 __builtin_choose_expr( \
187 type_is_exact(_entry, const union bch_extent_entry *), \
188 (const struct bch_extent_ptr *) (_entry), \
189 (struct bch_extent_ptr *) (_entry)); \
192 /* checksum entries: */
/*
 * Runtime classification of a crc entry.  NOTE(review): enumerator lines are
 * elided in this listing; the constants used below are BCH_EXTENT_CRC_NONE /
 * CRC32 / CRC64 / CRC128.
 */
194 enum bch_extent_crc_type {
/*
 * Classify a crc entry pointer; NULL means "no checksum" (CRC_NONE).
 * NOTE(review): the NULL check and default case are elided in this listing.
 */
201 static inline enum bch_extent_crc_type
202 __extent_crc_type(const union bch_extent_crc *crc)
205 return BCH_EXTENT_CRC_NONE;
207 switch (extent_entry_type(to_entry(crc))) {
208 case BCH_EXTENT_ENTRY_crc32:
209 return BCH_EXTENT_CRC32;
210 case BCH_EXTENT_ENTRY_crc64:
211 return BCH_EXTENT_CRC64;
212 case BCH_EXTENT_ENTRY_crc128:
213 return BCH_EXTENT_CRC128;
/*
 * Type-generic crc classification: resolved at compile time when the
 * argument is a concrete crc32/64/128 pointer, falling back to the runtime
 * __extent_crc_type() for the generic union.  NOTE(review): ({ ... })
 * delimiters appear elided in this listing.
 */
219 #define extent_crc_type(_crc) \
221 BUILD_BUG_ON(!type_is(_crc, struct bch_extent_crc32 *) && \
222 !type_is(_crc, struct bch_extent_crc64 *) && \
223 !type_is(_crc, struct bch_extent_crc128 *) && \
224 !type_is(_crc, union bch_extent_crc *)); \
226 type_is(_crc, struct bch_extent_crc32 *) ? BCH_EXTENT_CRC32 \
227 : type_is(_crc, struct bch_extent_crc64 *) ? BCH_EXTENT_CRC64 \
228 : type_is(_crc, struct bch_extent_crc128 *) ? BCH_EXTENT_CRC128 \
229 : __extent_crc_type((union bch_extent_crc *) _crc); \
/*
 * Decode any of the packed on-disk crc formats into the uniform in-memory
 * bch_extent_crc_unpacked representation.  Sizes are stored biased by one
 * on disk, hence the "+ 1" below.  A NULL crc means no checksum entry: the
 * sizes then come straight from the key's size.
 */
232 static inline struct bch_extent_crc_unpacked
233 bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
/* Fields shared by all three packed formats (local helper macro). */
235 #define common_fields(_crc) \
236 .csum_type = _crc.csum_type, \
237 .compression_type = _crc.compression_type, \
238 .compressed_size = _crc._compressed_size + 1, \
239 .uncompressed_size = _crc._uncompressed_size + 1, \
240 .offset = _crc.offset, \
243 switch (extent_crc_type(crc)) {
244 case BCH_EXTENT_CRC_NONE:
245 return (struct bch_extent_crc_unpacked) {
246 .compressed_size = k->size,
247 .uncompressed_size = k->size,
248 .live_size = k->size,
250 case BCH_EXTENT_CRC32:
251 return (struct bch_extent_crc_unpacked) {
252 common_fields(crc->crc32),
/* crc32 packs a 32-bit csum; widen into the low word. */
253 .csum.lo = (__force __le64) crc->crc32.csum,
255 case BCH_EXTENT_CRC64:
256 return (struct bch_extent_crc_unpacked) {
257 common_fields(crc->crc64),
258 .nonce = crc->crc64.nonce,
259 .csum.lo = (__force __le64) crc->crc64.csum_lo,
260 .csum.hi = (__force __le64) crc->crc64.csum_hi,
262 case BCH_EXTENT_CRC128:
263 return (struct bch_extent_crc_unpacked) {
264 common_fields(crc->crc128),
265 .nonce = crc->crc128.nonce,
266 .csum = crc->crc128.csum,
274 /* Extent entry iteration: */
/* Advance to the next entry: entries are variable-sized, so step by bytes. */
276 #define extent_entry_next(_entry) \
277 ((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
/* One past the last entry, computed from the key's value size in u64s. */
279 #define extent_entry_last(_e) \
280 vstruct_idx((_e).v, bkey_val_u64s((_e).k))
/* Iterate over all entries starting from _start. */
284 #define extent_for_each_entry_from(_e, _entry, _start) \
285 for ((_entry) = _start; \
286 (_entry) < extent_entry_last(_e); \
287 (_entry) = extent_entry_next(_entry))
/* Iterate over all entries from the beginning of the extent value. */
289 #define extent_for_each_entry(_e, _entry) \
290 extent_for_each_entry_from(_e, _entry, (_e).v->start)
292 /* Iterate over crcs only: */
/*
 * Find the next crc entry at or after _p; evaluates to NULL (via
 * entry_to_crc) when the end of the extent is reached.  NOTE(review):
 * ({ ... }) delimiters appear elided in this listing.
 */
294 #define __extent_crc_next(_e, _p) \
296 typeof(&(_e).v->start[0]) _entry = _p; \
298 while ((_entry) < extent_entry_last(_e) && \
299 !extent_entry_is_crc(_entry)) \
300 (_entry) = extent_entry_next(_entry); \
302 entry_to_crc(_entry < extent_entry_last(_e) ? _entry : NULL); \
/* Iterate over raw (packed) crc entries only. */
305 #define __extent_for_each_crc(_e, _crc) \
306 for ((_crc) = __extent_crc_next(_e, (_e).v->start); \
308 (_crc) = __extent_crc_next(_e, extent_entry_next(to_entry(_crc))))
/*
 * Advance _iter to the next crc entry and unpack it into _crc; used as the
 * loop condition of extent_for_each_crc() below.  NOTE(review): several
 * interior lines elided in this listing.
 */
310 #define extent_crc_next(_e, _crc, _iter) \
312 extent_for_each_entry_from(_e, _iter, _iter) \
313 if (extent_entry_is_crc(_iter)) { \
314 (_crc) = bch2_extent_crc_unpack((_e).k, entry_to_crc(_iter));\
318 (_iter) < extent_entry_last(_e); \
/* Iterate over crc entries, unpacked; _crc starts as the "no crc" unpacking. */
321 #define extent_for_each_crc(_e, _crc, _iter) \
322 for ((_crc) = bch2_extent_crc_unpack((_e).k, NULL), \
323 (_iter) = (_e).v->start; \
324 extent_crc_next(_e, _crc, _iter); \
325 (_iter) = extent_entry_next(_iter))
327 /* Iterate over pointers, with crcs: */
/*
 * Find the next pointer entry at or after _ptr, updating _crc with the most
 * recently seen crc entry on the way (crc entries apply to the pointers that
 * follow them).  NOTE(review): several interior lines elided in this listing.
 */
329 #define extent_ptr_crc_next(_e, _ptr, _crc) \
332 typeof(&(_e).v->start[0]) _entry; \
334 extent_for_each_entry_from(_e, _entry, to_entry(_ptr)) \
335 if (extent_entry_is_crc(_entry)) { \
336 (_crc) = bch2_extent_crc_unpack((_e).k, entry_to_crc(_entry));\
338 _ptr = entry_to_ptr(_entry); \
/* Iterate over pointers, with the crc covering each pointer tracked in _crc. */
347 #define extent_for_each_ptr_crc(_e, _ptr, _crc) \
348 for ((_crc) = bch2_extent_crc_unpack((_e).k, NULL), \
349 (_ptr) = &(_e).v->start->ptr; \
350 ((_ptr) = extent_ptr_crc_next(_e, _ptr, _crc)); \
353 /* Iterate over pointers only, and from a given position: */
/*
 * Next pointer entry, discarding crc information (delegates to
 * extent_ptr_crc_next with a throwaway _crc).
 */
355 #define extent_ptr_next(_e, _ptr) \
357 struct bch_extent_crc_unpacked _crc; \
359 extent_ptr_crc_next(_e, _ptr, _crc); \
/*
 * Iterate over pointer entries only.  NOTE(review): the loop's increment
 * line appears elided in this listing.
 */
362 #define extent_for_each_ptr(_e, _ptr) \
363 for ((_ptr) = &(_e).v->start->ptr; \
364 ((_ptr) = extent_ptr_next(_e, _ptr)); \
/*
 * Pointer preceding _ptr (NULL _ptr presumably yields the last pointer —
 * body elided here, TODO confirm).  Implemented as a forward scan, hence
 * the quadratic cost of the backwards iterator below.
 */
367 #define extent_ptr_prev(_e, _ptr) \
369 typeof(&(_e).v->start->ptr) _p; \
370 typeof(&(_e).v->start->ptr) _prev = NULL; \
372 extent_for_each_ptr(_e, _p) { \
382 /* Use this when you'll be dropping pointers as you iterate. Quadratic, */
/*
 * Iterate pointers last-to-first; safe to drop pointers during iteration
 * (dropping shifts later entries down, which forward iteration would skip).
 * NOTE(review): the loop condition line appears elided in this listing.
 */
385 #define extent_for_each_ptr_backwards(_e, _ptr) \
386 for ((_ptr) = extent_ptr_prev(_e, NULL); \
388 (_ptr) = extent_ptr_prev(_e, _ptr))
/* Append an (unpacked) crc entry to the extent, packing it appropriately. */
390 void bch2_extent_crc_append(struct bkey_i_extent *,
391 struct bch_extent_crc_unpacked);
/*
 * Commit the entry that was just written at the end of the extent value by
 * growing the key's u64 count; debug-asserts the value stays within
 * BKEY_EXTENT_VAL_U64s_MAX.
 */
393 static inline void __extent_entry_push(struct bkey_i_extent *e)
395 union bch_extent_entry *entry = extent_entry_last(extent_i_to_s(e));
397 EBUG_ON(bkey_val_u64s(&e->k) + extent_entry_u64s(entry) >
398 BKEY_EXTENT_VAL_U64s_MAX);
400 e->k.u64s += extent_entry_u64s(entry);
/*
 * Append a device pointer entry: stamp the one-hot entry-type bit, write the
 * ptr into the slot past the current last entry, then push it.
 */
403 static inline void extent_ptr_append(struct bkey_i_extent *e,
404 struct bch_extent_ptr ptr)
406 ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
407 extent_entry_last(extent_i_to_s(e))->ptr = ptr;
408 __extent_entry_push(e);
/* Collect the device indices of all pointers in the extent. */
411 static inline struct bch_devs_list bch2_extent_devs(struct bkey_s_c_extent e)
413 struct bch_devs_list ret = (struct bch_devs_list) { 0 };
414 const struct bch_extent_ptr *ptr;
416 extent_for_each_ptr(e, ptr)
417 ret.devs[ret.nr++] = ptr->dev;
/*
 * Devices holding dirty (non-cached) replicas.  NOTE(review): the
 * "if (!ptr->cached)" filter line appears elided in this listing — as shown
 * this reads identically to bch2_extent_devs(); confirm against full source.
 */
422 static inline struct bch_devs_list bch2_extent_dirty_devs(struct bkey_s_c_extent e)
424 struct bch_devs_list ret = (struct bch_devs_list) { 0 };
425 const struct bch_extent_ptr *ptr;
427 extent_for_each_ptr(e, ptr)
429 ret.devs[ret.nr++] = ptr->dev;
/*
 * Devices holding cached replicas.  NOTE(review): the "if (ptr->cached)"
 * filter line appears elided in this listing — confirm against full source.
 */
434 static inline struct bch_devs_list bch2_extent_cached_devs(struct bkey_s_c_extent e)
436 struct bch_devs_list ret = (struct bch_devs_list) { 0 };
437 const struct bch_extent_ptr *ptr;
439 extent_for_each_ptr(e, ptr)
441 ret.devs[ret.nr++] = ptr->dev;
/*
 * Generic-key wrappers: dispatch on key type, returning the device list for
 * extent keys and an empty list otherwise.  NOTE(review): switch headers and
 * sibling case labels are elided in this listing.
 */
446 static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
450 case BCH_EXTENT_CACHED:
451 return bch2_extent_devs(bkey_s_c_to_extent(k));
453 return (struct bch_devs_list) { .nr = 0 };
/* As above, restricted to dirty replicas. */
457 static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
461 case BCH_EXTENT_CACHED:
462 return bch2_extent_dirty_devs(bkey_s_c_to_extent(k));
464 return (struct bch_devs_list) { .nr = 0 };
/* As above, restricted to cached replicas. */
468 static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
472 case BCH_EXTENT_CACHED:
473 return bch2_extent_cached_devs(bkey_s_c_to_extent(k));
475 return (struct bch_devs_list) { .nr = 0 };
/*
 * Checksum narrowing: replace a wide crc covering more than the live extent
 * with one covering only the live region, when possible.
 */
479 bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent,
480 struct bch_extent_crc_unpacked);
481 bool bch2_extent_narrow_crcs(struct bkey_i_extent *, struct bch_extent_crc_unpacked);
482 void bch2_extent_drop_redundant_crcs(struct bkey_s_extent);
/* Remove a pointer entry from an extent (__ variant presumably skips normalize — confirm). */
484 void __bch2_extent_drop_ptr(struct bkey_s_extent, struct bch_extent_ptr *);
485 void bch2_extent_drop_ptr(struct bkey_s_extent, struct bch_extent_ptr *);
/* Trim an extent key at the front/back to the given position; resize helper. */
487 bool bch2_cut_front(struct bpos, struct bkey_i *);
488 bool bch2_cut_back(struct bpos, struct bkey *);
489 void bch2_key_resize(struct bkey *, unsigned);
491 int bch2_check_range_allocated(struct bch_fs *, struct bpos, u64);
493 #endif /* _BCACHEFS_EXTENTS_H */