1 #ifndef _BCACHE_EXTENTS_H
2 #define _BCACHE_EXTENTS_H
10 struct btree_node_iter;
12 struct btree_insert_entry;
13 struct extent_insert_hook;
/*
 * Public interface for extent keys (defined in extents.c).
 *
 * NOTE(review): this listing has lines elided throughout (the embedded
 * original numbering skips), so some declarations below are missing
 * parameter lines or return-type lines.
 */

/* Merge-sort fixups for overlapping keys/extents during btree node sort. */
17 struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *,
19 struct btree_node_iter *);
20 struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
23 struct btree_node_iter *);

/* Per-key-type operation tables for btree-node pointers vs. data extents. */
25 extern const struct bkey_ops bch2_bkey_btree_ops;
26 extern const struct bkey_ops bch2_bkey_extent_ops;

/* Pick a device/pointer to read from, optionally avoiding a device mask. */
28 void bch2_get_read_device(struct bch_fs *,
30 const struct bch_extent_ptr *,
31 const union bch_extent_crc *,
32 struct bch_devs_mask *,
33 struct extent_pick_ptr *);
34 struct extent_pick_ptr
35 bch2_btree_pick_ptr(struct bch_fs *, const struct btree *);
37 void bch2_extent_pick_ptr(struct bch_fs *, struct bkey_s_c,
38 struct bch_devs_mask *,
39 struct extent_pick_ptr *);

/* NOTE(review): return type line of this declaration was elided here. */
42 bch2_insert_fixup_extent(struct btree_insert *,
43 struct btree_insert_entry *);

45 bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
46 void bch2_extent_mark_replicas_cached(struct bch_fs *,
47 struct bkey_s_extent, unsigned);

/* Pointer counts: all pointers vs. dirty (non-cached) pointers only. */
49 unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent);
50 unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c);
/*
 * Predicates/setters on struct bkey extent types.
 * NOTE(review): bodies below are incomplete fragments — the listing's
 * embedded numbering skips lines (e.g. 52 -> 56), eliding braces and
 * switch arms.
 */
52 static inline bool bkey_extent_is_data(const struct bkey *k)
56 case BCH_EXTENT_CACHED:

/* Presumably also covers BCH_RESERVATION — body elided; TODO confirm. */
63 static inline bool bkey_extent_is_allocation(const struct bkey *k)
67 case BCH_EXTENT_CACHED:

/* True iff the key is a cached (reconstructible, droppable) extent. */
75 static inline bool bkey_extent_is_cached(const struct bkey *k)
77 return k->type == BCH_EXTENT_CACHED;

/* Flip a key between dirty (BCH_EXTENT) and cached; extents only. */
80 static inline void bkey_extent_set_cached(struct bkey *k, bool cached)
82 EBUG_ON(k->type != BCH_EXTENT &&
83 k->type != BCH_EXTENT_CACHED);
85 k->type = cached ? BCH_EXTENT_CACHED : BCH_EXTENT;
/*
 * Extent entry helpers. An extent value is a sequence of variable-sized
 * entries (crc32/crc64/crc128/ptr); the entry type is encoded as the
 * lowest set bit of e->type (hence the __ffs() calls below).
 * NOTE(review): lines are elided from this listing; bodies are fragments.
 */

/* Raw variant: returns BCH_EXTENT_ENTRY_MAX for a zeroed type field. */
88 static inline unsigned
89 __extent_entry_type(const union bch_extent_entry *e)
91 return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;

/* Checked variant: asserts the decoded type is in range. */
94 static inline enum bch_extent_entry_type
95 extent_entry_type(const union bch_extent_entry *e)
97 int ret = __ffs(e->type);
99 EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

/* Size in bytes of one entry, by its decoded type. */
104 static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
106 switch (extent_entry_type(entry)) {
107 case BCH_EXTENT_ENTRY_crc32:
108 return sizeof(struct bch_extent_crc32);
109 case BCH_EXTENT_ENTRY_crc64:
110 return sizeof(struct bch_extent_crc64);
111 case BCH_EXTENT_ENTRY_crc128:
112 return sizeof(struct bch_extent_crc128);
113 case BCH_EXTENT_ENTRY_ptr:
114 return sizeof(struct bch_extent_ptr);

/* Entry size in 64-bit words (bkey values are measured in u64s). */
120 static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
122 return extent_entry_bytes(entry) / sizeof(u64);

125 static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
127 return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;

/* Anything that is not a ptr entry is a crc entry. */
130 static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
132 return !extent_entry_is_ptr(e);
/*
 * Overlay of the three on-disk crc entry formats.
 * NOTE(review): closing of this union and parts of the macro below are
 * elided from this listing.
 */
135 union bch_extent_crc {
137 struct bch_extent_crc32 crc32;
138 struct bch_extent_crc64 crc64;
139 struct bch_extent_crc128 crc128;

142 /* downcast, preserves const */
/*
 * to_entry(): cast a crc/ptr pointer up to the generic entry type;
 * __builtin_choose_expr keeps const-ness of the argument.
 */
143 #define to_entry(_entry) \
145 BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) && \
146 !type_is(_entry, struct bch_extent_ptr *)); \
148 __builtin_choose_expr( \
149 (type_is_exact(_entry, const union bch_extent_crc *) || \
150 type_is_exact(_entry, const struct bch_extent_ptr *)), \
151 (const union bch_extent_entry *) (_entry), \
152 (union bch_extent_entry *) (_entry)); \
/*
 * __entry_to_crc() - cast a generic extent entry pointer to a crc
 * pointer, preserving the const-ness of the argument via
 * __builtin_choose_expr (GCC/Clang extension).
 *
 * (Fix: stripped stray listing line numbers that had been baked into the
 * source text and broke compilation.)
 */
#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))
/*
 * Checked downcasts from generic entry to crc / ptr. Both assert (under
 * EBUG_ON) that the entry really is of the requested kind; NULL passes.
 * NOTE(review): the ({ ... }) statement-expression wrappers of these
 * macros are elided from this listing.
 */
161 #define entry_to_crc(_entry) \
163 EBUG_ON((_entry) && !extent_entry_is_crc(_entry)); \
165 __entry_to_crc(_entry); \
168 #define entry_to_ptr(_entry) \
170 EBUG_ON((_entry) && !extent_entry_is_ptr(_entry)); \
172 __builtin_choose_expr( \
173 type_is_exact(_entry, const union bch_extent_entry *), \
174 (const struct bch_extent_ptr *) (_entry), \
175 (struct bch_extent_ptr *) (_entry)); \
/*
 * Classify a crc entry (none/crc32/crc64/crc128).
 * NOTE(review): the enumerators of this enum and several lines of the
 * function/macro below (NULL check branch, default arm, closing braces)
 * are elided from this listing.
 */
178 enum bch_extent_crc_type {

/* Runtime classification; the elided line 188-189 handles crc == NULL. */
185 static inline enum bch_extent_crc_type
186 __extent_crc_type(const union bch_extent_crc *crc)
189 return BCH_EXTENT_CRC_NONE;
191 switch (extent_entry_type(to_entry(crc))) {
192 case BCH_EXTENT_ENTRY_crc32:
193 return BCH_EXTENT_CRC32;
194 case BCH_EXTENT_ENTRY_crc64:
195 return BCH_EXTENT_CRC64;
196 case BCH_EXTENT_ENTRY_crc128:
197 return BCH_EXTENT_CRC128;

/*
 * Compile-time classification when the pointer type already tells us;
 * falls back to the runtime helper for the generic union type.
 */
203 #define extent_crc_type(_crc) \
205 BUILD_BUG_ON(!type_is(_crc, struct bch_extent_crc32 *) && \
206 !type_is(_crc, struct bch_extent_crc64 *) && \
207 !type_is(_crc, struct bch_extent_crc128 *) && \
208 !type_is(_crc, union bch_extent_crc *)); \
210 type_is(_crc, struct bch_extent_crc32 *) ? BCH_EXTENT_CRC32 \
211 : type_is(_crc, struct bch_extent_crc64 *) ? BCH_EXTENT_CRC64 \
212 : type_is(_crc, struct bch_extent_crc128 *) ? BCH_EXTENT_CRC128 \
213 : __extent_crc_type((union bch_extent_crc *) _crc); \
/*
 * Entry iteration primitives. Entries are variable-sized, so stepping
 * means adding the current entry's byte size.
 *
 * (Fix: stripped stray listing line numbers that had been baked into the
 * source text and broke compilation.)
 */

/* Advance to the entry following _entry. */
#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

/* One past the last entry in extent _e's value. */
#define extent_entry_last(_e)						\
	vstruct_idx((_e).v, bkey_val_u64s((_e).k))

/* Iterate over all entries: */

/* Iterate entries of extent _e, starting at entry _start. */
#define extent_for_each_entry_from(_e, _entry, _start)			\
	for ((_entry) = _start;						\
	     (_entry) < extent_entry_last(_e);				\
	     (_entry) = extent_entry_next(_entry))

/* Iterate all entries of extent _e from the beginning of the value. */
#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)
232 /* Iterate over crcs only: */
/*
 * extent_crc_next(): scan forward from _p to the next crc entry, or NULL
 * if none remain. NOTE(review): the ({ ... }) wrapper lines and the loop
 * terminator of extent_for_each_crc are elided from this listing.
 */
234 #define extent_crc_next(_e, _p) \
236 typeof(&(_e).v->start[0]) _entry = _p; \
238 while ((_entry) < extent_entry_last(_e) && \
239 !extent_entry_is_crc(_entry)) \
240 (_entry) = extent_entry_next(_entry); \
242 entry_to_crc(_entry < extent_entry_last(_e) ? _entry : NULL); \
245 #define extent_for_each_crc(_e, _crc) \
246 for ((_crc) = extent_crc_next(_e, (_e).v->start); \
248 (_crc) = extent_crc_next(_e, extent_entry_next(to_entry(_crc))))
250 /* Iterate over pointers, with crcs: */
/*
 * extent_ptr_crc_next_filter(): advance _ptr to the next pointer entry
 * passing _filter, tracking the most recent crc entry seen in _crc.
 * NOTE(review): several lines are elided from this listing (the ({ ... })
 * wrapper, the filter test, the NULL return on exhaustion, and the
 * "(_ptr)++" loop increment of the for_each macro).
 */
252 #define extent_ptr_crc_next_filter(_e, _crc, _ptr, _filter) \
255 typeof(&(_e).v->start[0]) _entry; \
257 extent_for_each_entry_from(_e, _entry, to_entry(_ptr)) \
258 if (extent_entry_is_crc(_entry)) { \
259 (_crc) = entry_to_crc(_entry); \
261 _ptr = entry_to_ptr(_entry); \
271 #define extent_for_each_ptr_crc_filter(_e, _ptr, _crc, _filter) \
272 for ((_crc) = NULL, \
273 (_ptr) = &(_e).v->start->ptr; \
274 ((_ptr) = extent_ptr_crc_next_filter(_e, _crc, _ptr, _filter));\
/*
 * Iterate over every pointer of extent _e with its governing crc
 * (unfiltered variant: the filter expression is the constant true).
 *
 * (Fix: stripped stray listing line numbers that had been baked into the
 * source text and broke compilation.)
 */
#define extent_for_each_ptr_crc(_e, _ptr, _crc)				\
	extent_for_each_ptr_crc_filter(_e, _ptr, _crc, true)
280 /* Iterate over pointers only, and from a given position: */
/*
 * Pointer-only variant: discards the crc tracked by the underlying
 * ptr+crc walk. NOTE(review): the ({ ... }) wrapper lines are elided
 * from this listing.
 */
282 #define extent_ptr_next_filter(_e, _ptr, _filter) \
284 typeof(__entry_to_crc(&(_e).v->start[0])) _crc; \
286 extent_ptr_crc_next_filter(_e, _crc, _ptr, _filter); \
/*
 * Next pointer entry at/after _ptr, unfiltered.
 *
 * (Fix: stripped stray listing line numbers that had been baked into the
 * source text and broke compilation.)
 */
#define extent_ptr_next(_e, _ptr)					\
	extent_ptr_next_filter(_e, _ptr, true)
/*
 * Iterate over pointer entries passing _filter.
 * NOTE(review): the "(_ptr)++" loop-increment line is elided from this
 * listing.
 */
292 #define extent_for_each_ptr_filter(_e, _ptr, _filter) \
293 for ((_ptr) = &(_e).v->start->ptr; \
294 ((_ptr) = extent_ptr_next_filter(_e, _ptr, _filter)); \
/*
 * Iterate over every pointer entry of extent _e, unfiltered.
 *
 * (Fix: stripped stray listing line numbers that had been baked into the
 * source text and broke compilation.)
 */
#define extent_for_each_ptr(_e, _ptr)					\
	extent_for_each_ptr_filter(_e, _ptr, true)
/*
 * extent_ptr_prev(): find the pointer entry preceding _ptr by scanning
 * from the start (entries are variable-sized, so no direct back-step).
 * NOTE(review): the ({ ... }) wrapper, the loop body comparing _p with
 * _ptr, and the closing of the original block comment below are elided
 * from this listing.
 */
300 #define extent_ptr_prev(_e, _ptr) \
302 typeof(&(_e).v->start->ptr) _p; \
303 typeof(&(_e).v->start->ptr) _prev = NULL; \
305 extent_for_each_ptr(_e, _p) { \
315 * Use this when you'll be dropping pointers as you iterate. Quadratic,
/* Backwards walk built on extent_ptr_prev(); hence quadratic overall. */
318 #define extent_for_each_ptr_backwards(_e, _ptr) \
319 for ((_ptr) = extent_ptr_prev(_e, NULL); \
321 (_ptr) = extent_ptr_prev(_e, _ptr))

/* Append a crc entry to an extent (defined in extents.c). */
323 void bch2_extent_crc_append(struct bkey_i_extent *, unsigned, unsigned,
324 unsigned, unsigned, struct bch_csum, unsigned);
/*
 * Commit a new entry written at the end of the value by growing the
 * key's u64s count. NOTE(review): braces are elided from this listing;
 * bodies are fragments.
 */
326 static inline void __extent_entry_push(struct bkey_i_extent *e)
328 union bch_extent_entry *entry = extent_entry_last(extent_i_to_s(e));
330 EBUG_ON(bkey_val_u64s(&e->k) + extent_entry_u64s(entry) >
331 BKEY_EXTENT_VAL_U64s_MAX);
333 e->k.u64s += extent_entry_u64s(entry);

/*
 * Append a pointer entry: tag its type bit, copy it into the slot past
 * the current last entry, then push.
 */
336 static inline void extent_ptr_append(struct bkey_i_extent *e,
337 struct bch_extent_ptr ptr)
339 ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
340 extent_entry_last(extent_i_to_s(e))->ptr = ptr;
341 __extent_entry_push(e);
/*
 * Widen any crc entry format to the crc128 representation.
 * Sizes are stored biased by one on disk (hence k->size - 1 for the
 * no-crc case). NOTE(review): lines are elided from this listing —
 * braces, the closing "};" of each compound literal, and the
 * BCH_EXTENT_CRC128 arm's body (presumably returns crc->crc128;
 * TODO confirm).
 */
344 static inline struct bch_extent_crc128 crc_to_128(const struct bkey *k,
345 const union bch_extent_crc *crc)
349 switch (extent_crc_type(crc)) {
350 case BCH_EXTENT_CRC_NONE:
351 return (struct bch_extent_crc128) {
352 ._compressed_size = k->size - 1,
353 ._uncompressed_size = k->size - 1,
355 case BCH_EXTENT_CRC32:
356 return (struct bch_extent_crc128) {
357 .type = 1 << BCH_EXTENT_ENTRY_crc128,
358 ._compressed_size = crc->crc32._compressed_size,
359 ._uncompressed_size = crc->crc32._uncompressed_size,
360 .offset = crc->crc32.offset,
361 .csum_type = crc->crc32.csum_type,
362 .compression_type = crc->crc32.compression_type,
363 .csum.lo = crc->crc32.csum,
365 case BCH_EXTENT_CRC64:
366 return (struct bch_extent_crc128) {
367 .type = 1 << BCH_EXTENT_ENTRY_crc128,
368 ._compressed_size = crc->crc64._compressed_size,
369 ._uncompressed_size = crc->crc64._uncompressed_size,
370 .offset = crc->crc64.offset,
371 .nonce = crc->crc64.nonce,
372 .csum_type = crc->crc64.csum_type,
373 .compression_type = crc->crc64.compression_type,
374 .csum.lo = crc->crc64.csum_lo,
375 .csum.hi = crc->crc64.csum_hi,
377 case BCH_EXTENT_CRC128:
/*
 * Compressed / uncompressed size of the data a crc entry covers, in
 * sectors. On-disk sizes are stored biased by one (hence the "+ 1");
 * with no crc entry the key's own size is used. These are macros (not
 * functions) so extent_crc_type() can resolve at compile time per
 * pointer type. NOTE(review): the ({ ... }) wrappers, "break;" lines,
 * and the final "_size;" result expressions are elided from this
 * listing.
 */
384 #define crc_compressed_size(_k, _crc) \
386 unsigned _size = 0; \
388 switch (extent_crc_type(_crc)) { \
389 case BCH_EXTENT_CRC_NONE: \
390 _size = ((const struct bkey *) (_k))->size; \
392 case BCH_EXTENT_CRC32: \
393 _size = ((struct bch_extent_crc32 *) _crc) \
394 ->_compressed_size + 1; \
396 case BCH_EXTENT_CRC64: \
397 _size = ((struct bch_extent_crc64 *) _crc) \
398 ->_compressed_size + 1; \
400 case BCH_EXTENT_CRC128: \
401 _size = ((struct bch_extent_crc128 *) _crc) \
402 ->_compressed_size + 1; \
408 #define crc_uncompressed_size(_k, _crc) \
410 unsigned _size = 0; \
412 switch (extent_crc_type(_crc)) { \
413 case BCH_EXTENT_CRC_NONE: \
414 _size = ((const struct bkey *) (_k))->size; \
416 case BCH_EXTENT_CRC32: \
417 _size = ((struct bch_extent_crc32 *) _crc) \
418 ->_uncompressed_size + 1; \
420 case BCH_EXTENT_CRC64: \
421 _size = ((struct bch_extent_crc64 *) _crc) \
422 ->_uncompressed_size + 1; \
424 case BCH_EXTENT_CRC128: \
425 _size = ((struct bch_extent_crc128 *) _crc) \
426 ->_uncompressed_size + 1; \
/*
 * Format-independent accessors for crc entry fields. Each switches on
 * the crc format and reads the per-format field. NOTE(review): lines
 * are elided from this listing — braces, the "return 0;" of the
 * CRC_NONE (and, for nonce, CRC32) arms, and the BUG()/default
 * terminators are missing below.
 */
432 static inline unsigned crc_offset(const union bch_extent_crc *crc)
434 switch (extent_crc_type(crc)) {
435 case BCH_EXTENT_CRC_NONE:
437 case BCH_EXTENT_CRC32:
438 return crc->crc32.offset;
439 case BCH_EXTENT_CRC64:
440 return crc->crc64.offset;
441 case BCH_EXTENT_CRC128:
442 return crc->crc128.offset;

/* crc32 entries have no nonce field; only crc64/crc128 do. */
448 static inline unsigned crc_nonce(const union bch_extent_crc *crc)
450 switch (extent_crc_type(crc)) {
451 case BCH_EXTENT_CRC_NONE:
452 case BCH_EXTENT_CRC32:
454 case BCH_EXTENT_CRC64:
455 return crc->crc64.nonce;
456 case BCH_EXTENT_CRC128:
457 return crc->crc128.nonce;

463 static inline unsigned crc_csum_type(const union bch_extent_crc *crc)
465 switch (extent_crc_type(crc)) {
466 case BCH_EXTENT_CRC_NONE:
468 case BCH_EXTENT_CRC32:
469 return crc->crc32.csum_type;
470 case BCH_EXTENT_CRC64:
471 return crc->crc64.csum_type;
472 case BCH_EXTENT_CRC128:
473 return crc->crc128.csum_type;

479 static inline unsigned crc_compression_type(const union bch_extent_crc *crc)
481 switch (extent_crc_type(crc)) {
482 case BCH_EXTENT_CRC_NONE:
484 case BCH_EXTENT_CRC32:
485 return crc->crc32.compression_type;
486 case BCH_EXTENT_CRC64:
487 return crc->crc64.compression_type;
488 case BCH_EXTENT_CRC128:
489 return crc->crc128.compression_type;

/* Widen any format's checksum to the 128-bit struct bch_csum. */
495 static inline struct bch_csum crc_csum(const union bch_extent_crc *crc)
497 switch (extent_crc_type(crc)) {
498 case BCH_EXTENT_CRC_NONE:
499 return (struct bch_csum) { 0 };
500 case BCH_EXTENT_CRC32:
501 return (struct bch_csum) { .lo = crc->crc32.csum };
502 case BCH_EXTENT_CRC64:
503 return (struct bch_csum) {
504 .lo = crc->crc64.csum_lo,
505 .hi = crc->crc64.csum_hi,
507 case BCH_EXTENT_CRC128:
508 return crc->crc128.csum;
/*
 * Largest compressed-extent size (sectors) among this key's pointers
 * whose crc marks real compression covering more than the key's live
 * size — 0 if none. NOTE(review): lines are elided from this listing
 * (braces, "unsigned ret = 0;" presumably, the switch header, the if
 * condition's opening, and the final return).
 */
514 static inline unsigned bkey_extent_is_compressed(struct bkey_s_c k)
516 struct bkey_s_c_extent e;
517 const struct bch_extent_ptr *ptr;
518 const union bch_extent_crc *crc;
523 case BCH_EXTENT_CACHED:
524 e = bkey_s_c_to_extent(k);
526 extent_for_each_ptr_crc(e, ptr, crc)
528 crc_compression_type(crc) != BCH_COMPRESSION_NONE &&
529 crc_compressed_size(e.k, crc) < k.k->size)
530 ret = max_t(unsigned, ret,
531 crc_compressed_size(e.k, crc))

/*
 * Nonce for the extent's current position: first encrypted crc entry's
 * offset + nonce. NOTE(review): the fallback return for extents with no
 * encrypted crc is elided from this listing.
 */
537 static inline unsigned extent_current_nonce(struct bkey_s_c_extent e)
539 const union bch_extent_crc *crc;
541 extent_for_each_crc(e, crc)
542 if (bch2_csum_type_is_encryption(crc_csum_type(crc)))
543 return crc_offset(crc) + crc_nonce(crc);
/* Crc maintenance: shrink/deduplicate crc entries (defined in extents.c). */
548 void bch2_extent_narrow_crcs(struct bkey_s_extent);
549 void bch2_extent_drop_redundant_crcs(struct bkey_s_extent);

/* Drop a pointer from an extent (double-underscore: unchecked variant). */
551 void __bch2_extent_drop_ptr(struct bkey_s_extent, struct bch_extent_ptr *);
552 void bch2_extent_drop_ptr(struct bkey_s_extent, struct bch_extent_ptr *);
553 void bch2_extent_drop_ptr_idx(struct bkey_s_extent, unsigned);

/* Pointer lookups by device index / by value / by match with another extent. */
555 const struct bch_extent_ptr *
556 bch2_extent_has_device(struct bkey_s_c_extent, unsigned);
557 struct bch_extent_ptr *
558 bch2_extent_find_ptr(struct bch_fs *, struct bkey_s_extent,
559 struct bch_extent_ptr);
560 struct bch_extent_ptr *
561 bch2_extent_find_matching_ptr(struct bch_fs *, struct bkey_s_extent,
562 struct bkey_s_c_extent);

/* Trim a key to start/end at the given position; resize its length. */
564 bool bch2_cut_front(struct bpos, struct bkey_i *);
565 bool bch2_cut_back(struct bpos, struct bkey *);
566 void bch2_key_resize(struct bkey *, unsigned);
568 #endif /* _BCACHE_EXTENTS_H */