#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_insert;
struct btree_insert_entry;

/* extent entries: */

#define extent_entry_last(_e)		bkey_val_end(_e)

#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})

/* downcast, preserves const */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *) &&	\
		     !type_is(_entry, struct bch_extent_stripe_ptr *)); \
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})
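
/*
 * entry_to_ptr() and to_entry() convert between the generic bch_extent_entry
 * and the specific entry types while preserving constness: a const argument
 * yields a const result, a non-const argument a non-const one. Illustrative
 * sketch:
 *
 *	const union bch_extent_entry *entry = ...;
 *	const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
 */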

static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}

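/*
 * extent_entry_bytes() sizes an entry by its type: BCH_EXTENT_ENTRY_TYPES()
 * is an x-macro that expands x(f, n) once per entry type, so each case
 * returns sizeof() the corresponding struct bch_extent_<type>.
 */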
static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n)						\
	case BCH_EXTENT_ENTRY_##f:			\
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
	return extent_entry_bytes(entry) / sizeof(u64);
}

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
	switch (extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_ptr:
		return true;
	default:
		return false;
	}
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
	switch (extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_crc32:
	case BCH_EXTENT_ENTRY_crc64:
	case BCH_EXTENT_ENTRY_crc128:
		return true;
	default:
		return false;
	}
}

union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};

#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})

static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		.compressed_size	= _crc._compressed_size + 1,	\
		.uncompressed_size	= _crc._uncompressed_size + 1,	\
		.offset			= _crc.offset,			\
		.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		memcpy(&ret.csum.lo, &crc->crc32.csum,
		       sizeof(crc->crc32.csum));

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce			= crc->crc64.nonce,
			.csum.lo		= (__force __le64) crc->crc64.csum_lo,
		};

		*((__le16 *) &ret.csum.hi) = crc->crc64.csum_hi;

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce			= crc->crc128.nonce,
			.csum			= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}
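
/*
 * The unpacked form normalizes the three on-disk crc encodings: sizes are
 * stored biased by one on disk, so _compressed_size/_uncompressed_size + 1
 * give the real sector counts, and a NULL crc means an uncompressed,
 * unchecksummed extent spanning k->size sectors. Illustrative sketch:
 *
 *	struct bch_extent_crc_unpacked u =
 *		bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 *	if (u.csum_type)
 *		... verify checksum over u.compressed_size sectors ...
 */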

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};

/* iterate over bkey ptrs */

#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

#define __bkey_extent_entry_for_each_from(_start, _end, _entry)		\
	for ((_entry) = (_start);					\
	     (_entry) < (_end);						\
	     (_entry) = extent_entry_next(_entry))

#define __bkey_ptr_next(_ptr, _end)					\
({									\
	typeof(_end) _entry;						\
									\
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry) \
		if (extent_entry_is_ptr(_entry))			\
			break;						\
									\
	_entry < (_end) ? entry_to_ptr(_entry) : NULL;			\
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start)		\
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry)				\
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr)				\
	for ((_ptr) = (_start);						\
	     ((_ptr) = __bkey_ptr_next(_ptr, _end));			\
	     (_ptr)++)

#define bkey_ptr_next(_p, _ptr)						\
	__bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr)					\
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)
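
/*
 * Illustrative use of bkey_for_each_ptr() (a sketch; the helpers below such
 * as bch2_bkey_devs() follow this pattern):
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const struct bch_extent_ptr *ptr;
 *
 *	bkey_for_each_ptr(ptrs, ptr)
 *		if (!ptr->cached)
 *			... ptr->dev is the device this replica lives on ...
 */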

#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)			\
({									\
	__label__ out;							\
									\
	(_ptr).idx	= 0;						\
	(_ptr).ec_nr	= 0;						\
									\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry)		\
		switch (extent_entry_type(_entry)) {			\
		case BCH_EXTENT_ENTRY_ptr:				\
			(_ptr).ptr		= _entry->ptr;		\
			goto out;					\
		case BCH_EXTENT_ENTRY_crc32:				\
		case BCH_EXTENT_ENTRY_crc64:				\
		case BCH_EXTENT_ENTRY_crc128:				\
			(_ptr).crc = bch2_extent_crc_unpack(_k,		\
					entry_to_crc(_entry));		\
			break;						\
		case BCH_EXTENT_ENTRY_stripe_ptr:			\
			(_ptr).ec[(_ptr).ec_nr++] = _entry->stripe_ptr; \
			break;						\
		}							\
out:									\
	_entry < (_end);						\
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)	\
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),		\
	     (_entry) = _start;						\
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry);		\
	     (_entry) = extent_entry_next(_entry))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)			\
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
				   _ptr, _entry)
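
/*
 * Illustrative use of bkey_for_each_ptr_decode() (a sketch): each iteration
 * yields one pointer together with the most recent preceding crc entry,
 * already unpacked:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct extent_ptr_decoded p;
 *
 *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 *		... use p.ptr, p.crc and p.ec[0..p.ec_nr) ...
 */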

/* utility code common to all keys with pointers: */

static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(bkey_val_end(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}

static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

	return (struct bkey_ptrs) {
		(void *) p.start,
		(void *) p.end
	};
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		if (!ptr->cached)
			ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		if (ptr->cached)
			ret.devs[ret.nr++] = ptr->dev;

	return ret;
}

static inline bool bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(p, ptr)
		if (ptr->dev == dev)
			return true;

	return false;
}

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

void bch2_mark_io_failure(struct bch_io_failures *,
			  struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
			       struct bch_io_failures *,
			       struct extent_ptr_decoded *);

/* bch_btree_ptr: */

const char *bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_debugcheck(struct bch_fs *, struct btree *,
			       struct bkey_s_c);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);

#define bch2_bkey_ops_btree_ptr (struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_invalid,		\
	.key_debugcheck	= bch2_btree_ptr_debugcheck,		\
	.val_to_text	= bch2_btree_ptr_to_text,		\
	.swab		= bch2_ptr_swab,			\
}

/* bch_extent: */

const char *bch2_extent_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_extent_debugcheck(struct bch_fs *, struct btree *, struct bkey_s_c);
void bch2_extent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
enum merge_result bch2_extent_merge(struct bch_fs *,
				    struct bkey_i *, struct bkey_i *);

#define bch2_bkey_ops_extent (struct bkey_ops) {		\
	.key_invalid	= bch2_extent_invalid,			\
	.key_debugcheck	= bch2_extent_debugcheck,		\
	.val_to_text	= bch2_extent_to_text,			\
	.swab		= bch2_ptr_swab,			\
	.key_normalize	= bch2_extent_normalize,		\
	.key_merge	= bch2_extent_merge,			\
}

/* bch_reservation: */

const char *bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
enum merge_result bch2_reservation_merge(struct bch_fs *,
					 struct bkey_i *, struct bkey_i *);

#define bch2_bkey_ops_reservation (struct bkey_ops) {		\
	.key_invalid	= bch2_reservation_invalid,		\
	.val_to_text	= bch2_reservation_to_text,		\
	.key_merge	= bch2_reservation_merge,		\
}

void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);

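/*
 * An extent is "atomic" with respect to an iterator if it lies entirely
 * within the leaf node the iterator currently points at, i.e. inserting it
 * would not require spanning btree nodes.
 */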
static inline bool bch2_extent_is_atomic(struct bkey *k,
					 struct btree_iter *iter)
{
	struct btree *b = iter->l[0].b;

	return bkey_cmp(k->p, b->key.k.p) <= 0 &&
		bkey_cmp(bkey_start_pos(k), b->data->min_key) >= 0;
}

enum btree_insert_ret
bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
		       unsigned *);
enum btree_insert_ret
bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *);

void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,
				      unsigned, unsigned);

const struct bch_extent_ptr *
bch2_extent_has_device(struct bkey_s_c_extent, unsigned);
const struct bch_extent_ptr *
bch2_extent_has_group(struct bch_fs *, struct bkey_s_c_extent, unsigned);
const struct bch_extent_ptr *
bch2_extent_has_target(struct bch_fs *, struct bkey_s_c_extent, unsigned);

unsigned bch2_extent_is_compressed(struct bkey_s_c);

bool bch2_extent_matches_ptr(struct bch_fs *, struct bkey_s_c_extent,
			     struct bch_extent_ptr, u64);

static inline bool bkey_extent_is_data(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_extent:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reservation:
		return true;
	default:
		return false;
	}
}

static inline bool bch2_extent_is_fully_allocated(struct bkey_s_c k)
{
	return bkey_extent_is_allocation(k.k) &&
		!bch2_extent_is_compressed(k);
}

void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
void bch2_bkey_drop_device(struct bkey_s, unsigned);

/* Extent entry iteration: */

#define extent_for_each_entry_from(_e, _entry, _start)			\
	__bkey_extent_entry_for_each_from(_start,			\
				extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr)					\
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr)					\
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_crc_next(_e, _crc, _iter)				\
({									\
	extent_for_each_entry_from(_e, _iter, _iter)			\
		if (extent_entry_is_crc(_iter)) {			\
			(_crc) = bch2_extent_crc_unpack((_e).k, entry_to_crc(_iter));\
			break;						\
		}							\
									\
	(_iter) < extent_entry_last(_e);				\
})

#define extent_for_each_crc(_e, _crc, _iter)				\
	for ((_crc) = bch2_extent_crc_unpack((_e).k, NULL),		\
	     (_iter) = (_e).v->start;					\
	     extent_crc_next(_e, _crc, _iter);				\
	     (_iter) = extent_entry_next(_iter))

#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
				   extent_entry_last(_e), _ptr, _entry)
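
/*
 * The extent_ variants above are the same iterators specialized to extent
 * keys. Illustrative sketch:
 *
 *	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 *	const union bch_extent_entry *entry;
 *	struct extent_ptr_decoded p;
 *
 *	extent_for_each_ptr_decode(e, p, entry)
 *		... p.crc describes how the data p.ptr points at is
 *		    checksummed/compressed ...
 */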

void bch2_extent_crc_append(struct bkey_i_extent *,
			    struct bch_extent_crc_unpacked);
void bch2_extent_ptr_decoded_append(struct bkey_i_extent *,
				    struct extent_ptr_decoded *);

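/*
 * Insert @new at @dst, shifting the entries from @dst to the end of the value
 * up to make room; the caller must ensure the value has space for it (see
 * BKEY_EXTENT_VAL_U64s_MAX).
 */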
static inline void __extent_entry_insert(struct bkey_i_extent *e,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = extent_entry_last(extent_i_to_s(e));

	memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
			dst, (u64 *) end - (u64 *) dst);
	e->k.u64s += extent_entry_u64s(new);
	memcpy(dst, new, extent_entry_bytes(new));
}

static inline void __extent_entry_push(struct bkey_i_extent *e)
{
	union bch_extent_entry *entry = extent_entry_last(extent_i_to_s(e));

	EBUG_ON(bkey_val_u64s(&e->k) + extent_entry_u64s(entry) >
		BKEY_EXTENT_VAL_U64s_MAX);

	e->k.u64s += extent_entry_u64s(entry);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent,
				 struct bch_extent_crc_unpacked);
bool bch2_extent_narrow_crcs(struct bkey_i_extent *, struct bch_extent_crc_unpacked);

union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
					   struct bch_extent_ptr *);

#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)				\
do {									\
	struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k);			\
									\
	_ptr = &_ptrs.start->ptr;					\
									\
	while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) {			\
		if (_cond) {						\
			_ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr);	\
			_ptrs = bch2_bkey_ptrs(_k);			\
			continue;					\
		}							\
									\
		(_ptr)++;						\
	}								\
} while (0)
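
/*
 * Illustrative use of bch2_bkey_drop_ptrs() (a sketch): drop every cached
 * pointer from a key:
 *
 *	struct bch_extent_ptr *ptr;
 *
 *	bch2_bkey_drop_ptrs(k, ptr, ptr->cached);
 *
 * The ptrs are re-fetched after each drop because removing an entry changes
 * the key's value.
 */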

bool __bch2_cut_front(struct bpos, struct bkey_s);

static inline bool bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	return __bch2_cut_front(where, bkey_i_to_s(k));
}

bool bch2_cut_back(struct bpos, struct bkey *);
void bch2_key_resize(struct bkey *, unsigned);

/*
 * In extent_sort_fix_overlapping(), insert_fixup_extent() and
 * extent_merge_inline() we modify keys in place that may be packed: the key
 * is unpacked, the unpacked copy is modified, and then this helper copies or
 * repacks the unpacked key back over the original as necessary.
 */
static inline void extent_save(struct btree *b, struct bkey_packed *dst,
			       struct bkey *src)
{
	struct bkey_format *f = &b->format;
	struct bkey_i *dst_unpacked;

	if ((dst_unpacked = packed_to_bkey(dst)))
		dst_unpacked->k = *src;
	else
		BUG_ON(!bch2_bkey_pack_key(dst, src, f));
}

int bch2_check_range_allocated(struct bch_fs *, struct bpos, u64);

#endif /* _BCACHEFS_EXTENTS_H */