libbcachefs/extents.h
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct journal_res;
struct btree_node_iter;
struct btree_node_iter_large;
struct btree_insert;
struct btree_insert_entry;
struct bch_devs_mask;
union bch_extent_crc;

const char *bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_debugcheck(struct bch_fs *, struct btree *,
                               struct bkey_s_c);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);

#define bch2_bkey_btree_ops (struct bkey_ops) {                 \
        .key_invalid    = bch2_btree_ptr_invalid,               \
        .key_debugcheck = bch2_btree_ptr_debugcheck,            \
        .val_to_text    = bch2_btree_ptr_to_text,               \
        .swab           = bch2_ptr_swab,                        \
}

const char *bch2_extent_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_extent_debugcheck(struct bch_fs *, struct btree *, struct bkey_s_c);
void bch2_extent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_ptr_normalize(struct bch_fs *, struct btree *, struct bkey_s);
enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *,
                                    struct bkey_i *, struct bkey_i *);

#define bch2_bkey_extent_ops (struct bkey_ops) {                \
        .key_invalid    = bch2_extent_invalid,                  \
        .key_debugcheck = bch2_extent_debugcheck,               \
        .val_to_text    = bch2_extent_to_text,                  \
        .swab           = bch2_ptr_swab,                        \
        .key_normalize  = bch2_ptr_normalize,                   \
        .key_merge      = bch2_extent_merge,                    \
        .is_extents     = true,                                 \
}
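
/*
 * These initializers are consumed by the per-key-type bkey_ops table
 * (bch2_bkey_ops[] in bkey_methods.c in this tree). A rough sketch of how
 * they are hooked up - the table name and indices are assumptions, not
 * something this header defines:
 *
 *      [BKEY_TYPE_BTREE]   = bch2_bkey_btree_ops,
 *      [BKEY_TYPE_EXTENTS] = bch2_bkey_extent_ops,
 */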

struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *,
                                                   struct btree *,
                                                   struct btree_node_iter_large *);
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
                                                      struct bset *,
                                                      struct btree *,
                                                      struct btree_node_iter_large *);

void bch2_mark_io_failure(struct bch_io_failures *,
                          struct extent_ptr_decoded *);
int bch2_btree_pick_ptr(struct bch_fs *, const struct btree *,
                        struct bch_io_failures *,
                        struct extent_ptr_decoded *);
int bch2_extent_pick_ptr(struct bch_fs *, struct bkey_s_c,
                         struct bch_io_failures *,
                         struct extent_ptr_decoded *);

void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);

static inline bool bch2_extent_is_atomic(struct bkey *k,
                                         struct btree_iter *iter)
{
        struct btree *b = iter->l[0].b;

        return bkey_cmp(k->p, b->key.k.p) <= 0 &&
                bkey_cmp(bkey_start_pos(k), b->data->min_key) >= 0;
}
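
/*
 * Illustrative sketch (variable names are assumed, not from this header):
 * a caller inserting extent 'k' (struct bkey_i *) at 'iter' first trims it
 * so it does not extend past the leaf node the iterator points to, after
 * which the update should be atomic with respect to that node, per the
 * check in bch2_extent_is_atomic() above:
 *
 *      bch2_extent_trim_atomic(k, iter);
 *      EBUG_ON(!bch2_extent_is_atomic(&k->k, iter));
 */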

enum btree_insert_ret
bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
                       unsigned *);
enum btree_insert_ret
bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *);

bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,
                                      unsigned, unsigned);

const struct bch_extent_ptr *
bch2_extent_has_device(struct bkey_s_c_extent, unsigned);
void bch2_extent_drop_device(struct bkey_s_extent, unsigned);
const struct bch_extent_ptr *
bch2_extent_has_group(struct bch_fs *, struct bkey_s_c_extent, unsigned);
const struct bch_extent_ptr *
bch2_extent_has_target(struct bch_fs *, struct bkey_s_c_extent, unsigned);

unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent);
unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c);
unsigned bch2_extent_is_compressed(struct bkey_s_c);

unsigned bch2_extent_ptr_durability(struct bch_fs *,
                                    const struct bch_extent_ptr *);
unsigned bch2_extent_durability(struct bch_fs *, struct bkey_s_c_extent);

bool bch2_extent_matches_ptr(struct bch_fs *, struct bkey_s_c_extent,
                             struct bch_extent_ptr, u64);

static inline bool bkey_extent_is_data(const struct bkey *k)
{
        switch (k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED:
                return true;
        default:
                return false;
        }
}

static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
        switch (k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED:
        case BCH_RESERVATION:
                return true;
        default:
                return false;
        }
}

static inline bool bch2_extent_is_fully_allocated(struct bkey_s_c k)
{
        return bkey_extent_is_allocation(k.k) &&
                !bch2_extent_is_compressed(k);
}

static inline bool bkey_extent_is_cached(const struct bkey *k)
{
        return k->type == BCH_EXTENT_CACHED;
}

static inline void bkey_extent_set_cached(struct bkey *k, bool cached)
{
        EBUG_ON(k->type != BCH_EXTENT &&
                k->type != BCH_EXTENT_CACHED);

        k->type = cached ? BCH_EXTENT_CACHED : BCH_EXTENT;
}

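/*
 * An extent entry's type is encoded in its low bits as a single set bit
 * (e.g. extent_ptr_append() below sets ptr.type to 1 << BCH_EXTENT_ENTRY_ptr),
 * so __ffs() recovers the enum bch_extent_entry_type value; a zero type
 * field maps to BCH_EXTENT_ENTRY_MAX, i.e. no/unknown entry.
 */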
static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
        return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
        int ret = __ffs(e->type);

        EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

        return ret;
}

static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
        switch (extent_entry_type(entry)) {
#define x(f, n)                                         \
        case BCH_EXTENT_ENTRY_##f:                      \
                return sizeof(struct bch_extent_##f);
        BCH_EXTENT_ENTRY_TYPES()
#undef x
        default:
                BUG();
        }
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
        return extent_entry_bytes(entry) / sizeof(u64);
}

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
        switch (extent_entry_type(e)) {
        case BCH_EXTENT_ENTRY_ptr:
                return true;
        default:
                return false;
        }
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
        switch (extent_entry_type(e)) {
        case BCH_EXTENT_ENTRY_crc32:
        case BCH_EXTENT_ENTRY_crc64:
        case BCH_EXTENT_ENTRY_crc128:
                return true;
        default:
                return false;
        }
}

union bch_extent_crc {
        u8                              type;
        struct bch_extent_crc32         crc32;
        struct bch_extent_crc64         crc64;
        struct bch_extent_crc128        crc128;
};

/* downcast, preserves const */
#define to_entry(_entry)                                                \
({                                                                      \
        BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&        \
                     !type_is(_entry, struct bch_extent_ptr *) &&       \
                     !type_is(_entry, struct bch_extent_stripe_ptr *)); \
                                                                        \
        __builtin_choose_expr(                                          \
                (type_is_exact(_entry, const union bch_extent_crc *) || \
                 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
                 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
                (const union bch_extent_entry *) (_entry),              \
                (union bch_extent_entry *) (_entry));                   \
})

#define __entry_to_crc(_entry)                                          \
        __builtin_choose_expr(                                          \
                type_is_exact(_entry, const union bch_extent_entry *),  \
                (const union bch_extent_crc *) (_entry),                \
                (union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry)                                            \
({                                                                      \
        EBUG_ON((_entry) && !extent_entry_is_crc(_entry));              \
                                                                        \
        __entry_to_crc(_entry);                                         \
})

#define entry_to_ptr(_entry)                                            \
({                                                                      \
        EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));              \
                                                                        \
        __builtin_choose_expr(                                          \
                type_is_exact(_entry, const union bch_extent_entry *),  \
                (const struct bch_extent_ptr *) (_entry),               \
                (struct bch_extent_ptr *) (_entry));                    \
})

/* checksum entries: */

static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)                                             \
                .csum_type              = _crc.csum_type,               \
                .compression_type       = _crc.compression_type,        \
                .compressed_size        = _crc._compressed_size + 1,    \
                .uncompressed_size      = _crc._uncompressed_size + 1,  \
                .offset                 = _crc.offset,                  \
                .live_size              = k->size

        if (!crc)
                return (struct bch_extent_crc_unpacked) {
                        .compressed_size        = k->size,
                        .uncompressed_size      = k->size,
                        .live_size              = k->size,
                };

        switch (extent_entry_type(to_entry(crc))) {
        case BCH_EXTENT_ENTRY_crc32: {
                struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
                        common_fields(crc->crc32),
                };

                *((__le32 *) &ret.csum.lo) = crc->crc32.csum;

                memcpy(&ret.csum.lo, &crc->crc32.csum,
                       sizeof(crc->crc32.csum));

                return ret;
        }
        case BCH_EXTENT_ENTRY_crc64: {
                struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
                        common_fields(crc->crc64),
                        .nonce                  = crc->crc64.nonce,
                        .csum.lo                = (__force __le64) crc->crc64.csum_lo,
                };

                *((__le16 *) &ret.csum.hi) = crc->crc64.csum_hi;

                return ret;
        }
        case BCH_EXTENT_ENTRY_crc128: {
                struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
                        common_fields(crc->crc128),
                        .nonce                  = crc->crc128.nonce,
                        .csum                   = crc->crc128.csum,
                };

                return ret;
        }
        default:
                BUG();
        }
#undef common_fields
}
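
/*
 * Note on the unpacking above: on-disk crc entries store their sizes biased
 * by one (hence the "+ 1" in common_fields()), and a NULL crc - i.e. an
 * extent with no checksum/compression entry - unpacks to sizes that all
 * equal the key's size. A minimal sketch, assuming 'k' is the bkey of such
 * an extent:
 *
 *      struct bch_extent_crc_unpacked crc = bch2_extent_crc_unpack(k, NULL);
 *
 *      EBUG_ON(crc.csum_type || crc.compression_type);
 *      EBUG_ON(crc.compressed_size != k->size);
 */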

/* Extent entry iteration: */

#define extent_entry_next(_entry)                                       \
        ((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

#define extent_entry_last(_e)                                           \
        vstruct_idx((_e).v, bkey_val_u64s((_e).k))

/* Iterate over all entries: */

#define extent_for_each_entry_from(_e, _entry, _start)                  \
        for ((_entry) = _start;                                         \
             (_entry) < extent_entry_last(_e);                          \
             (_entry) = extent_entry_next(_entry))

#define extent_for_each_entry(_e, _entry)                               \
        extent_for_each_entry_from(_e, _entry, (_e).v->start)

/* Iterate over pointers only: */

#define extent_ptr_next(_e, _ptr)                                       \
({                                                                      \
        typeof(&(_e).v->start[0]) _entry;                               \
                                                                        \
        extent_for_each_entry_from(_e, _entry, to_entry(_ptr))          \
                if (extent_entry_is_ptr(_entry))                        \
                        break;                                          \
                                                                        \
        _entry < extent_entry_last(_e) ? entry_to_ptr(_entry) : NULL;   \
})

#define extent_for_each_ptr(_e, _ptr)                                   \
        for ((_ptr) = &(_e).v->start->ptr;                              \
             ((_ptr) = extent_ptr_next(_e, _ptr));                      \
             (_ptr)++)
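
/*
 * Usage sketch (mirrors bch2_extent_dirty_devs() below; 'e' is assumed to
 * be a struct bkey_s_c_extent, e.g. from bkey_s_c_to_extent()):
 *
 *      const struct bch_extent_ptr *ptr;
 *      unsigned nr_dirty = 0;
 *
 *      extent_for_each_ptr(e, ptr)
 *              if (!ptr->cached)
 *                      nr_dirty++;
 */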

/* Iterate over crcs only: */

#define extent_crc_next(_e, _crc, _iter)                                \
({                                                                      \
        extent_for_each_entry_from(_e, _iter, _iter)                    \
                if (extent_entry_is_crc(_iter)) {                       \
                        (_crc) = bch2_extent_crc_unpack((_e).k, entry_to_crc(_iter));\
                        break;                                          \
                }                                                       \
                                                                        \
        (_iter) < extent_entry_last(_e);                                \
})

#define extent_for_each_crc(_e, _crc, _iter)                            \
        for ((_crc) = bch2_extent_crc_unpack((_e).k, NULL),             \
             (_iter) = (_e).v->start;                                   \
             extent_crc_next(_e, _crc, _iter);                          \
             (_iter) = extent_entry_next(_iter))
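
/*
 * Usage sketch, assuming 'e' is a struct bkey_s_c_extent: check whether any
 * part of the extent is stored compressed:
 *
 *      struct bch_extent_crc_unpacked crc;
 *      const union bch_extent_entry *i;
 *      bool compressed = false;
 *
 *      extent_for_each_crc(e, crc, i)
 *              if (crc.compression_type)
 *                      compressed = true;
 */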

/* Iterate over pointers, with crcs: */

static inline struct extent_ptr_decoded
__extent_ptr_decoded_init(const struct bkey *k)
{
        return (struct extent_ptr_decoded) {
                .crc            = bch2_extent_crc_unpack(k, NULL),
        };
}

#define EXTENT_ITERATE_EC               (1 << 0)

#define __extent_ptr_next_decode(_e, _ptr, _entry)                      \
({                                                                      \
        __label__ out;                                                  \
                                                                        \
        extent_for_each_entry_from(_e, _entry, _entry)                  \
                switch (extent_entry_type(_entry)) {                    \
                case BCH_EXTENT_ENTRY_ptr:                              \
                        (_ptr).ptr              = _entry->ptr;          \
                        goto out;                                       \
                case BCH_EXTENT_ENTRY_crc32:                            \
                case BCH_EXTENT_ENTRY_crc64:                            \
                case BCH_EXTENT_ENTRY_crc128:                           \
                        (_ptr).crc = bch2_extent_crc_unpack((_e).k,     \
                                        entry_to_crc(_entry));          \
                        break;                                          \
                }                                                       \
                                                                        \
out:                                                                    \
        _entry < extent_entry_last(_e);                                 \
})

#define extent_for_each_ptr_decode(_e, _ptr, _entry)                    \
        for ((_ptr) = __extent_ptr_decoded_init((_e).k),                \
             (_entry) = (_e).v->start;                                  \
             __extent_ptr_next_decode(_e, _ptr, _entry);                \
             (_entry) = extent_entry_next(_entry))
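
/*
 * Usage sketch, assuming 'e' is a struct bkey_s_c_extent: walk each pointer
 * together with the checksum/compression information that applies to it:
 *
 *      struct extent_ptr_decoded p;
 *      const union bch_extent_entry *entry;
 *
 *      extent_for_each_ptr_decode(e, p, entry)
 *              if (!p.ptr.cached && p.crc.compression_type)
 *                      pr_debug("compressed ptr on device %u\n", p.ptr.dev);
 */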

/* Iterate over pointers backwards: */

void bch2_extent_crc_append(struct bkey_i_extent *,
                            struct bch_extent_crc_unpacked);
void bch2_extent_ptr_decoded_append(struct bkey_i_extent *,
                                    struct extent_ptr_decoded *);

static inline void __extent_entry_insert(struct bkey_i_extent *e,
                                         union bch_extent_entry *dst,
                                         union bch_extent_entry *new)
{
        union bch_extent_entry *end = extent_entry_last(extent_i_to_s(e));

        memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
                        dst, (u64 *) end - (u64 *) dst);
        e->k.u64s += extent_entry_u64s(new);
        memcpy(dst, new, extent_entry_bytes(new));
}

static inline void __extent_entry_push(struct bkey_i_extent *e)
{
        union bch_extent_entry *entry = extent_entry_last(extent_i_to_s(e));

        EBUG_ON(bkey_val_u64s(&e->k) + extent_entry_u64s(entry) >
                BKEY_EXTENT_VAL_U64s_MAX);

        e->k.u64s += extent_entry_u64s(entry);
}

static inline void extent_ptr_append(struct bkey_i_extent *e,
                                     struct bch_extent_ptr ptr)
{
        ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
        extent_entry_last(extent_i_to_s(e))->ptr = ptr;
        __extent_entry_push(e);
}
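
/*
 * Usage sketch for building up an extent: 'e' is assumed to point at a
 * freshly initialized extent key, and the device/offset/gen values are
 * placeholders, not from this header:
 *
 *      extent_ptr_append(e, (struct bch_extent_ptr) {
 *              .dev    = ca->dev_idx,
 *              .gen    = gen,
 *              .offset = bucket_offset,
 *      });
 *
 * extent_ptr_append() fills in the entry's type bit itself and bumps
 * e->k.u64s via __extent_entry_push().
 */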

static inline struct bch_devs_list bch2_extent_devs(struct bkey_s_c_extent e)
{
        struct bch_devs_list ret = (struct bch_devs_list) { 0 };
        const struct bch_extent_ptr *ptr;

        extent_for_each_ptr(e, ptr)
                ret.devs[ret.nr++] = ptr->dev;

        return ret;
}

static inline struct bch_devs_list bch2_extent_dirty_devs(struct bkey_s_c_extent e)
{
        struct bch_devs_list ret = (struct bch_devs_list) { 0 };
        const struct bch_extent_ptr *ptr;

        extent_for_each_ptr(e, ptr)
                if (!ptr->cached)
                        ret.devs[ret.nr++] = ptr->dev;

        return ret;
}

static inline struct bch_devs_list bch2_extent_cached_devs(struct bkey_s_c_extent e)
{
        struct bch_devs_list ret = (struct bch_devs_list) { 0 };
        const struct bch_extent_ptr *ptr;

        extent_for_each_ptr(e, ptr)
                if (ptr->cached)
                        ret.devs[ret.nr++] = ptr->dev;

        return ret;
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
        switch (k.k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED:
                return bch2_extent_devs(bkey_s_c_to_extent(k));
        default:
                return (struct bch_devs_list) { .nr = 0 };
        }
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
        switch (k.k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED:
                return bch2_extent_dirty_devs(bkey_s_c_to_extent(k));
        default:
                return (struct bch_devs_list) { .nr = 0 };
        }
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
        switch (k.k->type) {
        case BCH_EXTENT:
        case BCH_EXTENT_CACHED:
                return bch2_extent_cached_devs(bkey_s_c_to_extent(k));
        default:
                return (struct bch_devs_list) { .nr = 0 };
        }
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent,
                                 struct bch_extent_crc_unpacked);
bool bch2_extent_narrow_crcs(struct bkey_i_extent *, struct bch_extent_crc_unpacked);

union bch_extent_entry *bch2_extent_drop_ptr(struct bkey_s_extent,
                                             struct bch_extent_ptr *);

#define bch2_extent_drop_ptrs(_e, _ptr, _cond)                          \
do {                                                                    \
        _ptr = &(_e).v->start->ptr;                                     \
                                                                        \
        while ((_ptr = extent_ptr_next(_e, _ptr))) {                    \
                if (_cond) {                                            \
                        _ptr = (void *) bch2_extent_drop_ptr(_e, _ptr); \
                        continue;                                       \
                }                                                       \
                                                                        \
                (_ptr)++;                                               \
        }                                                               \
} while (0)
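
/*
 * Usage sketch, assuming 'e' is a struct bkey_s_extent and 'ptr' a
 * struct bch_extent_ptr *: drop every cached pointer from the extent:
 *
 *      bch2_extent_drop_ptrs(e, ptr, ptr->cached);
 */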

bool bch2_cut_front(struct bpos, struct bkey_i *);
bool bch2_cut_back(struct bpos, struct bkey *);
void bch2_key_resize(struct bkey *, unsigned);
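
/*
 * bch2_cut_front()/bch2_cut_back() trim an extent key to start/end at the
 * given position, returning whether anything was cut; bch2_key_resize()
 * changes the key's size while keeping its start position fixed. (This
 * description is based on how these are used elsewhere in this tree, not on
 * anything defined in this header.)
 */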

int bch2_check_range_allocated(struct bch_fs *, struct bpos, u64);

#endif /* _BCACHEFS_EXTENTS_H */