/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_trans;

/* extent entries: */

#define extent_entry_last(_e)                                           \
        ((typeof(&(_e).v->start[0])) bkey_val_end(_e))

#define entry_to_ptr(_entry)                                            \
({                                                                      \
        EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));              \
                                                                        \
        __builtin_choose_expr(                                          \
                type_is_exact(_entry, const union bch_extent_entry *),  \
                (const struct bch_extent_ptr *) (_entry),               \
                (struct bch_extent_ptr *) (_entry));                    \
})

/* downcast, preserves const */
#define to_entry(_entry)                                                \
({                                                                      \
        BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&        \
                     !type_is(_entry, struct bch_extent_ptr *) &&       \
                     !type_is(_entry, struct bch_extent_stripe_ptr *)); \
                                                                        \
        __builtin_choose_expr(                                          \
                (type_is_exact(_entry, const union bch_extent_crc *) || \
                 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
                 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
                (const union bch_extent_entry *) (_entry),              \
                (union bch_extent_entry *) (_entry));                   \
})

#define extent_entry_next(_entry)                                       \
        ((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
        return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
        int ret = __ffs(e->type);

        EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

        return ret;
}
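
/*
 * As the __ffs() calls above imply, an entry's type field identifies the
 * entry type by its lowest set bit. A sketch with hypothetical values:
 *
 *	e->type == 1 << BCH_EXTENT_ENTRY_crc64
 *	extent_entry_type(e)   == BCH_EXTENT_ENTRY_crc64
 *
 *	e->type == 0
 *	__extent_entry_type(e) == BCH_EXTENT_ENTRY_MAX
 */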

static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
        switch (extent_entry_type(entry)) {
#define x(f, n)                                         \
        case BCH_EXTENT_ENTRY_##f:                      \
                return sizeof(struct bch_extent_##f);
        BCH_EXTENT_ENTRY_TYPES()
#undef x
        default:
                BUG();
        }
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
        return extent_entry_bytes(entry) / sizeof(u64);
}

static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
        return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
}

static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
{
        return extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
        switch (extent_entry_type(e)) {
        case BCH_EXTENT_ENTRY_crc32:
        case BCH_EXTENT_ENTRY_crc64:
        case BCH_EXTENT_ENTRY_crc128:
                return true;
        default:
                return false;
        }
}

union bch_extent_crc {
        u8                              type;
        struct bch_extent_crc32         crc32;
        struct bch_extent_crc64         crc64;
        struct bch_extent_crc128        crc128;
};

#define __entry_to_crc(_entry)                                          \
        __builtin_choose_expr(                                          \
                type_is_exact(_entry, const union bch_extent_entry *),  \
                (const union bch_extent_crc *) (_entry),                \
                (union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry)                                            \
({                                                                      \
        EBUG_ON((_entry) && !extent_entry_is_crc(_entry));              \
                                                                        \
        __entry_to_crc(_entry);                                         \
})

static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)                                             \
                .csum_type              = _crc.csum_type,               \
                .compression_type       = _crc.compression_type,        \
                .compressed_size        = _crc._compressed_size + 1,    \
                .uncompressed_size      = _crc._uncompressed_size + 1,  \
                .offset                 = _crc.offset,                  \
                .live_size              = k->size

        if (!crc)
                return (struct bch_extent_crc_unpacked) {
                        .compressed_size        = k->size,
                        .uncompressed_size      = k->size,
                        .live_size              = k->size,
                };

        switch (extent_entry_type(to_entry(crc))) {
        case BCH_EXTENT_ENTRY_crc32: {
                struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
                        common_fields(crc->crc32),
                };

                *((__le32 *) &ret.csum.lo) = crc->crc32.csum;

                memcpy(&ret.csum.lo, &crc->crc32.csum,
                       sizeof(crc->crc32.csum));

                return ret;
        }
        case BCH_EXTENT_ENTRY_crc64: {
                struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
                        common_fields(crc->crc64),
                        .nonce                  = crc->crc64.nonce,
                        .csum.lo                = (__force __le64) crc->crc64.csum_lo,
                };

                *((__le16 *) &ret.csum.hi) = crc->crc64.csum_hi;

                return ret;
        }
        case BCH_EXTENT_ENTRY_crc128: {
                struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
                        common_fields(crc->crc128),
                        .nonce                  = crc->crc128.nonce,
                        .csum                   = crc->crc128.csum,
                };

                return ret;
        }
        default:
                BUG();
        }
#undef common_fields
}
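
/*
 * Unpacking sketch: the packed _compressed_size/_uncompressed_size fields are
 * stored biased by one sector, so a packed value of 7 unpacks to 8 sectors.
 * Passing a NULL crc describes unchecksummed, uncompressed data covering the
 * whole key. A hypothetical caller:
 *
 *	struct bch_extent_crc_unpacked u = bch2_extent_crc_unpack(k, NULL);
 *
 *	// u.csum_type == 0, u.compression_type == 0,
 *	// u.compressed_size == u.uncompressed_size == u.live_size == k->size
 */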

static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
{
        return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
                crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
        const union bch_extent_entry    *start;
        const union bch_extent_entry    *end;
};

struct bkey_ptrs {
        union bch_extent_entry  *start;
        union bch_extent_entry  *end;
};

static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
        switch (k.k->type) {
        case KEY_TYPE_btree_ptr: {
                struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

                return (struct bkey_ptrs_c) {
                        to_entry(&e.v->start[0]),
                        to_entry(extent_entry_last(e))
                };
        }
        case KEY_TYPE_extent: {
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

                return (struct bkey_ptrs_c) {
                        e.v->start,
                        extent_entry_last(e)
                };
        }
        case KEY_TYPE_stripe: {
                struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

                return (struct bkey_ptrs_c) {
                        to_entry(&s.v->ptrs[0]),
                        to_entry(&s.v->ptrs[s.v->nr_blocks]),
                };
        }
        case KEY_TYPE_reflink_v: {
                struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

                return (struct bkey_ptrs_c) {
                        r.v->start,
                        bkey_val_end(r),
                };
        }
        case KEY_TYPE_btree_ptr_v2: {
                struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

                return (struct bkey_ptrs_c) {
                        to_entry(&e.v->start[0]),
                        to_entry(extent_entry_last(e))
                };
        }
        default:
                return (struct bkey_ptrs_c) { NULL, NULL };
        }
}

static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
        struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

        return (struct bkey_ptrs) {
                (void *) p.start,
                (void *) p.end
        };
}

#define __bkey_extent_entry_for_each_from(_start, _end, _entry)         \
        for ((_entry) = (_start);                                       \
             (_entry) < (_end);                                         \
             (_entry) = extent_entry_next(_entry))

#define __bkey_ptr_next(_ptr, _end)                                     \
({                                                                      \
        typeof(_end) _entry;                                            \
                                                                        \
        __bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry) \
                if (extent_entry_is_ptr(_entry))                        \
                        break;                                          \
                                                                        \
        _entry < (_end) ? entry_to_ptr(_entry) : NULL;                  \
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start)             \
        __bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry)                          \
        bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr)                         \
        for ((_ptr) = (_start);                                         \
             ((_ptr) = __bkey_ptr_next(_ptr, _end));                    \
             (_ptr)++)

#define bkey_ptr_next(_p, _ptr)                                         \
        __bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr)                                     \
        __bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)
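
/*
 * Usage sketch for the generic pointer iterator, modelled on the
 * bch2_bkey_*_devs() helpers further down; count_cached_ptrs() is a
 * hypothetical caller, not part of this header:
 *
 *	static unsigned count_cached_ptrs(struct bkey_s_c k)
 *	{
 *		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *		const struct bch_extent_ptr *ptr;
 *		unsigned nr = 0;
 *
 *		bkey_for_each_ptr(ptrs, ptr)
 *			if (ptr->cached)
 *				nr++;
 *		return nr;
 *	}
 */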

#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)                  \
({                                                                      \
        __label__ out;                                                  \
                                                                        \
        (_ptr).idx      = 0;                                            \
        (_ptr).has_ec   = false;                                        \
                                                                        \
        __bkey_extent_entry_for_each_from(_entry, _end, _entry)         \
                switch (extent_entry_type(_entry)) {                    \
                case BCH_EXTENT_ENTRY_ptr:                              \
                        (_ptr).ptr              = _entry->ptr;          \
                        goto out;                                       \
                case BCH_EXTENT_ENTRY_crc32:                            \
                case BCH_EXTENT_ENTRY_crc64:                            \
                case BCH_EXTENT_ENTRY_crc128:                           \
                        (_ptr).crc = bch2_extent_crc_unpack(_k,         \
                                        entry_to_crc(_entry));          \
                        break;                                          \
                case BCH_EXTENT_ENTRY_stripe_ptr:                       \
                        (_ptr).ec = _entry->stripe_ptr;                 \
                        (_ptr).has_ec   = true;                         \
                        break;                                          \
                }                                                       \
out:                                                                    \
        _entry < (_end);                                                \
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)      \
        for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),             \
             (_entry) = _start;                                         \
             __bkey_ptr_next_decode(_k, _end, _ptr, _entry);            \
             (_entry) = extent_entry_next(_entry))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)                  \
        __bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,            \
                                   _ptr, _entry)
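
/*
 * Usage sketch for decoded iteration: each struct extent_ptr_decoded carries
 * a pointer together with the checksum/compression info and stripe pointer
 * entries that precede it in the value. A hypothetical caller:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct extent_ptr_decoded p;
 *
 *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 *		if (crc_is_compressed(p.crc))
 *			;	// pointer to compressed data
 *		if (p.has_ec)
 *			;	// pointer is part of an erasure coded stripe
 *	}
 */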

#define bkey_crc_next(_k, _start, _end, _crc, _iter)                    \
({                                                                      \
        __bkey_extent_entry_for_each_from(_iter, _end, _iter)           \
                if (extent_entry_is_crc(_iter)) {                       \
                        (_crc) = bch2_extent_crc_unpack(_k,             \
                                                entry_to_crc(_iter));   \
                        break;                                          \
                }                                                       \
                                                                        \
        (_iter) < (_end);                                               \
})

#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)              \
        for ((_crc) = bch2_extent_crc_unpack(_k, NULL),                 \
             (_iter) = (_start);                                        \
             bkey_crc_next(_k, _start, _end, _crc, _iter);              \
             (_iter) = extent_entry_next(_iter))

#define bkey_for_each_crc(_k, _p, _crc, _iter)                          \
        __bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
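
/*
 * Usage sketch for checksum-entry iteration, e.g. to test whether any part of
 * an extent is stored compressed; a hypothetical caller, along the lines of
 * the bch2_bkey_sectors_compressed() helper declared below:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct bch_extent_crc_unpacked crc;
 *
 *	bkey_for_each_crc(k.k, ptrs, crc, entry)
 *		if (crc_is_compressed(crc))
 *			return true;
 */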

/* Iterate over pointers in KEY_TYPE_extent: */

#define extent_for_each_entry_from(_e, _entry, _start)                  \
        __bkey_extent_entry_for_each_from(_start,                       \
                                extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry)                               \
        extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr)                                       \
        __bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr)                                   \
        __bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_for_each_ptr_decode(_e, _ptr, _entry)                    \
        __bkey_for_each_ptr_decode((_e).k, (_e).v->start,               \
                                   extent_entry_last(_e), _ptr, _entry)

/* utility code common to all keys with pointers: */

void bch2_mark_io_failure(struct bch_io_failures *,
                          struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
                               struct bch_io_failures *,
                               struct extent_ptr_decoded *);

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);

int bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
                              int, struct bkey_s);

#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {            \
        .key_invalid    = bch2_btree_ptr_invalid,               \
        .val_to_text    = bch2_btree_ptr_to_text,               \
        .swab           = bch2_ptr_swab,                        \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
})

#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {         \
        .key_invalid    = bch2_btree_ptr_v2_invalid,            \
        .val_to_text    = bch2_btree_ptr_v2_to_text,            \
        .swab           = bch2_ptr_swab,                        \
        .compat         = bch2_btree_ptr_v2_compat,             \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
})

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_extent ((struct bkey_ops) {               \
        .key_invalid    = bch2_bkey_ptrs_invalid,               \
        .val_to_text    = bch2_bkey_ptrs_to_text,               \
        .swab           = bch2_ptr_swab,                        \
        .key_normalize  = bch2_extent_normalize,                \
        .key_merge      = bch2_extent_merge,                    \
        .trans_trigger  = bch2_trans_mark_extent,               \
        .atomic_trigger = bch2_mark_extent,                     \
})

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
                             int, struct printbuf *);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_reservation ((struct bkey_ops) {          \
        .key_invalid    = bch2_reservation_invalid,             \
        .val_to_text    = bch2_reservation_to_text,             \
        .key_merge      = bch2_reservation_merge,               \
        .trans_trigger  = bch2_trans_mark_reservation,          \
        .atomic_trigger = bch2_mark_reservation,                \
})

/* Extent checksum entries: */

bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
                                 struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
                            struct bch_extent_crc_unpacked);

/* Generic code for keys with pointers: */

static inline bool bkey_is_btree_ptr(const struct bkey *k)
{
        switch (k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
                return true;
        default:
                return false;
        }
}

static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
        switch (k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                return true;
        default:
                return false;
        }
}

static inline bool bkey_extent_is_inline_data(const struct bkey *k)
{
        return  k->type == KEY_TYPE_inline_data ||
                k->type == KEY_TYPE_indirect_inline_data;
}

static inline unsigned bkey_inline_data_offset(const struct bkey *k)
{
        switch (k->type) {
        case KEY_TYPE_inline_data:
                return sizeof(struct bch_inline_data);
        case KEY_TYPE_indirect_inline_data:
                return sizeof(struct bch_indirect_inline_data);
        default:
                BUG();
        }
}

static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
        return bkey_val_bytes(k) - bkey_inline_data_offset(k);
}

#define bkey_inline_data_p(_k)  (((void *) (_k).v) + bkey_inline_data_offset((_k).k))
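
/*
 * Sketch of how the inline data helpers fit together: the value is a small
 * header (bch_inline_data or bch_indirect_inline_data) followed by the data
 * itself, so for a hypothetical inline extent k:
 *
 *	void *data	= bkey_inline_data_p(k);
 *	unsigned bytes	= bkey_inline_data_bytes(k.k);
 *
 * data points just past the header and bytes is bkey_val_bytes(k.k) minus the
 * header size.
 */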

static inline bool bkey_extent_is_data(const struct bkey *k)
{
        return  bkey_extent_is_direct_data(k) ||
                bkey_extent_is_inline_data(k) ||
                k->type == KEY_TYPE_reflink_p;
}

/*
 * Should extent be counted under inode->i_sectors?
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
        switch (k->type) {
        case KEY_TYPE_extent:
        case KEY_TYPE_reservation:
        case KEY_TYPE_reflink_p:
        case KEY_TYPE_reflink_v:
        case KEY_TYPE_inline_data:
        case KEY_TYPE_indirect_inline_data:
                return true;
        default:
                return false;
        }
}

static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const struct bch_extent_ptr *ptr;

        bkey_for_each_ptr(ptrs, ptr)
                if (ptr->unwritten)
                        return true;
        return false;
}

static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
{
        return k.k->type == KEY_TYPE_reservation ||
                bkey_extent_is_unwritten(k);
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
        struct bch_devs_list ret = (struct bch_devs_list) { 0 };
        struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
        const struct bch_extent_ptr *ptr;

        bkey_for_each_ptr(p, ptr)
                ret.devs[ret.nr++] = ptr->dev;

        return ret;
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
        struct bch_devs_list ret = (struct bch_devs_list) { 0 };
        struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
        const struct bch_extent_ptr *ptr;

        bkey_for_each_ptr(p, ptr)
                if (!ptr->cached)
                        ret.devs[ret.nr++] = ptr->dev;

        return ret;
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
        struct bch_devs_list ret = (struct bch_devs_list) { 0 };
        struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
        const struct bch_extent_ptr *ptr;

        bkey_for_each_ptr(p, ptr)
                if (ptr->cached)
                        ret.devs[ret.nr++] = ptr->dev;

        return ret;
}

static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
{
        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
                return BCH_DATA_btree;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                return BCH_DATA_user;
        case KEY_TYPE_stripe: {
                struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

                BUG_ON(ptr < s.v->ptrs ||
                       ptr >= s.v->ptrs + s.v->nr_blocks);

                return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
                        ? BCH_DATA_parity
                        : BCH_DATA_user;
        }
        default:
                BUG();
        }
}
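
/*
 * Worked example for the KEY_TYPE_stripe case above, with hypothetical
 * values: for nr_blocks == 6 and nr_redundant == 2, ptrs[0..3] hold user data
 * and ptrs[4..5] hold parity, so bch2_bkey_ptr_data_type() returns
 * BCH_DATA_user for the first four pointers and BCH_DATA_parity for the last
 * two.
 */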

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);

unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
void bch2_extent_ptr_decoded_append(struct bkey_i *,
                                    struct extent_ptr_decoded *);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
                                           struct bch_extent_ptr *);

#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)                            \
do {                                                                    \
        struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k);                    \
                                                                        \
        _ptr = &_ptrs.start->ptr;                                       \
                                                                        \
        while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) {                   \
                if (_cond) {                                            \
                        _ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr);   \
                        _ptrs = bch2_bkey_ptrs(_k);                     \
                        continue;                                       \
                }                                                       \
                                                                        \
                (_ptr)++;                                               \
        }                                                               \
} while (0)
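
/*
 * Usage sketch for bch2_bkey_drop_ptrs(): drop every cached pointer from a
 * key. The condition here is a simplified, hypothetical one; roughly this
 * pattern is what helpers like bch2_extent_normalize() use:
 *
 *	struct bch_extent_ptr *ptr;
 *
 *	bch2_bkey_drop_ptrs(k, ptr, ptr->cached);
 */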

void bch2_bkey_drop_device(struct bkey_s, unsigned);
void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);

bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
                           struct bch_extent_ptr, u64);
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
bool bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s_c);

bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c,
                           int, struct printbuf *);

void bch2_ptr_swab(struct bkey_s);

/* Generic extent code: */

enum bch_extent_overlap {
        BCH_EXTENT_OVERLAP_ALL          = 0,
        BCH_EXTENT_OVERLAP_BACK         = 1,
        BCH_EXTENT_OVERLAP_FRONT        = 2,
        BCH_EXTENT_OVERLAP_MIDDLE       = 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
                                                          const struct bkey *m)
{
        int cmp1 = bkey_cmp(k->p, m->p) < 0;
        int cmp2 = bkey_cmp(bkey_start_pos(k),
                            bkey_start_pos(m)) > 0;

        return (cmp1 << 1) + cmp2;
}
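
/*
 * Worked example with hypothetical offsets within one inode: if m covers
 * sectors [0, 100), then for k covering
 *
 *	[0, 100) or wider	-> BCH_EXTENT_OVERLAP_ALL	(cmp1 = 0, cmp2 = 0)
 *	[50, 100)		-> BCH_EXTENT_OVERLAP_BACK	(cmp1 = 0, cmp2 = 1)
 *	[0, 50)			-> BCH_EXTENT_OVERLAP_FRONT	(cmp1 = 1, cmp2 = 0)
 *	[25, 75)		-> BCH_EXTENT_OVERLAP_MIDDLE	(cmp1 = 1, cmp2 = 1)
 */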

int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);

static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
{
        bch2_cut_front_s(where, bkey_i_to_s(k));
}

static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
{
        bch2_cut_back_s(where, bkey_i_to_s(k));
}
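
/*
 * Usage sketch with hypothetical positions: bch2_cut_front() trims off the
 * part of an extent before @where and bch2_cut_back() trims off the part
 * after it, so trimming an extent covering [0, 100) down to [20, 60) would
 * look like:
 *
 *	bch2_cut_front(POS(inum, 20), k);
 *	bch2_cut_back(POS(inum, 60), k);
 */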

/**
 * bch2_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) will be preserved; this modifies where the extent ends
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
        k->p.offset -= k->size;
        k->p.offset += new_size;
        k->size = new_size;
}
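
/*
 * Worked example with hypothetical numbers: for a key with p.offset == 100
 * and size == 20 (covering sectors [80, 100)), bch2_key_resize(k, 8) leaves
 * the start at 80 and produces p.offset == 88, size == 8, i.e. [80, 88).
 */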

/*
 * In extent_sort_fix_overlapping(), insert_fixup_extent(),
 * extent_merge_inline() - we're modifying keys in place that are packed. To do
 * that we have to unpack the key, modify the unpacked key - then this
 * copies/repacks the unpacked to the original as necessary.
 */
static inline void extent_save(struct btree *b, struct bkey_packed *dst,
                               struct bkey *src)
{
        struct bkey_format *f = &b->format;
        struct bkey_i *dst_unpacked;

        if ((dst_unpacked = packed_to_bkey(dst)))
                dst_unpacked->k = *src;
        else
                BUG_ON(!bch2_bkey_pack_key(dst, src, f));
}

#endif /* _BCACHEFS_EXTENTS_H */