// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"

static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

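/* Find the failure record for device @dev, if one has been recorded: */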
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

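/*
 * Record an IO failure for the pointer @p: failures are counted per device,
 * and the count is reset whenever we move on to retrying a different pointer
 * index on that device:
 */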
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}

/*
 * Returns true if p1 is a better pointer to read from than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}

/*
 * This picks a non-stale pointer to read from, preferring devices on which
 * reads haven't recently failed. Returns 1 if a pointer was picked, 0 if the
 * extent is unwritten or there's nothing to read, and -EIO if the extent has
 * dirty pointers but none of them are readable.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/*
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		 */
		if (p.ptr.unwritten)
			return 0;

		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   unsigned flags, struct printbuf *err)
{
	if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
		prt_printf(err, "value too big (%zu > %u)",
		       bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
		return -BCH_ERR_invalid_bkey;
	}

	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			      unsigned flags, struct printbuf *err)
{
	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
		prt_printf(err, "value too big (%zu > %zu)",
		       bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
		return -BCH_ERR_invalid_bkey;
	}

	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
	       le64_to_cpu(bp.v->seq),
	       le16_to_cpu(bp.v->sectors_written),
	       BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */

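/*
 * Attempt to merge extent @r into @l: first verify that the entries of both
 * extents line up and describe physically contiguous data with compatible
 * checksum entries, then combine the entries in place in @l and extend its
 * size. Returns true if the merge was performed:
 */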
bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs   l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	bool use_right_ptr;
	struct bch_dev *ca;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev			!= rp.ptr.dev ||
		    lp.ptr.gen			!= rp.ptr.gen ||
		    lp.ptr.unwritten		!= rp.ptr.unwritten ||
		    lp.has_ec			!= rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
			return false;

		if (lp.has_ec			!= rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block		!= rp.ec.block ||
		      lp.ec.redundancy		!= rp.ec.redundancy ||
		      lp.ec.idx			!= rp.ec.idx)))
			return false;

		if (lp.crc.compression_type	!= rp.crc.compression_type ||
		    lp.crc.nonce		!= rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type		!= rp.crc.csum_type ||
			    lp.crc.nonce		!= rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size	+= crc_r.uncompressed_size;
				crc_l.compressed_size	+= crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     unsigned flags, struct printbuf *err)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
		prt_printf(err, "invalid nr_replicas (%u)",
		       r.v->nr_replicas);
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}

static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum	 = *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}

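/*
 * Append a checksum entry for @new to @k, using the smallest crc entry type
 * that can represent the checksum, sizes and nonce:
 */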
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		   new.uncompressed_size	<= CRC64_SIZE_MAX &&
		   new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		   new.uncompressed_size	<= CRC128_SIZE_MAX &&
		   new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}

/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;
	return false;
}

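/*
 * Returns the number of replicas this extent has, counting erasure coded
 * redundancy and ignoring cached pointers:
 */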
unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca;

	if (p->ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p->ptr.dev);

	return ca->mi.durability +
		(p->has_ec
		 ? p->ec.redundancy
		 : 0);
}

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca;

	if (p->ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p->ptr.dev);

	if (ca->mi.state == BCH_MEMBER_STATE_failed)
		return 0;

	return ca->mi.durability +
		(p->has_ec
		 ? p->ec.redundancy
		 : 0);
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);

	return durability;
}

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
			durability += bch2_extent_ptr_durability(c, &p);

	return durability;
}

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}

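/*
 * Append the decoded pointer @p to @k, reusing an existing crc entry that
 * matches p->crc if there is one and appending a new one otherwise; the
 * stripe pointer, if any, is inserted alongside the pointer:
 */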
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}

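/*
 * Extent entries are variable length and only iterable forwards, so finding
 * the previous entry means walking from the start:
 */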
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
					  union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}

/*
 * Returns pointer to the next entry after the one being dropped:
 */
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
						   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	union bch_extent_entry *ret = entry;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next != ptrs.end;
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
			break;
		} else if (extent_entry_is_ptr(next)) {
			drop_crc = false;
			break;
		}
	}

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))
			break;

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry)) {
			ret = (void *) ret - extent_entry_bytes(entry);
			extent_entry_drop(k, entry);
		}
	}

	return ret;
}

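/*
 * Like bch2_bkey_drop_ptr_noerror(), but also turns the key into an error key
 * if we dropped the last dirty pointer, or a deleted key if no pointers
 * remain at all:
 */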
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
	union bch_extent_entry *ret =
		bch2_bkey_drop_ptr_noerror(k, ptr);

	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	if (have_dirty &&
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	}

	return ret;
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);

	if (ptr)
		bch2_bkey_drop_ptr_noerror(k, ptr);
}

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset  - offset)
			return true;

	return false;
}

/*
 * Returns true if two extents refer to the same data:
 */
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
	if (k1.k->type != k2.k->type)
		return false;

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
			return false;

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
			if (p1.ptr.dev		== p2.ptr.dev &&
			    p1.ptr.gen		== p2.ptr.gen &&
			    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
			    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
				return true;

		return false;
	} else {
		/* KEY_TYPE_deleted, etc. */
		return true;
	}
}

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
{
	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
	union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev		== p2.ptr.dev &&
		    p1.ptr.gen		== p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
			return &entry2->ptr;

	return NULL;
}

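/*
 * Mark the given pointer as cached, dropping the stripe pointer that applied
 * to it, if any - cached pointers aren't erasure coded:
 */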
void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	union bch_extent_entry *ec = NULL;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (&entry->ptr == ptr) {
			ptr->cached = true;
			if (ec)
				extent_entry_drop(k, ec);
			return;
		}

		if (extent_entry_is_stripe_ptr(entry))
			ec = entry;
		else if (extent_entry_is_ptr(entry))
			ec = NULL;
	}

	BUG();
}

/*
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	return bkey_deleted(k.k);
}

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	if (c)
		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			if (!ca) {
				prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
				       (u64) ptr->offset, ptr->gen,
				       ptr->cached ? " cached" : "");
			} else {
				u32 offset;
				u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

				prt_printf(out, "ptr: %u:%llu:%u gen %u",
					   ptr->dev, b, offset, ptr->gen);
				if (ptr->cached)
					prt_str(out, " cached");
				if (ptr->unwritten)
					prt_str(out, " unwritten");
				if (ca && ptr_stale(ca, ptr))
					prt_printf(out, " stale");
			}
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       bch2_csum_types[crc.csum_type],
			       bch2_compression_types[crc.compression_type]);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

static int extent_ptr_invalid(const struct bch_fs *c,
			      struct bkey_s_c k,
			      const struct bch_extent_ptr *ptr,
			      unsigned size_ondisk,
			      bool metadata,
			      struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	u64 bucket;
	u32 bucket_offset;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev)) {
		prt_printf(err, "pointer to invalid device (%u)", ptr->dev);
		return -BCH_ERR_invalid_bkey;
	}

	ca = bch_dev_bkey_exists(c, ptr->dev);
	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev) {
			prt_printf(err, "multiple pointers to same device (%u)", ptr->dev);
			return -BCH_ERR_invalid_bkey;
		}

	bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);

	if (bucket >= ca->mi.nbuckets) {
		prt_printf(err, "pointer past last bucket (%llu >= %llu)",
		       bucket, ca->mi.nbuckets);
		return -BCH_ERR_invalid_bkey;
	}

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
		prt_printf(err, "pointer before first bucket (%llu < %u)",
		       bucket, ca->mi.first_bucket);
		return -BCH_ERR_invalid_bkey;
	}

	if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
		prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)",
		       bucket_offset, size_ondisk, ca->mi.bucket_size);
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

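/*
 * Validate the extent entries of @k: checks individual pointers as well as
 * cross-entry invariants (consistent unwritten flags, consistent nonces, no
 * redundant crc or stripe entries):
 */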
int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   unsigned flags, struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	unsigned nr_ptrs = 0;
	bool unwritten = false, have_ec = false, crc_since_last_ptr = false;
	int ret;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
			prt_printf(err, "invalid extent entry type (got %u, max %u)",
			       __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
			return -BCH_ERR_invalid_bkey;
		}

		if (bkey_is_btree_ptr(k.k) &&
		    !extent_entry_is_ptr(entry)) {
			prt_printf(err, "has non ptr field");
			return -BCH_ERR_invalid_bkey;
		}

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_invalid(c, k, &entry->ptr, size_ondisk,
						 false, err);
			if (ret)
				return ret;

			if (nr_ptrs && unwritten != entry->ptr.unwritten) {
				prt_printf(err, "extent with unwritten and written ptrs");
				return -BCH_ERR_invalid_bkey;
			}

			if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
				prt_printf(err, "has unwritten ptrs");
				return -BCH_ERR_invalid_bkey;
			}

			if (entry->ptr.cached && have_ec) {
				prt_printf(err, "cached, erasure coded ptr");
				return -BCH_ERR_invalid_bkey;
			}

			unwritten = entry->ptr.unwritten;
			have_ec = false;
			crc_since_last_ptr = false;
			nr_ptrs++;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size) {
				prt_printf(err, "checksum offset + key size > uncompressed size");
				return -BCH_ERR_invalid_bkey;
			}

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type)) {
				prt_printf(err, "invalid checksum type");
				return -BCH_ERR_invalid_bkey;
			}

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
				prt_printf(err, "invalid compression type");
				return -BCH_ERR_invalid_bkey;
			}

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce) {
					prt_printf(err, "incorrect nonce");
					return -BCH_ERR_invalid_bkey;
				}
			}

			if (crc_since_last_ptr) {
				prt_printf(err, "redundant crc entry");
				return -BCH_ERR_invalid_bkey;
			}
			crc_since_last_ptr = true;
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			if (have_ec) {
				prt_printf(err, "redundant stripe entry");
				return -BCH_ERR_invalid_bkey;
			}
			have_ec = true;
			break;
		}
	}

	if (!nr_ptrs) {
		prt_str(err, "no ptrs");
		return -BCH_ERR_invalid_bkey;
	}

	if (nr_ptrs >= BCH_BKEY_PTRS_MAX) {
		prt_str(err, "too many ptrs");
		return -BCH_ERR_invalid_bkey;
	}

	if (crc_since_last_ptr) {
		prt_printf(err, "redundant crc entry");
		return -BCH_ERR_invalid_bkey;
	}

	if (have_ec) {
		prt_printf(err, "redundant stripe entry");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

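/*
 * Byte swab the value of @k: extent entries are multiples of 64 bits, so swab
 * each u64 and then correct the checksum fields that aren't stored as single
 * u64s:
 */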
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}

/* Generic extent code: */

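/*
 * Cut the front of @k off at @where, adjusting pointer offsets, checksum
 * entry offsets and inline data. Returns the number of u64s removed from the
 * value, negated:
 */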
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_le(where, bkey_start_pos(k.k)))
		return 0;

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}

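/*
 * Cut the back of @k off at @where; only inline data keys need their value
 * adjusted. Returns the number of u64s removed from the value, negated:
 */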
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_ge(where, k.k->p))
		return 0;

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}