1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Code for managing the extent btree and dynamically updating the writeback
6  * dirty sector count.
7  */
8
9 #include "bcachefs.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_io.h"
13 #include "btree_iter.h"
14 #include "buckets.h"
15 #include "checksum.h"
16 #include "debug.h"
17 #include "disk_groups.h"
18 #include "error.h"
19 #include "extents.h"
20 #include "inode.h"
21 #include "journal.h"
22 #include "replicas.h"
23 #include "super.h"
24 #include "super-io.h"
25 #include "util.h"
26
27 #include <trace/events/bcachefs.h>
28
29 static unsigned bch2_crc_field_size_max[] = {
30         [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
31         [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
32         [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
33 };
34
35 static void bch2_extent_crc_pack(union bch_extent_crc *,
36                                  struct bch_extent_crc_unpacked,
37                                  enum bch_extent_entry_type);
38
39 static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
40                                                    unsigned dev)
41 {
42         struct bch_dev_io_failures *i;
43
44         for (i = f->devs; i < f->devs + f->nr; i++)
45                 if (i->dev == dev)
46                         return i;
47
48         return NULL;
49 }
50
51 void bch2_mark_io_failure(struct bch_io_failures *failed,
52                           struct extent_ptr_decoded *p)
53 {
54         struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
55
56         if (!f) {
57                 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
58
59                 f = &failed->devs[failed->nr++];
60                 f->dev          = p->ptr.dev;
61                 f->idx          = p->idx;
62                 f->nr_failed    = 1;
63                 f->nr_retries   = 0;
64         } else if (p->idx != f->idx) {
65                 f->idx          = p->idx;
66                 f->nr_failed    = 1;
67                 f->nr_retries   = 0;
68         } else {
69                 f->nr_failed++;
70         }
71 }
72
73 /*
74  * returns true if p1 is better than p2:
75  */
76 static inline bool ptr_better(struct bch_fs *c,
77                               const struct extent_ptr_decoded p1,
78                               const struct extent_ptr_decoded p2)
79 {
80         if (likely(!p1.idx && !p2.idx)) {
81                 struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
82                 struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
83
84                 u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
85                 u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
86
87                 /* Pick at random, biased in favor of the faster device: */
88
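                /*
                 * p1, whose read latency is l1, is picked with probability
                 * l2 / (l1 + l2): lower observed latency gives a
                 * proportionally better chance of being chosen.
                 */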
89                 return bch2_rand_range(l1 + l2) > l1;
90         }
91
92         if (bch2_force_reconstruct_read)
93                 return p1.idx > p2.idx;
94
95         return p1.idx < p2.idx;
96 }
97
98 /*
99  * This picks a non-stale pointer to read from, taking any previous IO
100  * failures recorded in @failed (which may be NULL) into account. Returns 1
101  * if a pointer was picked, 0 for an unwritten extent/hole, -EIO on error.
102  */
103 int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
104                                struct bch_io_failures *failed,
105                                struct extent_ptr_decoded *pick)
106 {
107         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
108         const union bch_extent_entry *entry;
109         struct extent_ptr_decoded p;
110         struct bch_dev_io_failures *f;
111         struct bch_dev *ca;
112         int ret = 0;
113
114         if (k.k->type == KEY_TYPE_error)
115                 return -EIO;
116
117         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
118                 /*
119                  * Unwritten extent: no need to actually read, treat it as a
120                  * hole and return 0s:
121                  */
122                 if (p.ptr.unwritten)
123                         return 0;
124
125                 ca = bch_dev_bkey_exists(c, p.ptr.dev);
126
127                 /*
128                  * If there are any dirty pointers it's an error if we can't
129                  * read:
130                  */
131                 if (!ret && !p.ptr.cached)
132                         ret = -EIO;
133
134                 if (p.ptr.cached && ptr_stale(ca, &p.ptr))
135                         continue;
136
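                /*
                 * p.idx selects how the data will be read: 0 means read this
                 * pointer directly, 1 means reconstruct it from the erasure
                 * coded stripe. Once reads at the current idx have used up
                 * their retries, move on to the next idx:
                 */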
137                 f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
138                 if (f)
139                         p.idx = f->nr_failed < f->nr_retries
140                                 ? f->idx
141                                 : f->idx + 1;
142
143                 if (!p.idx &&
144                     !bch2_dev_is_readable(ca))
145                         p.idx++;
146
147                 if (bch2_force_reconstruct_read &&
148                     !p.idx && p.has_ec)
149                         p.idx++;
150
151                 if (p.idx >= (unsigned) p.has_ec + 1)
152                         continue;
153
154                 if (ret > 0 && !ptr_better(c, p, *pick))
155                         continue;
156
157                 *pick = p;
158                 ret = 1;
159         }
160
161         return ret;
162 }
163
164 /* KEY_TYPE_btree_ptr: */
165
166 int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
167                            unsigned flags, struct printbuf *err)
168 {
169         if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
170                 prt_printf(err, "value too big (%zu > %u)",
171                        bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
172                 return -BCH_ERR_invalid_bkey;
173         }
174
175         return bch2_bkey_ptrs_invalid(c, k, flags, err);
176 }
177
178 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
179                             struct bkey_s_c k)
180 {
181         bch2_bkey_ptrs_to_text(out, c, k);
182 }
183
184 int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
185                               unsigned flags, struct printbuf *err)
186 {
187         struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
188
189         if (bkey_val_bytes(k.k) <= sizeof(*bp.v)) {
190                 prt_printf(err, "value too small (%zu <= %zu)",
191                        bkey_val_bytes(k.k), sizeof(*bp.v));
192                 return -BCH_ERR_invalid_bkey;
193         }
194
195         if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
196                 prt_printf(err, "value too big (%zu > %zu)",
197                        bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
198                 return -BCH_ERR_invalid_bkey;
199         }
200
201         if (c->sb.version < bcachefs_metadata_version_snapshot &&
202             bp.v->min_key.snapshot) {
203                 prt_printf(err, "invalid min_key.snapshot (%u != 0)",
204                        bp.v->min_key.snapshot);
205                 return -BCH_ERR_invalid_bkey;
206         }
207
208         return bch2_bkey_ptrs_invalid(c, k, flags, err);
209 }
210
211 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
212                                struct bkey_s_c k)
213 {
214         struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
215
216         prt_printf(out, "seq %llx written %u min_key %s",
217                le64_to_cpu(bp.v->seq),
218                le16_to_cpu(bp.v->sectors_written),
219                BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");
220
221         bch2_bpos_to_text(out, bp.v->min_key);
222         prt_printf(out, " ");
223         bch2_bkey_ptrs_to_text(out, c, k);
224 }
225
226 void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
227                               unsigned big_endian, int write,
228                               struct bkey_s k)
229 {
230         struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);
231
232         compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
233
234         if (version < bcachefs_metadata_version_inode_btree_change &&
235             btree_node_type_is_extents(btree_id) &&
236             !bkey_eq(bp.v->min_key, POS_MIN))
237                 bp.v->min_key = write
238                         ? bpos_nosnap_predecessor(bp.v->min_key)
239                         : bpos_nosnap_successor(bp.v->min_key);
240 }
241
242 /* KEY_TYPE_extent: */
243
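/*
 * Try to merge two adjacent extents: their pointer lists must have the same
 * shape and point to contiguous data on the same devices and buckets, and
 * their checksum entries must be combinable. On success the left key is
 * resized to cover both and true is returned:
 */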
244 bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
245 {
246         struct bkey_ptrs   l_ptrs = bch2_bkey_ptrs(l);
247         struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
248         union bch_extent_entry *en_l;
249         const union bch_extent_entry *en_r;
250         struct extent_ptr_decoded lp, rp;
251         bool use_right_ptr;
252         struct bch_dev *ca;
253
254         en_l = l_ptrs.start;
255         en_r = r_ptrs.start;
256         while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
257                 if (extent_entry_type(en_l) != extent_entry_type(en_r))
258                         return false;
259
260                 en_l = extent_entry_next(en_l);
261                 en_r = extent_entry_next(en_r);
262         }
263
264         if (en_l < l_ptrs.end || en_r < r_ptrs.end)
265                 return false;
266
267         en_l = l_ptrs.start;
268         en_r = r_ptrs.start;
269         lp.crc = bch2_extent_crc_unpack(l.k, NULL);
270         rp.crc = bch2_extent_crc_unpack(r.k, NULL);
271
272         while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
273                __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
274                 if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
275                     rp.ptr.offset + rp.crc.offset ||
276                     lp.ptr.dev                  != rp.ptr.dev ||
277                     lp.ptr.gen                  != rp.ptr.gen ||
278                     lp.ptr.unwritten            != rp.ptr.unwritten ||
279                     lp.has_ec                   != rp.has_ec)
280                         return false;
281
282                 /* Extents may not straddle buckets: */
283                 ca = bch_dev_bkey_exists(c, lp.ptr.dev);
284                 if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
285                         return false;
286
287                 if (lp.has_ec                   != rp.has_ec ||
288                     (lp.has_ec &&
289                      (lp.ec.block               != rp.ec.block ||
290                       lp.ec.redundancy          != rp.ec.redundancy ||
291                       lp.ec.idx                 != rp.ec.idx)))
292                         return false;
293
294                 if (lp.crc.compression_type     != rp.crc.compression_type ||
295                     lp.crc.nonce                != rp.crc.nonce)
296                         return false;
297
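                /*
                 * The merged extent will need a single crc entry covering both
                 * halves' live data: either the left entry already covers the
                 * right's data, the right entry already covers the left's, or
                 * the two checksums have to be mergeable into a new one:
                 */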
298                 if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
299                     lp.crc.uncompressed_size) {
300                         /* can use left extent's crc entry */
301                 } else if (lp.crc.live_size <= rp.crc.offset) {
302                         /* can use right extent's crc entry */
303                 } else {
304                         /* check if checksums can be merged: */
305                         if (lp.crc.csum_type            != rp.crc.csum_type ||
306                             lp.crc.nonce                != rp.crc.nonce ||
307                             crc_is_compressed(lp.crc) ||
308                             !bch2_checksum_mergeable(lp.crc.csum_type))
309                                 return false;
310
311                         if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
312                             rp.crc.offset)
313                                 return false;
314
315                         if (lp.crc.csum_type &&
316                             lp.crc.uncompressed_size +
317                             rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
318                                 return false;
319                 }
320
321                 en_l = extent_entry_next(en_l);
322                 en_r = extent_entry_next(en_r);
323         }
324
325         en_l = l_ptrs.start;
326         en_r = r_ptrs.start;
327         while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
328                 if (extent_entry_is_crc(en_l)) {
329                         struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
330                         struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
331
332                         if (crc_l.uncompressed_size + crc_r.uncompressed_size >
333                             bch2_crc_field_size_max[extent_entry_type(en_l)])
334                                 return false;
335                 }
336
337                 en_l = extent_entry_next(en_l);
338                 en_r = extent_entry_next(en_r);
339         }
340
341         use_right_ptr = false;
342         en_l = l_ptrs.start;
343         en_r = r_ptrs.start;
344         while (en_l < l_ptrs.end) {
345                 if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
346                     use_right_ptr)
347                         en_l->ptr = en_r->ptr;
348
349                 if (extent_entry_is_crc(en_l)) {
350                         struct bch_extent_crc_unpacked crc_l =
351                                 bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
352                         struct bch_extent_crc_unpacked crc_r =
353                                 bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
354
355                         use_right_ptr = false;
356
357                         if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
358                             crc_l.uncompressed_size) {
359                                 /* can use left extent's crc entry */
360                         } else if (crc_l.live_size <= crc_r.offset) {
361                                 /* can use right extent's crc entry */
362                                 crc_r.offset -= crc_l.live_size;
363                                 bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
364                                                      extent_entry_type(en_l));
365                                 use_right_ptr = true;
366                         } else {
367                                 crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
368                                                                  crc_l.csum,
369                                                                  crc_r.csum,
370                                                                  crc_r.uncompressed_size << 9);
371
372                                 crc_l.uncompressed_size += crc_r.uncompressed_size;
373                                 crc_l.compressed_size   += crc_r.compressed_size;
374                                 bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
375                                                      extent_entry_type(en_l));
376                         }
377                 }
378
379                 en_l = extent_entry_next(en_l);
380                 en_r = extent_entry_next(en_r);
381         }
382
383         bch2_key_resize(l.k, l.k->size + r.k->size);
384         return true;
385 }
386
387 /* KEY_TYPE_reservation: */
388
389 int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
390                              unsigned flags, struct printbuf *err)
391 {
392         struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
393
394         if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation)) {
395                 prt_printf(err, "incorrect value size (%zu != %zu)",
396                        bkey_val_bytes(k.k), sizeof(*r.v));
397                 return -BCH_ERR_invalid_bkey;
398         }
399
400         if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
401                 prt_printf(err, "invalid nr_replicas (%u)",
402                        r.v->nr_replicas);
403                 return -BCH_ERR_invalid_bkey;
404         }
405
406         return 0;
407 }
408
409 void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
410                               struct bkey_s_c k)
411 {
412         struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
413
414         prt_printf(out, "generation %u replicas %u",
415                le32_to_cpu(r.v->generation),
416                r.v->nr_replicas);
417 }
418
419 bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
420 {
421         struct bkey_s_reservation l = bkey_s_to_reservation(_l);
422         struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);
423
424         if (l.v->generation != r.v->generation ||
425             l.v->nr_replicas != r.v->nr_replicas)
426                 return false;
427
428         bch2_key_resize(l.k, l.k->size + r.k->size);
429         return true;
430 }
431
432 /* Extent checksum entries: */
433
434 /* returns true if not equal */
435 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
436                                          struct bch_extent_crc_unpacked r)
437 {
438         return (l.csum_type             != r.csum_type ||
439                 l.compression_type      != r.compression_type ||
440                 l.compressed_size       != r.compressed_size ||
441                 l.uncompressed_size     != r.uncompressed_size ||
442                 l.offset                != r.offset ||
443                 l.live_size             != r.live_size ||
444                 l.nonce                 != r.nonce ||
445                 bch2_crc_cmp(l.csum, r.csum));
446 }
447
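/*
 * A crc entry @u can be narrowed - replaced with a new checksum @n covering
 * only the live data - if it's uncompressed, actually checksummed, currently
 * covers more than the live region, and matches @n on whether it's encrypted:
 */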
448 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
449                                   struct bch_extent_crc_unpacked n)
450 {
451         return !crc_is_compressed(u) &&
452                 u.csum_type &&
453                 u.uncompressed_size > u.live_size &&
454                 bch2_csum_type_is_encryption(u.csum_type) ==
455                 bch2_csum_type_is_encryption(n.csum_type);
456 }
457
458 bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
459                                  struct bch_extent_crc_unpacked n)
460 {
461         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
462         struct bch_extent_crc_unpacked crc;
463         const union bch_extent_entry *i;
464
465         if (!n.csum_type)
466                 return false;
467
468         bkey_for_each_crc(k.k, ptrs, crc, i)
469                 if (can_narrow_crc(crc, n))
470                         return true;
471
472         return false;
473 }
474
475 /*
476  * We're writing another replica for this extent, so while we've got the data in
477  * memory we'll be computing a new checksum for the currently live data.
478  *
479  * If there are other replicas we aren't moving, and they are checksummed but
480  * not compressed, we can modify them to point to only the data that is
481  * currently live (so that readers won't have to bounce) while we've got the
482  * checksum we need:
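 *
 * For example (hypothetical sizes): if an existing replica's checksum covers
 * 128 sectors of which only 64 are still live, it can be rewritten to use the
 * new 64 sector checksum, and reads of it no longer need to bounce.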
483  */
484 bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
485 {
486         struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
487         struct bch_extent_crc_unpacked u;
488         struct extent_ptr_decoded p;
489         union bch_extent_entry *i;
490         bool ret = false;
491
492         /* Find a checksum entry that covers only live data: */
493         if (!n.csum_type) {
494                 bkey_for_each_crc(&k->k, ptrs, u, i)
495                         if (!crc_is_compressed(u) &&
496                             u.csum_type &&
497                             u.live_size == u.uncompressed_size) {
498                                 n = u;
499                                 goto found;
500                         }
501                 return false;
502         }
503 found:
504         BUG_ON(crc_is_compressed(n));
505         BUG_ON(n.offset);
506         BUG_ON(n.live_size != k->k.size);
507
508 restart_narrow_pointers:
509         ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
510
511         bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
512                 if (can_narrow_crc(p.crc, n)) {
513                         bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
514                         p.ptr.offset += p.crc.offset;
515                         p.crc = n;
516                         bch2_extent_ptr_decoded_append(k, &p);
517                         ret = true;
518                         goto restart_narrow_pointers;
519                 }
520
521         return ret;
522 }
523
524 static void bch2_extent_crc_pack(union bch_extent_crc *dst,
525                                  struct bch_extent_crc_unpacked src,
526                                  enum bch_extent_entry_type type)
527 {
528 #define set_common_fields(_dst, _src)                                   \
529                 _dst.type               = 1 << type;                    \
530                 _dst.csum_type          = _src.csum_type,               \
531                 _dst.compression_type   = _src.compression_type,        \
532                 _dst._compressed_size   = _src.compressed_size - 1,     \
533                 _dst._uncompressed_size = _src.uncompressed_size - 1,   \
534                 _dst.offset             = _src.offset
535
536         switch (type) {
537         case BCH_EXTENT_ENTRY_crc32:
538                 set_common_fields(dst->crc32, src);
539                 dst->crc32.csum  = *((__le32 *) &src.csum.lo);
540                 break;
541         case BCH_EXTENT_ENTRY_crc64:
542                 set_common_fields(dst->crc64, src);
543                 dst->crc64.nonce        = src.nonce;
544                 dst->crc64.csum_lo      = src.csum.lo;
545                 dst->crc64.csum_hi      = *((__le16 *) &src.csum.hi);
546                 break;
547         case BCH_EXTENT_ENTRY_crc128:
548                 set_common_fields(dst->crc128, src);
549                 dst->crc128.nonce       = src.nonce;
550                 dst->crc128.csum        = src.csum;
551                 break;
552         default:
553                 BUG();
554         }
555 #undef set_common_fields
556 }
557
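/*
 * Append a crc entry for @new to @k, packed into the smallest on-disk crc
 * entry type (crc32/crc64/crc128) that can hold its checksum, size and nonce:
 */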
558 void bch2_extent_crc_append(struct bkey_i *k,
559                             struct bch_extent_crc_unpacked new)
560 {
561         struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
562         union bch_extent_crc *crc = (void *) ptrs.end;
563         enum bch_extent_entry_type type;
564
565         if (bch_crc_bytes[new.csum_type]        <= 4 &&
566             new.uncompressed_size               <= CRC32_SIZE_MAX &&
567             new.nonce                           <= CRC32_NONCE_MAX)
568                 type = BCH_EXTENT_ENTRY_crc32;
569         else if (bch_crc_bytes[new.csum_type]   <= 10 &&
570                    new.uncompressed_size        <= CRC64_SIZE_MAX &&
571                    new.nonce                    <= CRC64_NONCE_MAX)
572                 type = BCH_EXTENT_ENTRY_crc64;
573         else if (bch_crc_bytes[new.csum_type]   <= 16 &&
574                    new.uncompressed_size        <= CRC128_SIZE_MAX &&
575                    new.nonce                    <= CRC128_NONCE_MAX)
576                 type = BCH_EXTENT_ENTRY_crc128;
577         else
578                 BUG();
579
580         bch2_extent_crc_pack(crc, new, type);
581
582         k->k.u64s += extent_entry_u64s(ptrs.end);
583
584         EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
585 }
586
587 /* Generic code for keys with pointers: */
588
589 unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
590 {
591         return bch2_bkey_devs(k).nr;
592 }
593
594 unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
595 {
596         return k.k->type == KEY_TYPE_reservation
597                 ? bkey_s_c_to_reservation(k).v->nr_replicas
598                 : bch2_bkey_dirty_devs(k).nr;
599 }
600
601 unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
602 {
603         unsigned ret = 0;
604
605         if (k.k->type == KEY_TYPE_reservation) {
606                 ret = bkey_s_c_to_reservation(k).v->nr_replicas;
607         } else {
608                 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
609                 const union bch_extent_entry *entry;
610                 struct extent_ptr_decoded p;
611
612                 bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
613                         ret += !p.ptr.cached && !crc_is_compressed(p.crc);
614         }
615
616         return ret;
617 }
618
619 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
620 {
621         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
622         const union bch_extent_entry *entry;
623         struct extent_ptr_decoded p;
624         unsigned ret = 0;
625
626         bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
627                 if (!p.ptr.cached && crc_is_compressed(p.crc))
628                         ret += p.crc.compressed_size;
629
630         return ret;
631 }
632
633 bool bch2_bkey_is_incompressible(struct bkey_s_c k)
634 {
635         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
636         const union bch_extent_entry *entry;
637         struct bch_extent_crc_unpacked crc;
638
639         bkey_for_each_crc(k.k, ptrs, crc, entry)
640                 if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
641                         return true;
642         return false;
643 }
644
645 unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
646 {
647         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
648         const union bch_extent_entry *entry;
649         struct extent_ptr_decoded p = { 0 };
650         unsigned replicas = 0;
651
652         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
653                 if (p.ptr.cached)
654                         continue;
655
656                 if (p.has_ec)
657                         replicas += p.ec.redundancy;
658
659                 replicas++;
660
661         }
662
663         return replicas;
664 }
665
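/*
 * Durability of a single pointer: cached pointers count for nothing; otherwise
 * the pointer contributes the device's durability (unless that device has
 * failed) plus any erasure coding redundancy:
 */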
666 unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
667 {
668         unsigned durability = 0;
669         struct bch_dev *ca;
670
671         if (p->ptr.cached)
672                 return 0;
673
674         ca = bch_dev_bkey_exists(c, p->ptr.dev);
675
676         if (ca->mi.state != BCH_MEMBER_STATE_failed)
677                 durability = max_t(unsigned, durability, ca->mi.durability);
678
679         if (p->has_ec)
680                 durability += p->ec.redundancy;
681
682         return durability;
683 }
684
685 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
686 {
687         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
688         const union bch_extent_entry *entry;
689         struct extent_ptr_decoded p;
690         unsigned durability = 0;
691
692         bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
693                 durability += bch2_extent_ptr_durability(c, &p);
694
695         return durability;
696 }
697
698 static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
699 {
700         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
701         const union bch_extent_entry *entry;
702         struct extent_ptr_decoded p;
703         unsigned durability = 0;
704
705         bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
706                 if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
707                         durability += bch2_extent_ptr_durability(c, &p);
708
709         return durability;
710 }
711
712 void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
713 {
714         union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
715         union bch_extent_entry *next = extent_entry_next(entry);
716
717         memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
718         k->k.u64s -= extent_entry_u64s(entry);
719 }
720
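/*
 * Append a decoded pointer to @k: reuse an existing crc entry if one matches
 * p->crc, otherwise append a new one, then insert the pointer (and its stripe
 * pointer, if any) after that crc entry:
 */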
721 void bch2_extent_ptr_decoded_append(struct bkey_i *k,
722                                     struct extent_ptr_decoded *p)
723 {
724         struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
725         struct bch_extent_crc_unpacked crc =
726                 bch2_extent_crc_unpack(&k->k, NULL);
727         union bch_extent_entry *pos;
728
729         if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
730                 pos = ptrs.start;
731                 goto found;
732         }
733
734         bkey_for_each_crc(&k->k, ptrs, crc, pos)
735                 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
736                         pos = extent_entry_next(pos);
737                         goto found;
738                 }
739
740         bch2_extent_crc_append(k, p->crc);
741         pos = bkey_val_end(bkey_i_to_s(k));
742 found:
743         p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
744         __extent_entry_insert(k, pos, to_entry(&p->ptr));
745
746         if (p->has_ec) {
747                 p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
748                 __extent_entry_insert(k, pos, to_entry(&p->ec));
749         }
750 }
751
752 static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
753                                           union bch_extent_entry *entry)
754 {
755         union bch_extent_entry *i = ptrs.start;
756
757         if (i == entry)
758                 return NULL;
759
760         while (extent_entry_next(i) != entry)
761                 i = extent_entry_next(i);
762         return i;
763 }
764
765 static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
766 {
767         union bch_extent_entry *next = extent_entry_next(entry);
768
769         /* stripes have ptrs, but their layout doesn't work with this code */
770         BUG_ON(k.k->type == KEY_TYPE_stripe);
771
772         memmove_u64s_down(entry, next,
773                           (u64 *) bkey_val_end(k) - (u64 *) next);
774         k.k->u64s -= (u64 *) next - (u64 *) entry;
775 }
776
777 /*
778  * Returns pointer to the next entry after the one being dropped:
779  */
780 union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
781                                                    struct bch_extent_ptr *ptr)
782 {
783         struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
784         union bch_extent_entry *entry = to_entry(ptr), *next;
785         union bch_extent_entry *ret = entry;
786         bool drop_crc = true;
787
788         EBUG_ON(ptr < &ptrs.start->ptr ||
789                 ptr >= &ptrs.end->ptr);
790         EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
791
792         for (next = extent_entry_next(entry);
793              next != ptrs.end;
794              next = extent_entry_next(next)) {
795                 if (extent_entry_is_crc(next)) {
796                         break;
797                 } else if (extent_entry_is_ptr(next)) {
798                         drop_crc = false;
799                         break;
800                 }
801         }
802
803         extent_entry_drop(k, entry);
804
805         while ((entry = extent_entry_prev(ptrs, entry))) {
806                 if (extent_entry_is_ptr(entry))
807                         break;
808
809                 if ((extent_entry_is_crc(entry) && drop_crc) ||
810                     extent_entry_is_stripe_ptr(entry)) {
811                         ret = (void *) ret - extent_entry_bytes(entry);
812                         extent_entry_drop(k, entry);
813                 }
814         }
815
816         return ret;
817 }
818
819 union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
820                                            struct bch_extent_ptr *ptr)
821 {
822         bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
823         union bch_extent_entry *ret =
824                 bch2_bkey_drop_ptr_noerror(k, ptr);
825
826         /*
827          * If we deleted all the dirty pointers and there are still cached
828          * pointers, we could set the cached pointers to dirty if they're not
829          * stale - but to do that correctly we'd need to grab an open_bucket
830          * reference so that we don't race with bucket reuse:
831          */
832         if (have_dirty &&
833             !bch2_bkey_dirty_devs(k.s_c).nr) {
834                 k.k->type = KEY_TYPE_error;
835                 set_bkey_val_u64s(k.k, 0);
836                 ret = NULL;
837         } else if (!bch2_bkey_nr_ptrs(k.s_c)) {
838                 k.k->type = KEY_TYPE_deleted;
839                 set_bkey_val_u64s(k.k, 0);
840                 ret = NULL;
841         }
842
843         return ret;
844 }
845
846 void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
847 {
848         struct bch_extent_ptr *ptr;
849
850         bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
851 }
852
853 void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
854 {
855         struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);
856
857         if (ptr)
858                 bch2_bkey_drop_ptr_noerror(k, ptr);
859 }
860
861 const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
862 {
863         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
864         const struct bch_extent_ptr *ptr;
865
866         bkey_for_each_ptr(ptrs, ptr)
867                 if (ptr->dev == dev)
868                         return ptr;
869
870         return NULL;
871 }
872
873 bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
874 {
875         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
876         const struct bch_extent_ptr *ptr;
877
878         bkey_for_each_ptr(ptrs, ptr)
879                 if (bch2_dev_in_target(c, ptr->dev, target) &&
880                     (!ptr->cached ||
881                      !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
882                         return true;
883
884         return false;
885 }
886
887 bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
888                            struct bch_extent_ptr m, u64 offset)
889 {
890         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
891         const union bch_extent_entry *entry;
892         struct extent_ptr_decoded p;
893
894         bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
895                 if (p.ptr.dev   == m.dev &&
896                     p.ptr.gen   == m.gen &&
897                     (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
898                     (s64) m.offset  - offset)
899                         return true;
900
901         return false;
902 }
903
904 /*
905  * Returns true if two extents refer to the same data:
906  */
907 bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
908 {
909         if (k1.k->type != k2.k->type)
910                 return false;
911
912         if (bkey_extent_is_direct_data(k1.k)) {
913                 struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
914                 struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
915                 const union bch_extent_entry *entry1, *entry2;
916                 struct extent_ptr_decoded p1, p2;
917
918                 if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
919                         return false;
920
921                 bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
922                         bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
923                         if (p1.ptr.dev          == p2.ptr.dev &&
924                             p1.ptr.gen          == p2.ptr.gen &&
925                             (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
926                             (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
927                                 return true;
928
929                 return false;
930         } else {
931                 /* KEY_TYPE_deleted, etc. */
932                 return true;
933         }
934 }
935
936 struct bch_extent_ptr *
937 bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
938 {
939         struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
940         union bch_extent_entry *entry2;
941         struct extent_ptr_decoded p2;
942
943         bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
944                 if (p1.ptr.dev          == p2.ptr.dev &&
945                     p1.ptr.gen          == p2.ptr.gen &&
946                     (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
947                     (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
948                         return &entry2->ptr;
949
950         return NULL;
951 }
952
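/*
 * Mark @ptr cached, dropping the stripe pointer entry that applied to it, if
 * any:
 */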
953 void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
954 {
955         struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
956         union bch_extent_entry *entry;
957         union bch_extent_entry *ec = NULL;
958
959         bkey_extent_entry_for_each(ptrs, entry) {
960                 if (&entry->ptr == ptr) {
961                         ptr->cached = true;
962                         if (ec)
963                                 extent_entry_drop(k, ec);
964                         return;
965                 }
966
967                 if (extent_entry_is_stripe_ptr(entry))
968                         ec = entry;
969                 else if (extent_entry_is_ptr(entry))
970                         ec = NULL;
971         }
972
973         BUG();
974 }
975
976 /*
977  * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
978  *
979  * Returns true if @k should be dropped entirely
980  *
981  * For existing keys, only called when btree nodes are being rewritten, not when
982  * they're merely being compacted/resorted in memory.
983  */
984 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
985 {
986         struct bch_extent_ptr *ptr;
987
988         bch2_bkey_drop_ptrs(k, ptr,
989                 ptr->cached &&
990                 ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
991
992         return bkey_deleted(k.k);
993 }
994
995 void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
996                             struct bkey_s_c k)
997 {
998         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
999         const union bch_extent_entry *entry;
1000         struct bch_extent_crc_unpacked crc;
1001         const struct bch_extent_ptr *ptr;
1002         const struct bch_extent_stripe_ptr *ec;
1003         struct bch_dev *ca;
1004         bool first = true;
1005
1006         if (c)
1007                 prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
1008
1009         bkey_extent_entry_for_each(ptrs, entry) {
1010                 if (!first)
1011                         prt_printf(out, " ");
1012
1013                 switch (__extent_entry_type(entry)) {
1014                 case BCH_EXTENT_ENTRY_ptr:
1015                         ptr = entry_to_ptr(entry);
1016                         ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
1017                                 ? bch_dev_bkey_exists(c, ptr->dev)
1018                                 : NULL;
1019
1020                         if (!ca) {
1021                                 prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
1022                                        (u64) ptr->offset, ptr->gen,
1023                                        ptr->cached ? " cached" : "");
1024                         } else {
1025                                 u32 offset;
1026                                 u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
1027
1028                                 prt_printf(out, "ptr: %u:%llu:%u gen %u",
1029                                            ptr->dev, b, offset, ptr->gen);
1030                                 if (ptr->cached)
1031                                         prt_str(out, " cached");
1032                                 if (ptr->unwritten)
1033                                         prt_str(out, " unwritten");
1034                                 if (ca && ptr_stale(ca, ptr))
1035                                         prt_printf(out, " stale");
1036                         }
1037                         break;
1038                 case BCH_EXTENT_ENTRY_crc32:
1039                 case BCH_EXTENT_ENTRY_crc64:
1040                 case BCH_EXTENT_ENTRY_crc128:
1041                         crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1042
1043                         prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
1044                                crc.compressed_size,
1045                                crc.uncompressed_size,
1046                                crc.offset, crc.nonce,
1047                                bch2_csum_types[crc.csum_type],
1048                                bch2_compression_types[crc.compression_type]);
1049                         break;
1050                 case BCH_EXTENT_ENTRY_stripe_ptr:
1051                         ec = &entry->stripe_ptr;
1052
1053                         prt_printf(out, "ec: idx %llu block %u",
1054                                (u64) ec->idx, ec->block);
1055                         break;
1056                 default:
1057                         prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
1058                         return;
1059                 }
1060
1061                 first = false;
1062         }
1063 }
1064
1065 static int extent_ptr_invalid(const struct bch_fs *c,
1066                               struct bkey_s_c k,
1067                               const struct bch_extent_ptr *ptr,
1068                               unsigned size_ondisk,
1069                               bool metadata,
1070                               struct printbuf *err)
1071 {
1072         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1073         const struct bch_extent_ptr *ptr2;
1074         u64 bucket;
1075         u32 bucket_offset;
1076         struct bch_dev *ca;
1077
1078         if (!bch2_dev_exists2(c, ptr->dev)) {
1079                 prt_printf(err, "pointer to invalid device (%u)", ptr->dev);
1080                 return -BCH_ERR_invalid_bkey;
1081         }
1082
1083         ca = bch_dev_bkey_exists(c, ptr->dev);
1084         bkey_for_each_ptr(ptrs, ptr2)
1085                 if (ptr != ptr2 && ptr->dev == ptr2->dev) {
1086                         prt_printf(err, "multiple pointers to same device (%u)", ptr->dev);
1087                         return -BCH_ERR_invalid_bkey;
1088                 }
1089
1090         bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
1091
1092         if (bucket >= ca->mi.nbuckets) {
1093                 prt_printf(err, "pointer past last bucket (%llu > %llu)",
1094                        bucket, ca->mi.nbuckets);
1095                 return -BCH_ERR_invalid_bkey;
1096         }
1097
1098         if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
1099                 prt_printf(err, "pointer before first bucket (%llu < %u)",
1100                        bucket, ca->mi.first_bucket);
1101                 return -BCH_ERR_invalid_bkey;
1102         }
1103
1104         if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
1105                 prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)",
1106                        bucket_offset, size_ondisk, ca->mi.bucket_size);
1107                 return -BCH_ERR_invalid_bkey;
1108         }
1109
1110         return 0;
1111 }
1112
1113 int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
1114                            unsigned flags, struct printbuf *err)
1115 {
1116         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1117         const union bch_extent_entry *entry;
1118         struct bch_extent_crc_unpacked crc;
1119         unsigned size_ondisk = k.k->size;
1120         unsigned nonce = UINT_MAX;
1121         unsigned nr_ptrs = 0;
1122         bool unwritten = false, have_ec = false, crc_since_last_ptr = false;
1123         int ret;
1124
1125         if (bkey_is_btree_ptr(k.k))
1126                 size_ondisk = btree_sectors(c);
1127
1128         bkey_extent_entry_for_each(ptrs, entry) {
1129                 if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
1130                         prt_printf(err, "invalid extent entry type (got %u, max %u)",
1131                                __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
1132                         return -BCH_ERR_invalid_bkey;
1133                 }
1134
1135                 if (bkey_is_btree_ptr(k.k) &&
1136                     !extent_entry_is_ptr(entry)) {
1137                         prt_printf(err, "has non ptr field");
1138                         return -BCH_ERR_invalid_bkey;
1139                 }
1140
1141                 switch (extent_entry_type(entry)) {
1142                 case BCH_EXTENT_ENTRY_ptr:
1143                         ret = extent_ptr_invalid(c, k, &entry->ptr, size_ondisk,
1144                                                  false, err);
1145                         if (ret)
1146                                 return ret;
1147
1148                         if (nr_ptrs && unwritten != entry->ptr.unwritten) {
1149                                 prt_printf(err, "extent with unwritten and written ptrs");
1150                                 return -BCH_ERR_invalid_bkey;
1151                         }
1152
1153                         if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
1154                                 prt_printf(err, "has unwritten ptrs");
1155                                 return -BCH_ERR_invalid_bkey;
1156                         }
1157
1158                         if (entry->ptr.cached && have_ec) {
1159                                 prt_printf(err, "cached, erasure coded ptr");
1160                                 return -BCH_ERR_invalid_bkey;
1161                         }
1162
1163                         unwritten = entry->ptr.unwritten;
1164                         have_ec = false;
1165                         crc_since_last_ptr = false;
1166                         nr_ptrs++;
1167                         break;
1168                 case BCH_EXTENT_ENTRY_crc32:
1169                 case BCH_EXTENT_ENTRY_crc64:
1170                 case BCH_EXTENT_ENTRY_crc128:
1171                         crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1172
1173                         if (crc.offset + crc.live_size >
1174                             crc.uncompressed_size) {
1175                                 prt_printf(err, "checksum offset + key size > uncompressed size");
1176                                 return -BCH_ERR_invalid_bkey;
1177                         }
1178
1179                         size_ondisk = crc.compressed_size;
1180
1181                         if (!bch2_checksum_type_valid(c, crc.csum_type)) {
1182                                 prt_printf(err, "invalid checksum type");
1183                                 return -BCH_ERR_invalid_bkey;
1184                         }
1185
1186                         if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
1187                                 prt_printf(err, "invalid compression type");
1188                                 return -BCH_ERR_invalid_bkey;
1189                         }
1190
1191                         if (bch2_csum_type_is_encryption(crc.csum_type)) {
1192                                 if (nonce == UINT_MAX)
1193                                         nonce = crc.offset + crc.nonce;
1194                                 else if (nonce != crc.offset + crc.nonce) {
1195                                         prt_printf(err, "incorrect nonce");
1196                                         return -BCH_ERR_invalid_bkey;
1197                                 }
1198                         }
1199
1200                         if (crc_since_last_ptr) {
1201                                 prt_printf(err, "redundant crc entry");
1202                                 return -BCH_ERR_invalid_bkey;
1203                         }
1204                         crc_since_last_ptr = true;
1205                         break;
1206                 case BCH_EXTENT_ENTRY_stripe_ptr:
1207                         if (have_ec) {
1208                                 prt_printf(err, "redundant stripe entry");
1209                                 return -BCH_ERR_invalid_bkey;
1210                         }
1211                         have_ec = true;
1212                         break;
1213                 }
1214         }
1215
1216         if (!nr_ptrs) {
1217                 prt_str(err, "no ptrs");
1218                 return -BCH_ERR_invalid_bkey;
1219         }
1220
1221         if (nr_ptrs >= BCH_BKEY_PTRS_MAX) {
1222                 prt_str(err, "too many ptrs");
1223                 return -BCH_ERR_invalid_bkey;
1224         }
1225
1226         if (crc_since_last_ptr) {
1227                 prt_printf(err, "redundant crc entry");
1228                 return -BCH_ERR_invalid_bkey;
1229         }
1230
1231         if (have_ec) {
1232                 prt_printf(err, "redundant stripe entry");
1233                 return -BCH_ERR_invalid_bkey;
1234         }
1235
1236         return 0;
1237 }
1238
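/*
 * Byte swap an extent value, used when reading keys written in the opposite
 * endianness: swab every u64, then fix up the checksum fields inside the crc
 * entries:
 */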
1239 void bch2_ptr_swab(struct bkey_s k)
1240 {
1241         struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1242         union bch_extent_entry *entry;
1243         u64 *d;
1244
1245         for (d =  (u64 *) ptrs.start;
1246              d != (u64 *) ptrs.end;
1247              d++)
1248                 *d = swab64(*d);
1249
1250         for (entry = ptrs.start;
1251              entry < ptrs.end;
1252              entry = extent_entry_next(entry)) {
1253                 switch (extent_entry_type(entry)) {
1254                 case BCH_EXTENT_ENTRY_ptr:
1255                         break;
1256                 case BCH_EXTENT_ENTRY_crc32:
1257                         entry->crc32.csum = swab32(entry->crc32.csum);
1258                         break;
1259                 case BCH_EXTENT_ENTRY_crc64:
1260                         entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
1261                         entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
1262                         break;
1263                 case BCH_EXTENT_ENTRY_crc128:
1264                         entry->crc128.csum.hi = (__force __le64)
1265                                 swab64((__force u64) entry->crc128.csum.hi);
1266                         entry->crc128.csum.lo = (__force __le64)
1267                                 swab64((__force u64) entry->crc128.csum.lo);
1268                         break;
1269                 case BCH_EXTENT_ENTRY_stripe_ptr:
1270                         break;
1271                 }
1272         }
1273 }
1274
1275 /* Generic extent code: */
1276
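/*
 * Trim the front of @k so it starts at @where: shrink the key size, advance
 * pointer/crc offsets (or the reflink index / inline data) past the dropped
 * sectors, and return the change in value size (-(u64s dropped)):
 */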
1277 int bch2_cut_front_s(struct bpos where, struct bkey_s k)
1278 {
1279         unsigned new_val_u64s = bkey_val_u64s(k.k);
1280         int val_u64s_delta;
1281         u64 sub;
1282
1283         if (bkey_le(where, bkey_start_pos(k.k)))
1284                 return 0;
1285
1286         EBUG_ON(bkey_gt(where, k.k->p));
1287
1288         sub = where.offset - bkey_start_offset(k.k);
1289
1290         k.k->size -= sub;
1291
1292         if (!k.k->size) {
1293                 k.k->type = KEY_TYPE_deleted;
1294                 new_val_u64s = 0;
1295         }
1296
1297         switch (k.k->type) {
1298         case KEY_TYPE_extent:
1299         case KEY_TYPE_reflink_v: {
1300                 struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
1301                 union bch_extent_entry *entry;
1302                 bool seen_crc = false;
1303
1304                 bkey_extent_entry_for_each(ptrs, entry) {
1305                         switch (extent_entry_type(entry)) {
1306                         case BCH_EXTENT_ENTRY_ptr:
1307                                 if (!seen_crc)
1308                                         entry->ptr.offset += sub;
1309                                 break;
1310                         case BCH_EXTENT_ENTRY_crc32:
1311                                 entry->crc32.offset += sub;
1312                                 break;
1313                         case BCH_EXTENT_ENTRY_crc64:
1314                                 entry->crc64.offset += sub;
1315                                 break;
1316                         case BCH_EXTENT_ENTRY_crc128:
1317                                 entry->crc128.offset += sub;
1318                                 break;
1319                         case BCH_EXTENT_ENTRY_stripe_ptr:
1320                                 break;
1321                         }
1322
1323                         if (extent_entry_is_crc(entry))
1324                                 seen_crc = true;
1325                 }
1326
1327                 break;
1328         }
1329         case KEY_TYPE_reflink_p: {
1330                 struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
1331
1332                 le64_add_cpu(&p.v->idx, sub);
1333                 break;
1334         }
1335         case KEY_TYPE_inline_data:
1336         case KEY_TYPE_indirect_inline_data: {
1337                 void *p = bkey_inline_data_p(k);
1338                 unsigned bytes = bkey_inline_data_bytes(k.k);
1339
1340                 sub = min_t(u64, sub << 9, bytes);
1341
1342                 memmove(p, p + sub, bytes - sub);
1343
1344                 new_val_u64s -= sub >> 3;
1345                 break;
1346         }
1347         }
1348
1349         val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1350         BUG_ON(val_u64s_delta < 0);
1351
1352         set_bkey_val_u64s(k.k, new_val_u64s);
1353         memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1354         return -val_u64s_delta;
1355 }
1356
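/*
 * Trim the back of @k so it ends at @where; as with bch2_cut_front_s(), the
 * return value is the (non-positive) change in value size in u64s:
 */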
1357 int bch2_cut_back_s(struct bpos where, struct bkey_s k)
1358 {
1359         unsigned new_val_u64s = bkey_val_u64s(k.k);
1360         int val_u64s_delta;
1361         u64 len = 0;
1362
1363         if (bkey_ge(where, k.k->p))
1364                 return 0;
1365
1366         EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
1367
1368         len = where.offset - bkey_start_offset(k.k);
1369
1370         k.k->p.offset = where.offset;
1371         k.k->size = len;
1372
1373         if (!len) {
1374                 k.k->type = KEY_TYPE_deleted;
1375                 new_val_u64s = 0;
1376         }
1377
1378         switch (k.k->type) {
1379         case KEY_TYPE_inline_data:
1380         case KEY_TYPE_indirect_inline_data:
1381                 new_val_u64s = (bkey_inline_data_offset(k.k) +
1382                                 min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
1383                 break;
1384         }
1385
1386         val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
1387         BUG_ON(val_u64s_delta < 0);
1388
1389         set_bkey_val_u64s(k.k, new_val_u64s);
1390         memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
1391         return -val_u64s_delta;
1392 }