// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"

static unsigned bch2_crc_field_size_max[] = {
        [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
        [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
        [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
                                 struct bch_extent_crc_unpacked,
                                 enum bch_extent_entry_type);

static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
                                                   unsigned dev)
{
        struct bch_dev_io_failures *i;

        for (i = f->devs; i < f->devs + f->nr; i++)
                if (i->dev == dev)
                        return i;

        return NULL;
}

void bch2_mark_io_failure(struct bch_io_failures *failed,
                          struct extent_ptr_decoded *p)
{
        struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

        if (!f) {
                BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

                f = &failed->devs[failed->nr++];
                f->dev          = p->ptr.dev;
                f->idx          = p->idx;
                f->nr_failed    = 1;
                f->nr_retries   = 0;
        } else if (p->idx != f->idx) {
                f->idx          = p->idx;
                f->nr_failed    = 1;
                f->nr_retries   = 0;
        } else {
                f->nr_failed++;
        }
}

/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
                              const struct extent_ptr_decoded p1,
                              const struct extent_ptr_decoded p2)
{
        if (likely(!p1.idx && !p2.idx)) {
                struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
                struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

                u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
                u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

                /* Pick at random, biased in favor of the faster device: */
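                /*
                 * (assuming bch2_rand_range(n) is uniform in [0, n): with
                 * e.g. l1 = 1ms and l2 = 3ms, p1 wins with probability
                 * l2 / (l1 + l2) = 75%, so the lower-latency device is
                 * proportionally more likely to be chosen)
                 */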

                return bch2_rand_range(l1 + l2) > l1;
        }

        if (bch2_force_reconstruct_read)
                return p1.idx > p2.idx;

        return p1.idx < p2.idx;
}

/*
 * This picks a non-stale pointer, preferring devices that haven't recently
 * seen IO failures (@failed may be NULL, meaning any device may be picked).
 * If there are no non-stale pointers on other devices, it will still pick a
 * pointer from a device that previously failed.
 */
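/*
 * Returns 1 if *pick was filled in, 0 if there's nothing to read (an
 * unwritten extent, or only stale cached pointers), and -EIO if there are
 * dirty pointers but none of them could be read:
 */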
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
                               struct bch_io_failures *failed,
                               struct extent_ptr_decoded *pick)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bch_dev_io_failures *f;
        struct bch_dev *ca;
        int ret = 0;

        if (k.k->type == KEY_TYPE_error)
                return -EIO;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                /*
                 * Unwritten extent: no need to actually read, treat it as a
                 * hole and return 0s:
                 */
                if (p.ptr.unwritten)
                        return 0;

                ca = bch_dev_bkey_exists(c, p.ptr.dev);

                /*
                 * If there are any dirty pointers it's an error if we can't
                 * read:
                 */
                if (!ret && !p.ptr.cached)
                        ret = -EIO;

                if (p.ptr.cached && ptr_stale(ca, &p.ptr))
                        continue;

                f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
                if (f)
                        p.idx = f->nr_failed < f->nr_retries
                                ? f->idx
                                : f->idx + 1;

                if (!p.idx &&
                    !bch2_dev_is_readable(ca))
                        p.idx++;

                if (bch2_force_reconstruct_read &&
                    !p.idx && p.has_ec)
                        p.idx++;

                if (p.idx >= (unsigned) p.has_ec + 1)
                        continue;

                if (ret > 0 && !ptr_better(c, p, *pick))
                        continue;

                *pick = p;
                ret = 1;
        }

        return ret;
}

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
                           enum bkey_invalid_flags flags,
                           struct printbuf *err)
{
        int ret = 0;

        bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
                         btree_ptr_val_too_big,
                         "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);

        ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
        return ret;
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
                            struct bkey_s_c k)
{
        bch2_bkey_ptrs_to_text(out, c, k);
}

int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
                              enum bkey_invalid_flags flags,
                              struct printbuf *err)
{
        int ret = 0;

        bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
                         btree_ptr_v2_val_too_big,
                         "value too big (%zu > %zu)",
                         bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);

        ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
        return ret;
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
                               struct bkey_s_c k)
{
        struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

        prt_printf(out, "seq %llx written %u min_key %s",
               le64_to_cpu(bp.v->seq),
               le16_to_cpu(bp.v->sectors_written),
               BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

        bch2_bpos_to_text(out, bp.v->min_key);
        prt_printf(out, " ");
        bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
                              unsigned big_endian, int write,
                              struct bkey_s k)
{
        struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

        compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

        if (version < bcachefs_metadata_version_inode_btree_change &&
            btree_id_is_extents(btree_id) &&
            !bkey_eq(bp.v->min_key, POS_MIN))
                bp.v->min_key = write
                        ? bpos_nosnap_predecessor(bp.v->min_key)
                        : bpos_nosnap_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
        struct bkey_ptrs   l_ptrs = bch2_bkey_ptrs(l);
        struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
        union bch_extent_entry *en_l;
        const union bch_extent_entry *en_r;
        struct extent_ptr_decoded lp, rp;
        bool use_right_ptr;
        struct bch_dev *ca;

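        /*
         * The two extents must have the same sequence of entry types, or
         * they can't be merged:
         */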
        en_l = l_ptrs.start;
        en_r = r_ptrs.start;
        while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
                if (extent_entry_type(en_l) != extent_entry_type(en_r))
                        return false;

                en_l = extent_entry_next(en_l);
                en_r = extent_entry_next(en_r);
        }

        if (en_l < l_ptrs.end || en_r < r_ptrs.end)
                return false;

        en_l = l_ptrs.start;
        en_r = r_ptrs.start;
        lp.crc = bch2_extent_crc_unpack(l.k, NULL);
        rp.crc = bch2_extent_crc_unpack(r.k, NULL);

        while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
               __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
                if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
                    rp.ptr.offset + rp.crc.offset ||
                    lp.ptr.dev                  != rp.ptr.dev ||
                    lp.ptr.gen                  != rp.ptr.gen ||
                    lp.ptr.unwritten            != rp.ptr.unwritten ||
                    lp.has_ec                   != rp.has_ec)
                        return false;

                /* Extents may not straddle buckets: */
                ca = bch_dev_bkey_exists(c, lp.ptr.dev);
                if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
                        return false;

                if (lp.has_ec                   != rp.has_ec ||
                    (lp.has_ec &&
                     (lp.ec.block               != rp.ec.block ||
                      lp.ec.redundancy          != rp.ec.redundancy ||
                      lp.ec.idx                 != rp.ec.idx)))
                        return false;

                if (lp.crc.compression_type     != rp.crc.compression_type ||
                    lp.crc.nonce                != rp.crc.nonce)
                        return false;

                if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
                    lp.crc.uncompressed_size) {
                        /* can use left extent's crc entry */
                } else if (lp.crc.live_size <= rp.crc.offset) {
                        /* can use right extent's crc entry */
                } else {
                        /* check if checksums can be merged: */
                        if (lp.crc.csum_type            != rp.crc.csum_type ||
                            lp.crc.nonce                != rp.crc.nonce ||
                            crc_is_compressed(lp.crc) ||
                            !bch2_checksum_mergeable(lp.crc.csum_type))
                                return false;

                        if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
                            rp.crc.offset)
                                return false;

                        if (lp.crc.csum_type &&
                            lp.crc.uncompressed_size +
                            rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
                                return false;
                }

                en_l = extent_entry_next(en_l);
                en_r = extent_entry_next(en_r);
        }

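        /*
         * The combined uncompressed size must still fit in the packed crc
         * entry's size field:
         */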
        en_l = l_ptrs.start;
        en_r = r_ptrs.start;
        while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
                if (extent_entry_is_crc(en_l)) {
                        struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
                        struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

                        if (crc_l.uncompressed_size + crc_r.uncompressed_size >
                            bch2_crc_field_size_max[extent_entry_type(en_l)])
                                return false;
                }

                en_l = extent_entry_next(en_l);
                en_r = extent_entry_next(en_r);
        }

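        /* All checks passed: do the merge, combining crc entries as needed: */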
        use_right_ptr = false;
        en_l = l_ptrs.start;
        en_r = r_ptrs.start;
        while (en_l < l_ptrs.end) {
                if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
                    use_right_ptr)
                        en_l->ptr = en_r->ptr;

                if (extent_entry_is_crc(en_l)) {
                        struct bch_extent_crc_unpacked crc_l =
                                bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
                        struct bch_extent_crc_unpacked crc_r =
                                bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

                        use_right_ptr = false;

                        if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
                            crc_l.uncompressed_size) {
                                /* can use left extent's crc entry */
                        } else if (crc_l.live_size <= crc_r.offset) {
                                /* can use right extent's crc entry */
                                crc_r.offset -= crc_l.live_size;
                                bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
                                                     extent_entry_type(en_l));
                                use_right_ptr = true;
                        } else {
                                crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
                                                                 crc_l.csum,
                                                                 crc_r.csum,
                                                                 crc_r.uncompressed_size << 9);

                                crc_l.uncompressed_size += crc_r.uncompressed_size;
                                crc_l.compressed_size   += crc_r.compressed_size;
                                bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
                                                     extent_entry_type(en_l));
                        }
                }

                en_l = extent_entry_next(en_l);
                en_r = extent_entry_next(en_r);
        }

        bch2_key_resize(l.k, l.k->size + r.k->size);
        return true;
}

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
                             enum bkey_invalid_flags flags,
                             struct printbuf *err)
{
        struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
        int ret = 0;

        bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
                         reservation_key_nr_replicas_invalid,
                         "invalid nr_replicas (%u)", r.v->nr_replicas);
fsck_err:
        return ret;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
                              struct bkey_s_c k)
{
        struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

        prt_printf(out, "generation %u replicas %u",
               le32_to_cpu(r.v->generation),
               r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
        struct bkey_s_reservation l = bkey_s_to_reservation(_l);
        struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

        if (l.v->generation != r.v->generation ||
            l.v->nr_replicas != r.v->nr_replicas)
                return false;

        bch2_key_resize(l.k, l.k->size + r.k->size);
        return true;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
                                         struct bch_extent_crc_unpacked r)
{
        return (l.csum_type             != r.csum_type ||
                l.compression_type      != r.compression_type ||
                l.compressed_size       != r.compressed_size ||
                l.uncompressed_size     != r.uncompressed_size ||
                l.offset                != r.offset ||
                l.live_size             != r.live_size ||
                l.nonce                 != r.nonce ||
                bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
                                  struct bch_extent_crc_unpacked n)
{
        return !crc_is_compressed(u) &&
                u.csum_type &&
                u.uncompressed_size > u.live_size &&
                bch2_csum_type_is_encryption(u.csum_type) ==
                bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
                                 struct bch_extent_crc_unpacked n)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        struct bch_extent_crc_unpacked crc;
        const union bch_extent_entry *i;

        if (!n.csum_type)
                return false;

        bkey_for_each_crc(k.k, ptrs, crc, i)
                if (can_narrow_crc(crc, n))
                        return true;

        return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
        struct bch_extent_crc_unpacked u;
        struct extent_ptr_decoded p;
        union bch_extent_entry *i;
        bool ret = false;

        /* Find a checksum entry that covers only live data: */
        if (!n.csum_type) {
                bkey_for_each_crc(&k->k, ptrs, u, i)
                        if (!crc_is_compressed(u) &&
                            u.csum_type &&
                            u.live_size == u.uncompressed_size) {
                                n = u;
                                goto found;
                        }
                return false;
        }
found:
        BUG_ON(crc_is_compressed(n));
        BUG_ON(n.offset);
        BUG_ON(n.live_size != k->k.size);

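        /*
         * Dropping and re-appending a pointer shifts the other entries in
         * the key, so restart iteration from the start after each
         * modification:
         */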
restart_narrow_pointers:
        ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

        bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
                if (can_narrow_crc(p.crc, n)) {
                        bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
                        p.ptr.offset += p.crc.offset;
                        p.crc = n;
                        bch2_extent_ptr_decoded_append(k, &p);
                        ret = true;
                        goto restart_narrow_pointers;
                }

        return ret;
}

static void bch2_extent_crc_pack(union bch_extent_crc *dst,
                                 struct bch_extent_crc_unpacked src,
                                 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)                                   \
                _dst.type               = 1 << type;                    \
                _dst.csum_type          = _src.csum_type,               \
                _dst.compression_type   = _src.compression_type,        \
                _dst._compressed_size   = _src.compressed_size - 1,     \
                _dst._uncompressed_size = _src.uncompressed_size - 1,   \
                _dst.offset             = _src.offset

        switch (type) {
        case BCH_EXTENT_ENTRY_crc32:
                set_common_fields(dst->crc32, src);
                dst->crc32.csum         = (u32 __force) *((__le32 *) &src.csum.lo);
                break;
        case BCH_EXTENT_ENTRY_crc64:
                set_common_fields(dst->crc64, src);
                dst->crc64.nonce        = src.nonce;
                dst->crc64.csum_lo      = (u64 __force) src.csum.lo;
                dst->crc64.csum_hi      = (u64 __force) *((__le16 *) &src.csum.hi);
                break;
        case BCH_EXTENT_ENTRY_crc128:
                set_common_fields(dst->crc128, src);
                dst->crc128.nonce       = src.nonce;
                dst->crc128.csum        = src.csum;
                break;
        default:
                BUG();
        }
#undef set_common_fields
}

void bch2_extent_crc_append(struct bkey_i *k,
                            struct bch_extent_crc_unpacked new)
{
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
        union bch_extent_crc *crc = (void *) ptrs.end;
        enum bch_extent_entry_type type;

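        /* Use the smallest entry type that fits the csum, size and nonce: */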
        if (bch_crc_bytes[new.csum_type]        <= 4 &&
            new.uncompressed_size               <= CRC32_SIZE_MAX &&
            new.nonce                           <= CRC32_NONCE_MAX)
                type = BCH_EXTENT_ENTRY_crc32;
        else if (bch_crc_bytes[new.csum_type]   <= 10 &&
                   new.uncompressed_size        <= CRC64_SIZE_MAX &&
                   new.nonce                    <= CRC64_NONCE_MAX)
                type = BCH_EXTENT_ENTRY_crc64;
        else if (bch_crc_bytes[new.csum_type]   <= 16 &&
                   new.uncompressed_size        <= CRC128_SIZE_MAX &&
                   new.nonce                    <= CRC128_NONCE_MAX)
                type = BCH_EXTENT_ENTRY_crc128;
        else
                BUG();

        bch2_extent_crc_pack(crc, new, type);

        k->k.u64s += extent_entry_u64s(ptrs.end);

        EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}

/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
        return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
        return k.k->type == KEY_TYPE_reservation
                ? bkey_s_c_to_reservation(k).v->nr_replicas
                : bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
        unsigned ret = 0;

        if (k.k->type == KEY_TYPE_reservation) {
                ret = bkey_s_c_to_reservation(k).v->nr_replicas;
        } else {
                struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;

                bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                        ret += !p.ptr.cached && !crc_is_compressed(p.crc);
        }

        return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        unsigned ret = 0;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                if (!p.ptr.cached && crc_is_compressed(p.crc))
                        ret += p.crc.compressed_size;

        return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct bch_extent_crc_unpacked crc;

        bkey_for_each_crc(k.k, ptrs, crc, entry)
                if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
                        return true;
        return false;
}

unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p = { 0 };
        unsigned replicas = 0;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (p.ptr.cached)
                        continue;

                if (p.has_ec)
                        replicas += p.ec.redundancy;

                replicas++;
        }

        return replicas;
}

static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
        if (p->ptr.cached)
                return 0;

        return p->has_ec
                ? p->ec.redundancy + 1
                : ca->mi.durability;
}

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);

        return __extent_ptr_durability(ca, p);
}

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);

        if (ca->mi.state == BCH_MEMBER_STATE_failed)
                return 0;

        return __extent_ptr_durability(ca, p);
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        unsigned durability = 0;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                durability += bch2_extent_ptr_durability(c, &p);

        return durability;
}

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        unsigned durability = 0;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
                        durability += bch2_extent_ptr_durability(c, &p);

        return durability;
}

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
        union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
        union bch_extent_entry *next = extent_entry_next(entry);

        memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
        k->k.u64s -= extent_entry_u64s(entry);
}

void bch2_extent_ptr_decoded_append(struct bkey_i *k,
                                    struct extent_ptr_decoded *p)
{
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
        struct bch_extent_crc_unpacked crc =
                bch2_extent_crc_unpack(&k->k, NULL);
        union bch_extent_entry *pos;

        if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
                pos = ptrs.start;
                goto found;
        }

        bkey_for_each_crc(&k->k, ptrs, crc, pos)
                if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
                        pos = extent_entry_next(pos);
                        goto found;
                }

        bch2_extent_crc_append(k, p->crc);
        pos = bkey_val_end(bkey_i_to_s(k));
found:
        p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
        __extent_entry_insert(k, pos, to_entry(&p->ptr));

        if (p->has_ec) {
                p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
                __extent_entry_insert(k, pos, to_entry(&p->ec));
        }
}

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
                                          union bch_extent_entry *entry)
{
        union bch_extent_entry *i = ptrs.start;

        if (i == entry)
                return NULL;

        while (extent_entry_next(i) != entry)
                i = extent_entry_next(i);
        return i;
}

/*
 * Returns pointer to the next entry after the one being dropped:
 */
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
                                                   struct bch_extent_ptr *ptr)
{
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
        union bch_extent_entry *entry = to_entry(ptr), *next;
        union bch_extent_entry *ret = entry;
        bool drop_crc = true;

        EBUG_ON(ptr < &ptrs.start->ptr ||
                ptr >= &ptrs.end->ptr);
        EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

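        /*
         * A crc entry covers all the pointers that follow it, up to the next
         * crc entry: only drop the crc preceding @ptr if no later pointer in
         * the same region still needs it:
         */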
        for (next = extent_entry_next(entry);
             next != ptrs.end;
             next = extent_entry_next(next)) {
                if (extent_entry_is_crc(next)) {
                        break;
                } else if (extent_entry_is_ptr(next)) {
                        drop_crc = false;
                        break;
                }
        }

        extent_entry_drop(k, entry);

        while ((entry = extent_entry_prev(ptrs, entry))) {
                if (extent_entry_is_ptr(entry))
                        break;

                if ((extent_entry_is_crc(entry) && drop_crc) ||
                    extent_entry_is_stripe_ptr(entry)) {
                        ret = (void *) ret - extent_entry_bytes(entry);
                        extent_entry_drop(k, entry);
                }
        }

        return ret;
}

union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
                                           struct bch_extent_ptr *ptr)
{
        bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
        union bch_extent_entry *ret =
                bch2_bkey_drop_ptr_noerror(k, ptr);

        /*
         * If we deleted all the dirty pointers and there's still cached
         * pointers, we could set the cached pointers to dirty if they're not
         * stale - but to do that correctly we'd need to grab an open_bucket
         * reference so that we don't race with bucket reuse:
         */
        if (have_dirty &&
            !bch2_bkey_dirty_devs(k.s_c).nr) {
                k.k->type = KEY_TYPE_error;
                set_bkey_val_u64s(k.k, 0);
                ret = NULL;
        } else if (!bch2_bkey_nr_ptrs(k.s_c)) {
                k.k->type = KEY_TYPE_deleted;
                set_bkey_val_u64s(k.k, 0);
                ret = NULL;
        }

        return ret;
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
        struct bch_extent_ptr *ptr;

        bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
        struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);

        if (ptr)
                bch2_bkey_drop_ptr_noerror(k, ptr);
}

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

        bkey_for_each_ptr(ptrs, ptr)
                if (ptr->dev == dev)
                        return ptr;

        return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

        bkey_for_each_ptr(ptrs, ptr)
                if (bch2_dev_in_target(c, ptr->dev, target) &&
                    (!ptr->cached ||
                     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
                        return true;

        return false;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
                           struct bch_extent_ptr m, u64 offset)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
                if (p.ptr.dev   == m.dev &&
                    p.ptr.gen   == m.gen &&
                    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
                    (s64) m.offset  - offset)
                        return true;

        return false;
}

/*
 * Returns true if two extents refer to the same data:
 */
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
        if (k1.k->type != k2.k->type)
                return false;

        if (bkey_extent_is_direct_data(k1.k)) {
                struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
                struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
                const union bch_extent_entry *entry1, *entry2;
                struct extent_ptr_decoded p1, p2;

                if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
                        return false;

                bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
                        bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
                                if (p1.ptr.dev          == p2.ptr.dev &&
                                    p1.ptr.gen          == p2.ptr.gen &&
                                    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
                                    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
                                        return true;

                return false;
        } else {
                /* KEY_TYPE_deleted, etc. */
                return true;
        }
}

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
{
        struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
        union bch_extent_entry *entry2;
        struct extent_ptr_decoded p2;

        bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
                if (p1.ptr.dev          == p2.ptr.dev &&
                    p1.ptr.gen          == p2.ptr.gen &&
                    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
                    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
                        return &entry2->ptr;

        return NULL;
}

void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
{
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
        union bch_extent_entry *entry;
        union bch_extent_entry *ec = NULL;

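        /*
         * Cached pointers may not be erasure coded (see the
         * ptr_cached_and_erasure_coded check in bch2_bkey_ptrs_invalid()):
         * when marking a pointer cached, also drop its stripe entry, if any:
         */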
        bkey_extent_entry_for_each(ptrs, entry) {
                if (&entry->ptr == ptr) {
                        ptr->cached = true;
                        if (ec)
                                extent_entry_drop(k, ec);
                        return;
                }

                if (extent_entry_is_stripe_ptr(entry))
                        ec = entry;
                else if (extent_entry_is_ptr(entry))
                        ec = NULL;
        }

        BUG();
}

/*
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
        struct bch_extent_ptr *ptr;

        bch2_bkey_drop_ptrs(k, ptr,
                ptr->cached &&
                ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

        return bkey_deleted(k.k);
}

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                            struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        bool first = true;

        if (c)
                prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

        bkey_extent_entry_for_each(ptrs, entry) {
                if (!first)
                        prt_printf(out, " ");

                switch (__extent_entry_type(entry)) {
                case BCH_EXTENT_ENTRY_ptr: {
                        const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
                        struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
                                ? bch_dev_bkey_exists(c, ptr->dev)
                                : NULL;

                        if (!ca) {
                                prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
                                       (u64) ptr->offset, ptr->gen,
                                       ptr->cached ? " cached" : "");
                        } else {
                                u32 offset;
                                u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

                                prt_printf(out, "ptr: %u:%llu:%u gen %u",
                                           ptr->dev, b, offset, ptr->gen);
                                if (ptr->cached)
                                        prt_str(out, " cached");
                                if (ptr->unwritten)
                                        prt_str(out, " unwritten");
                                if (ca && ptr_stale(ca, ptr))
                                        prt_printf(out, " stale");
                        }
                        break;
                }
                case BCH_EXTENT_ENTRY_crc32:
                case BCH_EXTENT_ENTRY_crc64:
                case BCH_EXTENT_ENTRY_crc128: {
                        struct bch_extent_crc_unpacked crc =
                                bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

                        prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
                               crc.compressed_size,
                               crc.uncompressed_size,
                               crc.offset, crc.nonce,
                               bch2_csum_types[crc.csum_type]);
                        bch2_prt_compression_type(out, crc.compression_type);
                        break;
                }
                case BCH_EXTENT_ENTRY_stripe_ptr: {
                        const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;

                        prt_printf(out, "ec: idx %llu block %u",
                               (u64) ec->idx, ec->block);
                        break;
                }
                case BCH_EXTENT_ENTRY_rebalance: {
                        const struct bch_extent_rebalance *r = &entry->rebalance;

                        prt_str(out, "rebalance: target ");
                        if (c)
                                bch2_target_to_text(out, c, r->target);
                        else
                                prt_printf(out, "%u", r->target);
                        prt_str(out, " compression ");
                        bch2_compression_opt_to_text(out, r->compression);
                        break;
                }
                default:
                        prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
                        return;
                }

                first = false;
        }
}

static int extent_ptr_invalid(struct bch_fs *c,
                              struct bkey_s_c k,
                              enum bkey_invalid_flags flags,
                              const struct bch_extent_ptr *ptr,
                              unsigned size_ondisk,
                              bool metadata,
                              struct printbuf *err)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        u64 bucket;
        u32 bucket_offset;
        struct bch_dev *ca;
        int ret = 0;

        if (!bch2_dev_exists2(c, ptr->dev)) {
                /*
                 * If we're in the write path this key might have already been
                 * overwritten, and we could be seeing a device that doesn't
                 * exist anymore due to racing with device removal:
                 */
                if (flags & BKEY_INVALID_WRITE)
                        return 0;

                bkey_fsck_err(c, err, ptr_to_invalid_device,
                           "pointer to invalid device (%u)", ptr->dev);
        }

        ca = bch_dev_bkey_exists(c, ptr->dev);
        bkey_for_each_ptr(ptrs, ptr2)
                bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
                                 ptr_to_duplicate_device,
                                 "multiple pointers to same device (%u)", ptr->dev);

        bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);

        bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err,
                         ptr_after_last_bucket,
                         "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets);
        bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err,
                         ptr_before_first_bucket,
                         "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket);
        bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err,
                         ptr_spans_multiple_buckets,
                         "pointer spans multiple buckets (%u + %u > %u)",
                       bucket_offset, size_ondisk, ca->mi.bucket_size);
fsck_err:
        return ret;
}

int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
                           enum bkey_invalid_flags flags,
                           struct printbuf *err)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct bch_extent_crc_unpacked crc;
        unsigned size_ondisk = k.k->size;
        unsigned nonce = UINT_MAX;
        unsigned nr_ptrs = 0;
        bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
        int ret = 0;

        if (bkey_is_btree_ptr(k.k))
                size_ondisk = btree_sectors(c);

        bkey_extent_entry_for_each(ptrs, entry) {
                bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
                        extent_ptrs_invalid_entry,
                        "invalid extent entry type (got %u, max %u)",
                        __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);

                bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
                                 !extent_entry_is_ptr(entry), c, err,
                                 btree_ptr_has_non_ptr,
                                 "has non ptr field");

                switch (extent_entry_type(entry)) {
                case BCH_EXTENT_ENTRY_ptr:
                        ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
                                                 size_ondisk, false, err);
                        if (ret)
                                return ret;

                        bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
                                         ptr_cached_and_erasure_coded,
                                         "cached, erasure coded ptr");

                        if (!entry->ptr.unwritten)
                                have_written = true;
                        else
                                have_unwritten = true;

                        have_ec = false;
                        crc_since_last_ptr = false;
                        nr_ptrs++;
                        break;
                case BCH_EXTENT_ENTRY_crc32:
                case BCH_EXTENT_ENTRY_crc64:
                case BCH_EXTENT_ENTRY_crc128:
                        crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

                        bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
                                         ptr_crc_uncompressed_size_too_small,
                                         "checksum offset + key size > uncompressed size");
                        bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
                                         ptr_crc_csum_type_unknown,
                                         "invalid checksum type");
                        bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
                                         ptr_crc_compression_type_unknown,
                                         "invalid compression type");

                        if (bch2_csum_type_is_encryption(crc.csum_type)) {
                                if (nonce == UINT_MAX)
                                        nonce = crc.offset + crc.nonce;
                                else if (nonce != crc.offset + crc.nonce)
                                        bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
                                                      "incorrect nonce");
                        }

                        bkey_fsck_err_on(crc_since_last_ptr, c, err,
                                         ptr_crc_redundant,
                                         "redundant crc entry");
                        crc_since_last_ptr = true;

                        bkey_fsck_err_on(crc_is_encoded(crc) &&
                                         (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
                                         (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err,
                                         ptr_crc_uncompressed_size_too_big,
                                         "too large encoded extent");

                        size_ondisk = crc.compressed_size;
                        break;
                case BCH_EXTENT_ENTRY_stripe_ptr:
                        bkey_fsck_err_on(have_ec, c, err,
                                         ptr_stripe_redundant,
                                         "redundant stripe entry");
                        have_ec = true;
                        break;
                case BCH_EXTENT_ENTRY_rebalance: {
                        const struct bch_extent_rebalance *r = &entry->rebalance;

                        if (!bch2_compression_opt_valid(r->compression)) {
                                struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
                                prt_printf(err, "invalid compression opt %u:%u",
                                           opt.type, opt.level);
                                return -BCH_ERR_invalid_bkey;
                        }
                        break;
                }
                }
        }

        bkey_fsck_err_on(!nr_ptrs, c, err,
                         extent_ptrs_no_ptrs,
                         "no ptrs");
        bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
                         extent_ptrs_too_many_ptrs,
                         "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
        bkey_fsck_err_on(have_written && have_unwritten, c, err,
                         extent_ptrs_written_and_unwritten,
                         "extent with unwritten and written ptrs");
        bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
                         extent_ptrs_unwritten,
                         "has unwritten ptrs");
        bkey_fsck_err_on(crc_since_last_ptr, c, err,
                         extent_ptrs_redundant_crc,
                         "redundant crc entry");
        bkey_fsck_err_on(have_ec, c, err,
                         extent_ptrs_redundant_stripe,
                         "redundant stripe entry");
fsck_err:
        return ret;
}

void bch2_ptr_swab(struct bkey_s k)
{
        struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
        union bch_extent_entry *entry;
        u64 *d;

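        /*
         * Byte-swap everything as 64 bit words first, then fix up the
         * checksum fields, which need swabbing at their actual widths:
         */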
        for (d =  (u64 *) ptrs.start;
             d != (u64 *) ptrs.end;
             d++)
                *d = swab64(*d);

        for (entry = ptrs.start;
             entry < ptrs.end;
             entry = extent_entry_next(entry)) {
                switch (extent_entry_type(entry)) {
                case BCH_EXTENT_ENTRY_ptr:
                        break;
                case BCH_EXTENT_ENTRY_crc32:
                        entry->crc32.csum = swab32(entry->crc32.csum);
                        break;
                case BCH_EXTENT_ENTRY_crc64:
                        entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
                        entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
                        break;
                case BCH_EXTENT_ENTRY_crc128:
                        entry->crc128.csum.hi = (__force __le64)
                                swab64((__force u64) entry->crc128.csum.hi);
                        entry->crc128.csum.lo = (__force __le64)
                                swab64((__force u64) entry->crc128.csum.lo);
                        break;
                case BCH_EXTENT_ENTRY_stripe_ptr:
                        break;
                case BCH_EXTENT_ENTRY_rebalance:
                        break;
                }
        }
}

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;

        bkey_extent_entry_for_each(ptrs, entry)
                if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
                        return &entry->rebalance;

        return NULL;
}

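/*
 * Returns a bitmask of the pointers that need rewriting to match the given
 * target/compression options (zero if the extent is incompressible or
 * unwritten):
 */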
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
                                       unsigned target, unsigned compression)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        unsigned rewrite_ptrs = 0;

        if (compression) {
                unsigned compression_type = bch2_compression_opt_to_type(compression);
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;
                unsigned i = 0;

                bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                        if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
                            p.ptr.unwritten) {
                                rewrite_ptrs = 0;
                                goto incompressible;
                        }

                        if (!p.ptr.cached && p.crc.compression_type != compression_type)
                                rewrite_ptrs |= 1U << i;
                        i++;
                }
        }
incompressible:
        if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
                unsigned i = 0;

                bkey_for_each_ptr(ptrs, ptr) {
                        if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
                                rewrite_ptrs |= 1U << i;
                        i++;
                }
        }

        return rewrite_ptrs;
}

bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
        const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

        /*
         * If it's an indirect extent, we don't delete the rebalance entry when
         * done so that we know what options were applied - check if it still
         * needs work done:
         */
        if (r &&
            k.k->type == KEY_TYPE_reflink_v &&
            !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
                r = NULL;

        return r != NULL;
}

int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
                                  struct bch_io_opts *opts)
{
        struct bkey_s k = bkey_i_to_s(_k);
        struct bch_extent_rebalance *r;
        unsigned target = opts->background_target;
        unsigned compression = background_compression(*opts);
        bool needs_rebalance;

        if (!bkey_extent_is_direct_data(k.k))
                return 0;

        /* get existing rebalance entry: */
        r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
        if (r) {
                if (k.k->type == KEY_TYPE_reflink_v) {
                        /*
                         * indirect extents: existing options take precedence,
                         * so that we don't move extents back and forth if
                         * they're referenced by different inodes with different
                         * options:
                         */
                        if (r->target)
                                target = r->target;
                        if (r->compression)
                                compression = r->compression;
                }

                r->target       = target;
                r->compression  = compression;
        }

        needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);

        if (needs_rebalance && !r) {
                union bch_extent_entry *new = bkey_val_end(k);

                new->rebalance.type             = 1U << BCH_EXTENT_ENTRY_rebalance;
                new->rebalance.compression      = compression;
                new->rebalance.target           = target;
                new->rebalance.unused           = 0;
                k.k->u64s += extent_entry_u64s(new);
        } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
                /*
                 * For indirect extents, don't delete the rebalance entry when
                 * we're finished so that we know we specifically moved it or
                 * compressed it to its current location/compression type
                 */
                extent_entry_drop(k, (union bch_extent_entry *) r);
        }

        return 0;
}

/* Generic extent code: */

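/*
 * Cut off the front of a key, advancing the start of its range to @where;
 * returns the (non-positive) change in the size of the value, in u64s:
 */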
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
        unsigned new_val_u64s = bkey_val_u64s(k.k);
        int val_u64s_delta;
        u64 sub;

        if (bkey_le(where, bkey_start_pos(k.k)))
                return 0;

        EBUG_ON(bkey_gt(where, k.k->p));

        sub = where.offset - bkey_start_offset(k.k);

        k.k->size -= sub;

        if (!k.k->size) {
                k.k->type = KEY_TYPE_deleted;
                new_val_u64s = 0;
        }

        switch (k.k->type) {
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v: {
                struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
                union bch_extent_entry *entry;
                bool seen_crc = false;

                bkey_extent_entry_for_each(ptrs, entry) {
                        switch (extent_entry_type(entry)) {
                        case BCH_EXTENT_ENTRY_ptr:
                                if (!seen_crc)
                                        entry->ptr.offset += sub;
                                break;
                        case BCH_EXTENT_ENTRY_crc32:
                                entry->crc32.offset += sub;
                                break;
                        case BCH_EXTENT_ENTRY_crc64:
                                entry->crc64.offset += sub;
                                break;
                        case BCH_EXTENT_ENTRY_crc128:
                                entry->crc128.offset += sub;
                                break;
                        case BCH_EXTENT_ENTRY_stripe_ptr:
                                break;
                        case BCH_EXTENT_ENTRY_rebalance:
                                break;
                        }

                        if (extent_entry_is_crc(entry))
                                seen_crc = true;
                }

                break;
        }
        case KEY_TYPE_reflink_p: {
                struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

                le64_add_cpu(&p.v->idx, sub);
                break;
        }
        case KEY_TYPE_inline_data:
        case KEY_TYPE_indirect_inline_data: {
                void *p = bkey_inline_data_p(k);
                unsigned bytes = bkey_inline_data_bytes(k.k);

                sub = min_t(u64, sub << 9, bytes);

                memmove(p, p + sub, bytes - sub);

                new_val_u64s -= sub >> 3;
                break;
        }
        }

        val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
        BUG_ON(val_u64s_delta < 0);

        set_bkey_val_u64s(k.k, new_val_u64s);
        memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
        return -val_u64s_delta;
}

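/*
 * Cut off the back of a key, moving the end of its range to @where; like
 * bch2_cut_front_s(), returns the (non-positive) change in value size:
 */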
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
        unsigned new_val_u64s = bkey_val_u64s(k.k);
        int val_u64s_delta;
        u64 len = 0;

        if (bkey_ge(where, k.k->p))
                return 0;

        EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

        len = where.offset - bkey_start_offset(k.k);

        k.k->p.offset = where.offset;
        k.k->size = len;

        if (!len) {
                k.k->type = KEY_TYPE_deleted;
                new_val_u64s = 0;
        }

        switch (k.k->type) {
        case KEY_TYPE_inline_data:
        case KEY_TYPE_indirect_inline_data:
                new_val_u64s = (bkey_inline_data_offset(k.k) +
                                min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
                break;
        }

        val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
        BUG_ON(val_u64s_delta < 0);

        set_bkey_val_u64s(k.k, new_val_u64s);
        memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
        return -val_u64s_delta;
}