1 /*
2  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3  *
4  * Code for managing the extent btree and dynamically updating the writeback
5  * dirty sector count.
6  */
7
8 #include "bcachefs.h"
9 #include "bkey_methods.h"
10 #include "btree_gc.h"
11 #include "btree_update.h"
12 #include "btree_update_interior.h"
13 #include "buckets.h"
14 #include "checksum.h"
15 #include "debug.h"
16 #include "dirent.h"
17 #include "error.h"
18 #include "extents.h"
19 #include "inode.h"
20 #include "journal.h"
21 #include "super-io.h"
22 #include "xattr.h"
23
24 #include <trace/events/bcachefs.h>
25
26 static enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *,
27                                            struct bkey_i *, struct bkey_i *);
28
29 static void sort_key_next(struct btree_node_iter *iter,
30                           struct btree *b,
31                           struct btree_node_iter_set *i)
32 {
33         i->k += __btree_node_offset_to_key(b, i->k)->u64s;
34
35         if (i->k == i->end)
36                 *i = iter->data[--iter->used];
37 }
38
39 /*
40  * Returns true if l > r - unless l == r, in which case returns true if l is
41  * older than r.
42  *
43  * Necessary for bch2_key_sort_fix_overlapping() - if there are multiple keys that compare
44  * equal in different sets, we have to process them newest to oldest.
45  */
46 #define key_sort_cmp(h, l, r)                                           \
47 ({                                                                      \
48         bkey_cmp_packed(b,                                              \
49                         __btree_node_offset_to_key(b, (l).k),           \
50                         __btree_node_offset_to_key(b, (r).k))           \
51                                                                         \
52         ?: (l).k - (r).k;                                               \
53 })
54
55 static inline bool should_drop_next_key(struct btree_node_iter *iter,
56                                         struct btree *b)
57 {
58         struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
59         struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
60
61         if (bkey_whiteout(k))
62                 return true;
63
64         if (iter->used < 2)
65                 return false;
66
67         if (iter->used > 2 &&
68             key_sort_cmp(iter, r[0], r[1]) >= 0)
69                 r++;
70
71         /*
72          * key_sort_cmp() ensures that when keys compare equal the older key
73          * comes first; so if l->k compares equal to r->k then l->k is older and
74          * should be dropped.
75          */
76         return !bkey_cmp_packed(b,
77                                 __btree_node_offset_to_key(b, l->k),
78                                 __btree_node_offset_to_key(b, r->k));
79 }
80
81 struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
82                                                   struct btree *b,
83                                                   struct btree_node_iter *iter)
84 {
85         struct bkey_packed *out = dst->start;
86         struct btree_nr_keys nr;
87
88         memset(&nr, 0, sizeof(nr));
89
90         heap_resort(iter, key_sort_cmp);
91
92         while (!bch2_btree_node_iter_end(iter)) {
93                 if (!should_drop_next_key(iter, b)) {
94                         struct bkey_packed *k =
95                                 __btree_node_offset_to_key(b, iter->data->k);
96
97                         bkey_copy(out, k);
98                         btree_keys_account_key_add(&nr, 0, out);
99                         out = bkey_next(out);
100                 }
101
102                 sort_key_next(iter, b, iter->data);
103                 heap_sift_down(iter, 0, key_sort_cmp);
104         }
105
106         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
107         return nr;
108 }
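
/*
 * Illustrative sketch: bch2_key_sort_fix_overlapping() above merges the
 * sorted bsets of a btree node and, when keys from different bsets compare
 * equal, keeps only the copy from the newer bset.  The standalone toy below
 * shows the same merge-and-drop pattern on two sorted integer arrays, where
 * entries in "new" shadow equal entries in "old"; all names are hypothetical.
 */
static unsigned toy_merge_newest_wins(const int *old, unsigned n_old,
				      const int *new, unsigned n_new,
				      int *out)
{
	unsigned i = 0, j = 0, n = 0;

	while (i < n_old || j < n_new) {
		if (i == n_old)
			out[n++] = new[j++];
		else if (j == n_new)
			out[n++] = old[i++];
		else if (old[i] < new[j])
			out[n++] = old[i++];
		else if (new[j] < old[i])
			out[n++] = new[j++];
		else {
			/* equal keys: emit the newer copy, drop the older */
			out[n++] = new[j++];
			i++;
		}
	}

	return n;
}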
109
110 /* Common among btree and extent ptrs */
111
112 const struct bch_extent_ptr *
113 bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
114 {
115         const struct bch_extent_ptr *ptr;
116
117         extent_for_each_ptr(e, ptr)
118                 if (ptr->dev == dev)
119                         return ptr;
120
121         return NULL;
122 }
123
124 unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
125 {
126         const struct bch_extent_ptr *ptr;
127         unsigned nr_ptrs = 0;
128
129         extent_for_each_ptr(e, ptr)
130                 nr_ptrs++;
131
132         return nr_ptrs;
133 }
134
135 unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
136 {
137         struct bkey_s_c_extent e;
138         const struct bch_extent_ptr *ptr;
139         unsigned nr_ptrs = 0;
140
141         switch (k.k->type) {
142         case BCH_EXTENT:
143         case BCH_EXTENT_CACHED:
144                 e = bkey_s_c_to_extent(k);
145
146                 extent_for_each_ptr(e, ptr)
147                         nr_ptrs += !ptr->cached;
148                 break;
149
150         case BCH_RESERVATION:
151                 nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
152                 break;
153         }
154
155         return nr_ptrs;
156 }
157
158 /* Doesn't clean up redundant crcs */
159 void __bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr)
160 {
161         EBUG_ON(ptr < &e.v->start->ptr ||
162                 ptr >= &extent_entry_last(e)->ptr);
163         EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
164         memmove_u64s_down(ptr, ptr + 1,
165                           (u64 *) extent_entry_last(e) - (u64 *) (ptr + 1));
166         e.k->u64s -= sizeof(*ptr) / sizeof(u64);
167 }
168
169 void bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr)
170 {
171         __bch2_extent_drop_ptr(e, ptr);
172         bch2_extent_drop_redundant_crcs(e);
173 }
174
175 void bch2_extent_drop_ptr_idx(struct bkey_s_extent e, unsigned idx)
176 {
177         struct bch_extent_ptr *ptr;
178         unsigned i = 0;
179
180         extent_for_each_ptr(e, ptr)
181                 if (i++ == idx)
182                         goto found;
183
184         BUG();
185 found:
186         bch2_extent_drop_ptr(e, ptr);
187 }
188
189 /* returns true if equal */
190 static bool crc_cmp(union bch_extent_crc *l, union bch_extent_crc *r)
191 {
192         return extent_crc_type(l) == extent_crc_type(r) &&
193                 !memcmp(l, r, extent_entry_bytes(to_entry(l)));
194 }
195
196 /* Increment pointers after @crc by crc's offset until the next crc entry: */
197 void bch2_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc)
198 {
199         union bch_extent_entry *entry;
200
201         extent_for_each_entry_from(e, entry, extent_entry_next(to_entry(crc))) {
202                 if (!extent_entry_is_ptr(entry))
203                         return;
204
205                 entry->ptr.offset += crc_offset(crc);
206         }
207 }
208
209 /*
210  * We're writing another replica for this extent, so while we've got the data in
211  * memory we'll be computing a new checksum for the currently live data.
212  *
213  * If there are other replicas we aren't moving, and they are checksummed but
214  * not compressed, we can modify them to point to only the data that is
215  * currently live (so that readers won't have to bounce) while we've got the
216  * checksum we need:
217  *
218  * XXX: to guard against data being corrupted while in memory, instead of
219  * recomputing the checksum here, it would be better in the read path to,
220  * instead of computing the checksum of the entire extent:
221  *
222  * | extent                              |
223  *
224  * compute the checksums of the live and dead data separately
225  * | dead data || live data || dead data |
226  *
227  * and then verify that crc_dead1 + crc_live + crc_dead2 == orig_crc, and then
228  * use crc_live here (that we verified was correct earlier)
229  *
230  * note: doesn't work with encryption
231  */
232 void bch2_extent_narrow_crcs(struct bkey_s_extent e)
233 {
234         union bch_extent_crc *crc;
235         bool have_wide = false, have_narrow = false;
236         struct bch_csum csum = { 0 };
237         unsigned csum_type = 0;
238
239         extent_for_each_crc(e, crc) {
240                 if (crc_compression_type(crc) ||
241                     bch2_csum_type_is_encryption(crc_csum_type(crc)))
242                         continue;
243
244                 if (crc_uncompressed_size(e.k, crc) != e.k->size) {
245                         have_wide = true;
246                 } else {
247                         have_narrow = true;
248                         csum = crc_csum(crc);
249                         csum_type = crc_csum_type(crc);
250                 }
251         }
252
253         if (!have_wide || !have_narrow)
254                 return;
255
256         extent_for_each_crc(e, crc) {
257                 if (crc_compression_type(crc))
258                         continue;
259
260                 if (crc_uncompressed_size(e.k, crc) != e.k->size) {
261                         switch (extent_crc_type(crc)) {
262                         case BCH_EXTENT_CRC_NONE:
263                                 BUG();
264                         case BCH_EXTENT_CRC32:
265                                 if (bch_crc_bytes[csum_type] > 4)
266                                         continue;
267
268                                 bch2_extent_crc_narrow_pointers(e, crc);
269                                 crc->crc32._compressed_size     = e.k->size - 1;
270                                 crc->crc32._uncompressed_size   = e.k->size - 1;
271                                 crc->crc32.offset               = 0;
272                                 crc->crc32.csum_type            = csum_type;
273                                 crc->crc32.csum                 = csum.lo;
274                                 break;
275                         case BCH_EXTENT_CRC64:
276                                 if (bch_crc_bytes[csum_type] > 10)
277                                         continue;
278
279                                 bch2_extent_crc_narrow_pointers(e, crc);
280                                 crc->crc64._compressed_size     = e.k->size - 1;
281                                 crc->crc64._uncompressed_size   = e.k->size - 1;
282                                 crc->crc64.offset               = 0;
283                                 crc->crc64.csum_type            = csum_type;
284                                 crc->crc64.csum_lo              = csum.lo;
285                                 crc->crc64.csum_hi              = csum.hi;
286                                 break;
287                         case BCH_EXTENT_CRC128:
288                                 if (bch_crc_bytes[csum_type] > 16)
289                                         continue;
290
291                                 bch2_extent_crc_narrow_pointers(e, crc);
292                                 crc->crc128._compressed_size    = e.k->size - 1;
293                                 crc->crc128._uncompressed_size  = e.k->size - 1;
294                                 crc->crc128.offset              = 0;
295                                 crc->crc128.csum_type           = csum_type;
296                                 crc->crc128.csum                = csum;
297                                 break;
298                         }
299                 }
300         }
301 }
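
/*
 * Illustrative sketch: "narrowing" a crc entry above means that after a
 * checksum has been computed over just the live data, the crc entry's offset
 * goes to zero, its sizes shrink to the live size, the checksum is replaced,
 * and (via bch2_extent_crc_narrow_pointers()) the data pointers are advanced
 * by the old offset so they point directly at the live data.  The toy types
 * and function below are hypothetical and only show that arithmetic.
 */
struct toy_crc {
	unsigned		offset;			/* sectors of dead data before the live data */
	unsigned		uncompressed_size;	/* sectors covered by the checksum */
	unsigned long long	csum;
};

struct toy_ptr {
	unsigned long long	offset;			/* device sector of the checksummed region */
};

static void toy_narrow_crc(struct toy_crc *crc, struct toy_ptr *ptr,
			   unsigned live_size, unsigned long long live_csum)
{
	ptr->offset		+= crc->offset;		/* point straight at the live data */
	crc->offset		 = 0;
	crc->uncompressed_size	 = live_size;
	crc->csum		 = live_csum;
}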
302
303 void bch2_extent_drop_redundant_crcs(struct bkey_s_extent e)
304 {
305         union bch_extent_entry *entry = e.v->start;
306         union bch_extent_crc *crc, *prev = NULL;
307
308         while (entry != extent_entry_last(e)) {
309                 union bch_extent_entry *next = extent_entry_next(entry);
310                 size_t crc_u64s = extent_entry_u64s(entry);
311
312                 if (!extent_entry_is_crc(entry))
313                         goto next;
314
315                 crc = entry_to_crc(entry);
316
317                 if (next == extent_entry_last(e)) {
318                         /* crc entry with no pointers after it: */
319                         goto drop;
320                 }
321
322                 if (extent_entry_is_crc(next)) {
323                         /* no pointers before next crc entry: */
324                         goto drop;
325                 }
326
327                 if (prev && crc_cmp(crc, prev)) {
328                         /* identical to previous crc entry: */
329                         goto drop;
330                 }
331
332                 if (!prev &&
333                     !crc_csum_type(crc) &&
334                     !crc_compression_type(crc)) {
335                         /* null crc entry: */
336                         bch2_extent_crc_narrow_pointers(e, crc);
337                         goto drop;
338                 }
339
340                 prev = crc;
341 next:
342                 entry = next;
343                 continue;
344 drop:
345                 memmove_u64s_down(crc, next,
346                                   (u64 *) extent_entry_last(e) - (u64 *) next);
347                 e.k->u64s -= crc_u64s;
348         }
349
350         EBUG_ON(bkey_val_u64s(e.k) && !bch2_extent_nr_ptrs(e.c));
351 }
352
353 static bool should_drop_ptr(const struct bch_fs *c,
354                             struct bkey_s_c_extent e,
355                             const struct bch_extent_ptr *ptr)
356 {
357         return ptr->cached && ptr_stale(c->devs[ptr->dev], ptr);
358 }
359
360 static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
361 {
362         struct bch_extent_ptr *ptr = &e.v->start->ptr;
363         bool dropped = false;
364
365         while ((ptr = extent_ptr_next(e, ptr)))
366                 if (should_drop_ptr(c, e.c, ptr)) {
367                         __bch2_extent_drop_ptr(e, ptr);
368                         dropped = true;
369                 } else
370                         ptr++;
371
372         if (dropped)
373                 bch2_extent_drop_redundant_crcs(e);
374 }
375
376 static bool bch2_ptr_normalize(struct bch_fs *c, struct btree *bk,
377                               struct bkey_s k)
378 {
379         return bch2_extent_normalize(c, k);
380 }
381
382 static void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
383 {
384         switch (k->type) {
385         case BCH_EXTENT:
386         case BCH_EXTENT_CACHED: {
387                 union bch_extent_entry *entry;
388                 u64 *d = (u64 *) bkeyp_val(f, k);
389                 unsigned i;
390
391                 for (i = 0; i < bkeyp_val_u64s(f, k); i++)
392                         d[i] = swab64(d[i]);
393
394                 for (entry = (union bch_extent_entry *) d;
395                      entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
396                      entry = extent_entry_next(entry)) {
397                         switch (extent_entry_type(entry)) {
398                         case BCH_EXTENT_ENTRY_crc32:
399                                 entry->crc32.csum = swab32(entry->crc32.csum);
400                                 break;
401                         case BCH_EXTENT_ENTRY_crc64:
402                                 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
403                                 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
404                                 break;
405                         case BCH_EXTENT_ENTRY_crc128:
406                                 entry->crc128.csum.hi = swab64(entry->crc128.csum.hi);
407                                 entry->crc128.csum.lo = swab64(entry->crc128.csum.lo);
408                                 break;
409                         case BCH_EXTENT_ENTRY_ptr:
410                                 break;
411                         }
412                 }
413                 break;
414         }
415         }
416 }
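
/*
 * Illustrative sketch: bch2_ptr_swab() above byte-swaps an extent's value
 * when a btree node was written on a machine of the opposite endianness,
 * first every u64 of the value wholesale and then the sub-u64 checksum
 * fields that the blanket swap got wrong.  The standalone helper below shows
 * the plain 64-bit byte swap that swab64() performs; the name is hypothetical.
 */
static unsigned long long toy_swab64(unsigned long long v)
{
	unsigned long long r = 0;
	int i;

	for (i = 0; i < 8; i++) {
		r = (r << 8) | (v & 0xff);	/* move the lowest byte to the other end */
		v >>= 8;
	}

	return r;
}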
417
418 static const char *extent_ptr_invalid(const struct bch_fs *c,
419                                       struct bkey_s_c_extent e,
420                                       const struct bch_extent_ptr *ptr,
421                                       unsigned size_ondisk,
422                                       bool metadata)
423 {
424         const struct bch_extent_ptr *ptr2;
425         struct bch_dev *ca;
426
427         if (ptr->dev >= c->sb.nr_devices)
428                 return "pointer to invalid device";
429
430         ca = c->devs[ptr->dev];
431         if (!ca)
432                 return "pointer to invalid device";
433
434         extent_for_each_ptr(e, ptr2)
435                 if (ptr != ptr2 && ptr->dev == ptr2->dev)
436                         return "multiple pointers to same device";
437
438         if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
439                 return "offset past end of device";
440
441         if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
442                 return "offset before first bucket";
443
444         if (bucket_remainder(ca, ptr->offset) +
445             size_ondisk > ca->mi.bucket_size)
446                 return "spans multiple buckets";
447
448         return NULL;
449 }
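
/*
 * Illustrative sketch: extent_ptr_invalid() above checks that a pointer
 * lands inside its device and does not straddle a bucket boundary.  The
 * standalone check below repeats that arithmetic on hypothetical toy
 * parameters: a device of nbuckets buckets of bucket_size sectors each,
 * with the first usable bucket at first_bucket.
 */
static const char *toy_ptr_check(unsigned long long offset, unsigned size,
				 unsigned bucket_size,
				 unsigned long long first_bucket,
				 unsigned long long nbuckets)
{
	if (offset + size > nbuckets * bucket_size)
		return "offset past end of device";

	if (offset < first_bucket * bucket_size)
		return "offset before first bucket";

	if (offset % bucket_size + size > bucket_size)
		return "spans multiple buckets";

	return NULL;	/* valid */
}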
450
451 static size_t extent_print_ptrs(struct bch_fs *c, char *buf,
452                                 size_t size, struct bkey_s_c_extent e)
453 {
454         char *out = buf, *end = buf + size;
455         const union bch_extent_entry *entry;
456         const union bch_extent_crc *crc;
457         const struct bch_extent_ptr *ptr;
458         struct bch_dev *ca;
459         bool first = true;
460
461 #define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))
462
463         extent_for_each_entry(e, entry) {
464                 if (!first)
465                         p(" ");
466
467                 switch (__extent_entry_type(entry)) {
468                 case BCH_EXTENT_ENTRY_crc32:
469                 case BCH_EXTENT_ENTRY_crc64:
470                 case BCH_EXTENT_ENTRY_crc128:
471                         crc = entry_to_crc(entry);
472
473                         p("crc: c_size %u size %u offset %u csum %u compress %u",
474                           crc_compressed_size(e.k, crc),
475                           crc_uncompressed_size(e.k, crc),
476                           crc_offset(crc), crc_csum_type(crc),
477                           crc_compression_type(crc));
478                         break;
479                 case BCH_EXTENT_ENTRY_ptr:
480                         ptr = entry_to_ptr(entry);
481                         ca = c->devs[ptr->dev];
482
483                         p("ptr: %u:%llu gen %u%s", ptr->dev,
484                           (u64) ptr->offset, ptr->gen,
485                           ca && ptr_stale(ca, ptr)
486                           ? " stale" : "");
487                         break;
488                 default:
489                         p("(invalid extent entry %.16llx)", *((u64 *) entry));
490                         goto out;
491                 }
492
493                 first = false;
494         }
495 out:
496         if (bkey_extent_is_cached(e.k))
497                 p(" cached");
498 #undef p
499         return out - buf;
500 }
501
502 static void extent_pick_read_device(struct bch_fs *c,
503                                     struct bkey_s_c_extent e,
504                                     struct bch_devs_mask *avoid,
505                                     struct extent_pick_ptr *pick)
506 {
507         const union bch_extent_crc *crc;
508         const struct bch_extent_ptr *ptr;
509
510         extent_for_each_ptr_crc(e, ptr, crc) {
511                 struct bch_dev *ca = c->devs[ptr->dev];
512
513                 if (ptr->cached && ptr_stale(ca, ptr))
514                         continue;
515
516                 if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
517                         continue;
518
519                 if (avoid && test_bit(ca->dev_idx, avoid->d))
520                         continue;
521
522                 if (pick->ca && pick->ca->mi.tier < ca->mi.tier)
523                         continue;
524
525                 if (!percpu_ref_tryget(&ca->io_ref))
526                         continue;
527
528                 if (pick->ca)
529                         percpu_ref_put(&pick->ca->io_ref);
530
531                 *pick = (struct extent_pick_ptr) {
532                         .ptr    = *ptr,
533                         .ca     = ca,
534                 };
535
536                 if (e.k->size)
537                         pick->crc = crc_to_128(e.k, crc);
538         }
539 }
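
/*
 * Illustrative sketch: extent_pick_read_device() above walks every pointer
 * in the extent, skips replicas it cannot or should not read from (stale
 * cached pointers, failed devices, devices in @avoid), and among the rest
 * ends up with the last usable pointer on the lowest-numbered tier.  The toy
 * below shows that selection loop over a hypothetical array of candidates.
 */
struct toy_replica {
	int	usable;		/* not stale, not failed, not avoided */
	int	tier;		/* lower tier is preferred */
};

static int toy_pick_replica(const struct toy_replica *r, unsigned nr)
{
	int pick = -1;
	unsigned i;

	for (i = 0; i < nr; i++) {
		if (!r[i].usable)
			continue;

		/* don't replace a pick from a lower (preferred) tier */
		if (pick >= 0 && r[pick].tier < r[i].tier)
			continue;

		pick = i;
	}

	return pick;	/* index of chosen replica, or -1 if none */
}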
540
541 /* Btree ptrs */
542
543 static const char *bch2_btree_ptr_invalid(const struct bch_fs *c,
544                                          struct bkey_s_c k)
545 {
546         if (bkey_extent_is_cached(k.k))
547                 return "cached";
548
549         if (k.k->size)
550                 return "nonzero key size";
551
552         if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
553                 return "value too big";
554
555         switch (k.k->type) {
556         case BCH_EXTENT: {
557                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
558                 const union bch_extent_entry *entry;
559                 const struct bch_extent_ptr *ptr;
560                 const union bch_extent_crc *crc;
561                 const char *reason;
562
563                 extent_for_each_entry(e, entry)
564                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
565                                 return "invalid extent entry type";
566
567                 extent_for_each_ptr_crc(e, ptr, crc) {
568                         reason = extent_ptr_invalid(c, e, ptr,
569                                                     c->opts.btree_node_size,
570                                                     true);
571                         if (reason)
572                                 return reason;
573                 }
574
575                 if (crc)
576                         return "has crc field";
577
578                 return NULL;
579         }
580
581         default:
582                 return "invalid value type";
583         }
584 }
585
586 static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
587                                  struct bkey_s_c k)
588 {
589         struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
590         const struct bch_extent_ptr *ptr;
591         unsigned seq;
592         const char *err;
593         char buf[160];
594         struct bucket *g;
595         struct bch_dev *ca;
596         unsigned replicas = 0;
597         bool bad;
598
599         extent_for_each_ptr(e, ptr) {
600                 ca = c->devs[ptr->dev];
601                 g = PTR_BUCKET(ca, ptr);
602                 replicas++;
603
604                 if (!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags))
605                         continue;
606
607                 err = "stale";
608                 if (ptr_stale(ca, ptr))
609                         goto err;
610
611                 do {
612                         seq = read_seqcount_begin(&c->gc_pos_lock);
613                         bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
614                                 (g->mark.data_type != BUCKET_BTREE ||
615                                  g->mark.dirty_sectors < c->opts.btree_node_size);
616                 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
617
618                 err = "inconsistent";
619                 if (bad)
620                         goto err;
621         }
622
623         if (!bch2_sb_has_replicas(c, e, BCH_DATA_BTREE)) {
624                 bch2_bkey_val_to_text(c, btree_node_type(b),
625                                      buf, sizeof(buf), k);
626                 bch2_fs_bug(c,
627                         "btree key bad (replicas not marked in superblock):\n%s",
628                         buf);
629                 return;
630         }
631
632         return;
633 err:
634         bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
635         bch2_fs_bug(c, "%s btree pointer %s: bucket %zi "
636                       "gen %i last_gc %i mark %08x",
637                       err, buf, PTR_BUCKET_NR(ca, ptr),
638                       PTR_BUCKET(ca, ptr)->mark.gen,
639                       ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
640                       (unsigned) g->mark.counter);
641 }
642
643 static void bch2_btree_ptr_to_text(struct bch_fs *c, char *buf,
644                                   size_t size, struct bkey_s_c k)
645 {
646         char *out = buf, *end = buf + size;
647         const char *invalid;
648
649 #define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))
650
651         if (bkey_extent_is_data(k.k))
652                 out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
653
654         invalid = bch2_btree_ptr_invalid(c, k);
655         if (invalid)
656                 p(" invalid: %s", invalid);
657 #undef p
658 }
659
660 struct extent_pick_ptr
661 bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b,
662                     struct bch_devs_mask *avoid)
663 {
664         struct extent_pick_ptr pick = { .ca = NULL };
665
666         extent_pick_read_device(c, bkey_i_to_s_c_extent(&b->key),
667                                 avoid, &pick);
668
669         return pick;
670 }
671
672 const struct bkey_ops bch2_bkey_btree_ops = {
673         .key_invalid    = bch2_btree_ptr_invalid,
674         .key_debugcheck = btree_ptr_debugcheck,
675         .val_to_text    = bch2_btree_ptr_to_text,
676         .swab           = bch2_ptr_swab,
677 };
678
679 /* Extents */
680
681 static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
682 {
683         u64 len = 0;
684
685         if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
686                 return false;
687
688         EBUG_ON(bkey_cmp(where, k.k->p) > 0);
689
690         len = k.k->p.offset - where.offset;
691
692         BUG_ON(len > k.k->size);
693
694         /*
695          * Don't readjust offset if the key size is now 0, because that could
696          * cause offset to point to the next bucket:
697          */
698         if (!len)
699                 __set_bkey_deleted(k.k);
700         else if (bkey_extent_is_data(k.k)) {
701                 struct bkey_s_extent e = bkey_s_to_extent(k);
702                 struct bch_extent_ptr *ptr;
703                 union bch_extent_crc *crc, *prev_crc = NULL;
704
705                 extent_for_each_ptr_crc(e, ptr, crc) {
706                         switch (extent_crc_type(crc)) {
707                         case BCH_EXTENT_CRC_NONE:
708                                 ptr->offset += e.k->size - len;
709                                 break;
710                         case BCH_EXTENT_CRC32:
711                                 if (prev_crc != crc)
712                                         crc->crc32.offset += e.k->size - len;
713                                 break;
714                         case BCH_EXTENT_CRC64:
715                                 if (prev_crc != crc)
716                                         crc->crc64.offset += e.k->size - len;
717                                 break;
718                         case BCH_EXTENT_CRC128:
719                                 if (prev_crc != crc)
720                                         crc->crc128.offset += e.k->size - len;
721                                 break;
722                         }
723                         prev_crc = crc;
724                 }
725         }
726
727         k.k->size = len;
728
729         return true;
730 }
731
732 bool bch2_cut_front(struct bpos where, struct bkey_i *k)
733 {
734         return __bch2_cut_front(where, bkey_i_to_s(k));
735 }
736
737 bool bch2_cut_back(struct bpos where, struct bkey *k)
738 {
739         u64 len = 0;
740
741         if (bkey_cmp(where, k->p) >= 0)
742                 return false;
743
744         EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
745
746         len = where.offset - bkey_start_offset(k);
747
748         BUG_ON(len > k->size);
749
750         k->p = where;
751         k->size = len;
752
753         if (!len)
754                 __set_bkey_deleted(k);
755
756         return true;
757 }
758
759 /**
760  * bch2_key_resize - adjust size of @k
761  *
762  * bkey_start_offset(k) will be preserved, modifies where the extent ends
763  */
764 void bch2_key_resize(struct bkey *k,
765                     unsigned new_size)
766 {
767         k->p.offset -= k->size;
768         k->p.offset += new_size;
769         k->size = new_size;
770 }
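
/*
 * Illustrative sketch: a bkey records the *end* of an extent in k->p and its
 * length in k->size, so the start is p.offset - size.  bch2_cut_front()
 * keeps only the part at or after @where, bch2_cut_back() keeps only the
 * part before @where, and bch2_key_resize() moves the end while preserving
 * the start.  The toy below repeats that arithmetic on a simplified key
 * (the real cut_front additionally adjusts pointer/crc offsets); names are
 * hypothetical.
 */
struct toy_key {
	unsigned long long	end;	/* like k->p.offset */
	unsigned		size;	/* sectors; start == end - size */
};

static void toy_cut_front(struct toy_key *k, unsigned long long where)
{
	unsigned long long start = k->end - k->size;

	if (where > start)
		k->size = where < k->end ? k->end - where : 0;
	/* end is unchanged: only the front is trimmed */
}

static void toy_cut_back(struct toy_key *k, unsigned long long where)
{
	unsigned long long start = k->end - k->size;

	if (where < k->end) {
		k->end	= where;
		k->size	= where > start ? where - start : 0;
	}
}

static void toy_key_resize(struct toy_key *k, unsigned new_size)
{
	k->end	= (k->end - k->size) + new_size;	/* keep the start fixed */
	k->size	= new_size;
}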
771
772 /*
773  * In extent_sort_fix_overlapping(), insert_fixup_extent(),
774  * extent_merge_inline() - we're modifying keys in place that are packed. To do
775  * that we have to unpack the key, modify the unpacked key - then this
776  * copies/repacks the unpacked key back to the original as necessary.
777  */
778 static bool __extent_save(struct btree *b, struct btree_node_iter *iter,
779                           struct bkey_packed *dst, struct bkey *src)
780 {
781         struct bkey_format *f = &b->format;
782         struct bkey_i *dst_unpacked;
783         bool ret;
784
785         if ((dst_unpacked = packed_to_bkey(dst))) {
786                 dst_unpacked->k = *src;
787                 ret = true;
788         } else {
789                 ret = bch2_bkey_pack_key(dst, src, f);
790         }
791
792         if (ret && iter)
793                 bch2_verify_key_order(b, iter, dst);
794
795         return ret;
796 }
797
798 static void extent_save(struct btree *b, struct btree_node_iter *iter,
799                         struct bkey_packed *dst, struct bkey *src)
800 {
801         BUG_ON(!__extent_save(b, iter, dst, src));
802 }
803
804 /*
805  * If keys compare equal, compare by pointer order:
806  *
807  * Necessary for sort_fix_overlapping() - if there are multiple keys that
808  * compare equal in different sets, we have to process them newest to oldest.
809  */
810 #define extent_sort_cmp(h, l, r)                                        \
811 ({                                                                      \
812         struct bkey _ul = bkey_unpack_key(b,                            \
813                                 __btree_node_offset_to_key(b, (l).k));  \
814         struct bkey _ur = bkey_unpack_key(b,                            \
815                                 __btree_node_offset_to_key(b, (r).k));  \
816                                                                         \
817         bkey_cmp(bkey_start_pos(&_ul),                                  \
818                  bkey_start_pos(&_ur)) ?: (r).k - (l).k;                \
819 })
820
821 static inline void extent_sort_sift(struct btree_node_iter *iter,
822                                     struct btree *b, size_t i)
823 {
824         heap_sift_down(iter, i, extent_sort_cmp);
825 }
826
827 static inline void extent_sort_next(struct btree_node_iter *iter,
828                                     struct btree *b,
829                                     struct btree_node_iter_set *i)
830 {
831         sort_key_next(iter, b, i);
832         heap_sift_down(iter, i - iter->data, extent_sort_cmp);
833 }
834
835 static void extent_sort_append(struct bch_fs *c,
836                                struct btree *b,
837                                struct btree_nr_keys *nr,
838                                struct bkey_packed *start,
839                                struct bkey_packed **prev,
840                                struct bkey_packed *k)
841 {
842         struct bkey_format *f = &b->format;
843         BKEY_PADDED(k) tmp;
844
845         if (bkey_whiteout(k))
846                 return;
847
848         bch2_bkey_unpack(b, &tmp.k, k);
849
850         if (*prev &&
851             bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
852                 return;
853
854         if (*prev) {
855                 bch2_bkey_pack(*prev, (void *) *prev, f);
856
857                 btree_keys_account_key_add(nr, 0, *prev);
858                 *prev = bkey_next(*prev);
859         } else {
860                 *prev = start;
861         }
862
863         bkey_copy(*prev, &tmp.k);
864 }
865
866 struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
867                                         struct bset *dst,
868                                         struct btree *b,
869                                         struct btree_node_iter *iter)
870 {
871         struct bkey_format *f = &b->format;
872         struct btree_node_iter_set *_l = iter->data, *_r;
873         struct bkey_packed *prev = NULL, *out, *lk, *rk;
874         struct bkey l_unpacked, r_unpacked;
875         struct bkey_s l, r;
876         struct btree_nr_keys nr;
877
878         memset(&nr, 0, sizeof(nr));
879
880         heap_resort(iter, extent_sort_cmp);
881
882         while (!bch2_btree_node_iter_end(iter)) {
883                 lk = __btree_node_offset_to_key(b, _l->k);
884
885                 if (iter->used == 1) {
886                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
887                         extent_sort_next(iter, b, _l);
888                         continue;
889                 }
890
891                 _r = iter->data + 1;
892                 if (iter->used > 2 &&
893                     extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
894                         _r++;
895
896                 rk = __btree_node_offset_to_key(b, _r->k);
897
898                 l = __bkey_disassemble(b, lk, &l_unpacked);
899                 r = __bkey_disassemble(b, rk, &r_unpacked);
900
901                 /* If current key and next key don't overlap, just append */
902                 if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
903                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
904                         extent_sort_next(iter, b, _l);
905                         continue;
906                 }
907
908                 /* Skip 0 size keys */
909                 if (!r.k->size) {
910                         extent_sort_next(iter, b, _r);
911                         continue;
912                 }
913
914                 /*
915                  * overlap: keep the newer key and trim the older key so they
916                  * don't overlap. comparing pointers tells us which one is
917                  * newer, since the bsets are appended one after the other.
918                  */
919
920                 /* can't happen because of comparison func */
921                 BUG_ON(_l->k < _r->k &&
922                        !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));
923
924                 if (_l->k > _r->k) {
925                         /* l wins, trim r */
926                         if (bkey_cmp(l.k->p, r.k->p) >= 0) {
927                                 sort_key_next(iter, b, _r);
928                         } else {
929                                 __bch2_cut_front(l.k->p, r);
930                                 extent_save(b, NULL, rk, r.k);
931                         }
932
933                         extent_sort_sift(iter, b, _r - iter->data);
934                 } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
935                         BKEY_PADDED(k) tmp;
936
937                         /*
938                          * r wins, but it overlaps in the middle of l - split l:
939                          */
940                         bkey_reassemble(&tmp.k, l.s_c);
941                         bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);
942
943                         __bch2_cut_front(r.k->p, l);
944                         extent_save(b, NULL, lk, l.k);
945
946                         extent_sort_sift(iter, b, 0);
947
948                         extent_sort_append(c, b, &nr, dst->start, &prev,
949                                            bkey_to_packed(&tmp.k));
950                 } else {
951                         bch2_cut_back(bkey_start_pos(r.k), l.k);
952                         extent_save(b, NULL, lk, l.k);
953                 }
954         }
955
956         if (prev) {
957                 bch2_bkey_pack(prev, (void *) prev, f);
958                 btree_keys_account_key_add(&nr, 0, prev);
959                 out = bkey_next(prev);
960         } else {
961                 out = dst->start;
962         }
963
964         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
965         return nr;
966 }
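
/*
 * Illustrative sketch: in bch2_extent_sort_fix_overlapping() above, when two
 * extents from different bsets overlap, the newer key is kept intact and the
 * older key is trimmed around it: its front is cut, its back is cut, or it
 * is split into two surviving pieces when the newer key lands in its middle.
 * The standalone toy below computes what survives of an older range
 * [old.start, old.end) once a newer range takes precedence; names are
 * hypothetical.
 */
struct toy_span { unsigned long long start, end; };

/* returns the number of surviving pieces of the older span (0, 1 or 2) */
static unsigned toy_trim_older(struct toy_span old, struct toy_span new,
			       struct toy_span out[2])
{
	unsigned n = 0;

	if (new.end <= old.start || new.start >= old.end) {
		out[n++] = old;				/* no overlap: old survives whole */
		return n;
	}

	if (new.start > old.start)			/* front piece survives */
		out[n++] = (struct toy_span) { old.start, new.start };

	if (new.end < old.end)				/* back piece survives */
		out[n++] = (struct toy_span) { new.end, old.end };

	return n;					/* 0 means fully covered */
}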
967
968 struct extent_insert_state {
969         struct btree_insert             *trans;
970         struct btree_insert_entry       *insert;
971         struct bpos                     committed;
972         struct bch_fs_usage             stats;
973
974         /* for deleting: */
975         struct bkey_i                   whiteout;
976         bool                            do_journal;
977         bool                            deleting;
978 };
979
980 static void bch2_add_sectors(struct extent_insert_state *s,
981                             struct bkey_s_c k, u64 offset, s64 sectors)
982 {
983         struct bch_fs *c = s->trans->c;
984         struct btree *b = s->insert->iter->nodes[0];
985
986         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
987
988         if (!sectors)
989                 return;
990
991         bch2_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
992                      &s->stats, s->trans->journal_res.seq);
993 }
994
995 static void bch2_subtract_sectors(struct extent_insert_state *s,
996                                  struct bkey_s_c k, u64 offset, s64 sectors)
997 {
998         bch2_add_sectors(s, k, offset, -sectors);
999 }
1000
1001 /* These wrappers subtract exactly the sectors that we're removing from @k */
1002 static void bch2_cut_subtract_back(struct extent_insert_state *s,
1003                                   struct bpos where, struct bkey_s k)
1004 {
1005         bch2_subtract_sectors(s, k.s_c, where.offset,
1006                              k.k->p.offset - where.offset);
1007         bch2_cut_back(where, k.k);
1008 }
1009
1010 static void bch2_cut_subtract_front(struct extent_insert_state *s,
1011                                    struct bpos where, struct bkey_s k)
1012 {
1013         bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
1014                              where.offset - bkey_start_offset(k.k));
1015         __bch2_cut_front(where, k);
1016 }
1017
1018 static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
1019 {
1020         if (k.k->size)
1021                 bch2_subtract_sectors(s, k.s_c,
1022                                      bkey_start_offset(k.k), k.k->size);
1023         k.k->size = 0;
1024         __set_bkey_deleted(k.k);
1025 }
1026
1027 static bool bch2_extent_merge_inline(struct bch_fs *,
1028                                      struct btree_iter *,
1029                                      struct bkey_packed *,
1030                                      struct bkey_packed *,
1031                                      bool);
1032
1033 #define MAX_LOCK_HOLD_TIME      (5 * NSEC_PER_MSEC)
1034
1035 static enum btree_insert_ret
1036 extent_insert_should_stop(struct extent_insert_state *s)
1037 {
1038         struct btree *b = s->insert->iter->nodes[0];
1039
1040         /*
1041          * Check if we have sufficient space in both the btree node and the
1042          * journal reservation:
1043          *
1044          * Each insert checks for room in the journal entry, but we check for
1045          * room in the btree node up-front. In the worst case, bkey_cmpxchg()
1046          * will insert two keys, and one iteration of the insert loop will insert one
1047          * key, so we need room for three keys.
1048          */
1049         if (!bch2_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
1050                 return BTREE_INSERT_BTREE_NODE_FULL;
1051         else if (!journal_res_insert_fits(s->trans, s->insert))
1052                 return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */
1053         else
1054                 return BTREE_INSERT_OK;
1055 }
1056
1057 static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
1058                                struct bkey_i *insert)
1059 {
1060         struct btree *b = iter->nodes[0];
1061         struct btree_node_iter *node_iter = &iter->node_iters[0];
1062         struct bset_tree *t = bset_tree_last(b);
1063         struct bkey_packed *where =
1064                 bch2_btree_node_iter_bset_pos(node_iter, b, t);
1065         struct bkey_packed *prev = bch2_bkey_prev(b, t, where);
1066         struct bkey_packed *next_live_key = where;
1067         unsigned clobber_u64s;
1068
1069         if (prev)
1070                 where = bkey_next(prev);
1071
1072         while (next_live_key != btree_bkey_last(b, t) &&
1073                bkey_deleted(next_live_key))
1074                 next_live_key = bkey_next(next_live_key);
1075
1076         /*
1077          * Everything between where and next_live_key consists of deleted keys,
1078          * which are overwritten:
1079          */
1080         clobber_u64s = (u64 *) next_live_key - (u64 *) where;
1081
1082         if (prev &&
1083             bch2_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
1084                 goto drop_deleted_keys;
1085
1086         if (next_live_key != btree_bkey_last(b, t) &&
1087             bch2_extent_merge_inline(c, iter, bkey_to_packed(insert),
1088                                     next_live_key, false))
1089                 goto drop_deleted_keys;
1090
1091         bch2_bset_insert(b, node_iter, where, insert, clobber_u64s);
1092         bch2_btree_node_iter_fix(iter, b, node_iter, t, where,
1093                                 clobber_u64s, where->u64s);
1094         return;
1095 drop_deleted_keys:
1096         bch2_bset_delete(b, where, clobber_u64s);
1097         bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0);
1098 }
1099
1100 static void extent_insert_committed(struct extent_insert_state *s)
1101 {
1102         struct bch_fs *c = s->trans->c;
1103         struct btree_iter *iter = s->insert->iter;
1104         struct bkey_i *insert = !s->deleting
1105                 ? s->insert->k
1106                 : &s->whiteout;
1107         BKEY_PADDED(k) split;
1108
1109         EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
1110         EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
1111
1112         if (!bkey_cmp(s->committed, bkey_start_pos(&insert->k)))
1113                 return;
1114
1115         if (s->deleting && !s->do_journal) {
1116                 bch2_cut_front(s->committed, insert);
1117                 goto done;
1118         }
1119
1120         EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
1121
1122         bkey_copy(&split.k, insert);
1123
1124         if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY) &&
1125             bkey_cmp(s->committed, insert->k.p) &&
1126             bkey_extent_is_compressed(bkey_i_to_s_c(insert))) {
1127                 /* XXX: possibly need to increase our reservation? */
1128                 bch2_cut_subtract_back(s, s->committed,
1129                                       bkey_i_to_s(&split.k));
1130                 bch2_cut_front(s->committed, insert);
1131                 bch2_add_sectors(s, bkey_i_to_s_c(insert),
1132                                 bkey_start_offset(&insert->k),
1133                                 insert->k.size);
1134         } else {
1135                 bch2_cut_back(s->committed, &split.k.k);
1136                 bch2_cut_front(s->committed, insert);
1137         }
1138
1139         if (debug_check_bkeys(c))
1140                 bch2_bkey_debugcheck(c, iter->nodes[iter->level],
1141                                 bkey_i_to_s_c(&split.k));
1142
1143         bch2_btree_journal_key(s->trans, iter, &split.k);
1144
1145         if (!s->deleting)
1146                 extent_bset_insert(c, iter, &split.k);
1147 done:
1148         bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1149
1150         insert->k.needs_whiteout        = false;
1151         s->do_journal                   = false;
1152         s->trans->did_work              = true;
1153 }
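
/*
 * Illustrative sketch: extent_insert_committed() above journals and inserts
 * only the part of @insert up to s->committed, by copying the insert,
 * cutting the copy back to the committed position, and cutting the front off
 * the remaining insert.  The toy below shows that split of a half-open range
 * at a point; the names are hypothetical.
 */
struct toy_extent { unsigned long long start, end; };

static struct toy_extent toy_commit_prefix(struct toy_extent *insert,
					   unsigned long long committed)
{
	struct toy_extent done = { insert->start, committed };

	insert->start = committed;	/* the uncommitted remainder */
	return done;			/* the part that gets journalled/inserted */
}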
1154
1155 static enum extent_insert_hook_ret
1156 __extent_insert_advance_pos(struct extent_insert_state *s,
1157                             struct bpos next_pos,
1158                             struct bkey_s_c k)
1159 {
1160         struct extent_insert_hook *hook = s->trans->hook;
1161         enum extent_insert_hook_ret ret;
1162 #if 0
1163         /*
1164          * Currently disabled for encryption - broken with fcollapse. Will have
1165          * to reenable when versions are exposed for send/receive - versions
1166          * will have to be monotonic then:
1167          */
1168         if (k.k && k.k->size &&
1169             !bversion_zero(s->insert->k->k.version) &&
1170             bversion_cmp(k.k->version, s->insert->k->k.version) > 0) {
1171                 ret = BTREE_HOOK_NO_INSERT;
1172         } else
1173 #endif
1174         if (hook)
1175                 ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
1176         else
1177                 ret = BTREE_HOOK_DO_INSERT;
1178
1179         EBUG_ON(bkey_deleted(&s->insert->k->k) || !s->insert->k->k.size);
1180
1181         switch (ret) {
1182         case BTREE_HOOK_DO_INSERT:
1183                 break;
1184         case BTREE_HOOK_NO_INSERT:
1185                 extent_insert_committed(s);
1186                 bch2_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k));
1187
1188                 bch2_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos);
1189                 break;
1190         case BTREE_HOOK_RESTART_TRANS:
1191                 return ret;
1192         }
1193
1194         s->committed = next_pos;
1195         return ret;
1196 }
1197
1198 /*
1199  * Update iter->pos, marking how much of @insert we've processed, and call hook
1200  * fn:
1201  */
1202 static enum extent_insert_hook_ret
1203 extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
1204 {
1205         struct btree *b = s->insert->iter->nodes[0];
1206         struct bpos next_pos = bpos_min(s->insert->k->k.p,
1207                                         k.k ? k.k->p : b->key.k.p);
1208
1209         /* hole? */
1210         if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
1211                 bool have_uncommitted = bkey_cmp(s->committed,
1212                                 bkey_start_pos(&s->insert->k->k)) > 0;
1213
1214                 switch (__extent_insert_advance_pos(s, bkey_start_pos(k.k),
1215                                                     bkey_s_c_null)) {
1216                 case BTREE_HOOK_DO_INSERT:
1217                         break;
1218                 case BTREE_HOOK_NO_INSERT:
1219                         /*
1220                          * we had to split @insert and insert the committed
1221                          * part - need to bail out and recheck journal
1222                          * reservation/btree node before we advance pos past @k:
1223                          */
1224                         if (have_uncommitted)
1225                                 return BTREE_HOOK_NO_INSERT;
1226                         break;
1227                 case BTREE_HOOK_RESTART_TRANS:
1228                         return BTREE_HOOK_RESTART_TRANS;
1229                 }
1230         }
1231
1232         /* avoid redundant calls to hook fn: */
1233         if (!bkey_cmp(s->committed, next_pos))
1234                 return BTREE_HOOK_DO_INSERT;
1235
1236         return __extent_insert_advance_pos(s, next_pos, k);
1237 }
1238
1239 static enum btree_insert_ret
1240 extent_insert_check_split_compressed(struct extent_insert_state *s,
1241                                      struct bkey_s_c k,
1242                                      enum bch_extent_overlap overlap)
1243 {
1244         struct bch_fs *c = s->trans->c;
1245         unsigned sectors;
1246
1247         if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
1248             (sectors = bkey_extent_is_compressed(k))) {
1249                 int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
1250
1251                 if (s->trans->flags & BTREE_INSERT_NOFAIL)
1252                         flags |= BCH_DISK_RESERVATION_NOFAIL;
1253
1254                 switch (bch2_disk_reservation_add(c,
1255                                 s->trans->disk_res,
1256                                 sectors, flags)) {
1257                 case 0:
1258                         break;
1259                 case -ENOSPC:
1260                         return BTREE_INSERT_ENOSPC;
1261                 case -EINTR:
1262                         return BTREE_INSERT_NEED_GC_LOCK;
1263                 default:
1264                         BUG();
1265                 }
1266         }
1267
1268         return BTREE_INSERT_OK;
1269 }
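
/*
 * Illustrative sketch: extent_squash() below acts on bch2_extent_overlap()'s
 * classification of how the key being inserted overlaps an existing key: it
 * covers the front of it, the back of it, all of it, or only a middle slice.
 * The toy below classifies two overlapping half-open ranges the same way;
 * the names are hypothetical.
 */
enum toy_overlap { TOY_FRONT, TOY_BACK, TOY_ALL, TOY_MIDDLE };

static enum toy_overlap
toy_classify_overlap(unsigned long long ins_start, unsigned long long ins_end,
		     unsigned long long old_start, unsigned long long old_end)
{
	/* caller guarantees the two ranges overlap */
	if (ins_start <= old_start)
		return ins_end >= old_end ? TOY_ALL : TOY_FRONT;
	else
		return ins_end >= old_end ? TOY_BACK : TOY_MIDDLE;
}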
1270
1271 static enum btree_insert_ret
1272 extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
1273               struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
1274               enum bch_extent_overlap overlap)
1275 {
1276         struct bch_fs *c = s->trans->c;
1277         struct btree_iter *iter = s->insert->iter;
1278         struct btree *b = iter->nodes[0];
1279         struct btree_node_iter *node_iter = &iter->node_iters[0];
1280
1281         switch (overlap) {
1282         case BCH_EXTENT_OVERLAP_FRONT:
1283                 /* insert overlaps with start of k: */
1284                 bch2_cut_subtract_front(s, insert->k.p, k);
1285                 BUG_ON(bkey_deleted(k.k));
1286                 extent_save(b, node_iter, _k, k.k);
1287                 break;
1288
1289         case BCH_EXTENT_OVERLAP_BACK:
1290                 /* insert overlaps with end of k: */
1291                 bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
1292                 BUG_ON(bkey_deleted(k.k));
1293                 extent_save(b, node_iter, _k, k.k);
1294
1295                 /*
1296                  * As the auxiliary tree is indexed by the end of the
1297                  * key and we've just changed the end, update the
1298                  * auxiliary tree.
1299                  */
1300                 bch2_bset_fix_invalidated_key(b, t, _k);
1301                 bch2_btree_node_iter_fix(iter, b, node_iter, t,
1302                                         _k, _k->u64s, _k->u64s);
1303                 break;
1304
1305         case BCH_EXTENT_OVERLAP_ALL: {
1306                 struct bpos orig_pos = k.k->p;
1307
1308                 /* The insert key completely covers k, invalidate k */
1309                 if (!bkey_whiteout(k.k))
1310                         btree_keys_account_key_drop(&b->nr,
1311                                                 t - b->set, _k);
1312
1313                 bch2_drop_subtract(s, k);
1314                 k.k->p = bkey_start_pos(&insert->k);
1315                 if (!__extent_save(b, node_iter, _k, k.k)) {
1316                         /*
1317                          * Couldn't repack: we aren't necessarily able
1318                          * to repack if the new key is outside the range
1319                          * of the old extent, so we have to split
1320                          * @insert:
1321                          */
1322                         k.k->p = orig_pos;
1323                         extent_save(b, node_iter, _k, k.k);
1324
1325                         if (extent_insert_advance_pos(s, k.s_c) ==
1326                             BTREE_HOOK_RESTART_TRANS)
1327                                 return BTREE_INSERT_NEED_TRAVERSE;
1328
1329                         extent_insert_committed(s);
1330                         /*
1331                          * We split and inserted up to k.k->p - that
1332                          * has to coincide with iter->pos, so that we
1333                          * don't have anything more we have to insert
1334                          * until we recheck our journal reservation:
1335                          */
1336                         EBUG_ON(bkey_cmp(s->committed, k.k->p));
1337                 } else {
1338                         bch2_bset_fix_invalidated_key(b, t, _k);
1339                         bch2_btree_node_iter_fix(iter, b, node_iter, t,
1340                                                 _k, _k->u64s, _k->u64s);
1341                 }
1342
1343                 break;
1344         }
1345         case BCH_EXTENT_OVERLAP_MIDDLE: {
1346                 BKEY_PADDED(k) split;
1347                 /*
1348                  * The insert key falls 'in the middle' of k
1349                  * The insert key splits k in 3:
1350                  * - start only in k, preserve
1351                  * - middle common section, invalidate in k
1352                  * - end only in k, preserve
1353                  *
1354                  * We update the old key to preserve the start,
1355                  * insert will be the new common section,
1356                  * we manually insert the end that we are preserving.
1357                  *
1358                  * modify k _before_ doing the insert (which will move
1359                  * what k points to)
1360                  */
1361                 bkey_reassemble(&split.k, k.s_c);
1362                 split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
1363
1364                 bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
1365                 BUG_ON(bkey_deleted(&split.k.k));
1366
1367                 bch2_cut_subtract_front(s, insert->k.p, k);
1368                 BUG_ON(bkey_deleted(k.k));
1369                 extent_save(b, node_iter, _k, k.k);
1370
1371                 bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
1372                                 bkey_start_offset(&split.k.k),
1373                                 split.k.k.size);
1374                 extent_bset_insert(c, iter, &split.k);
1375                 break;
1376         }
1377         }
1378
1379         return BTREE_INSERT_OK;
1380 }
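
/*
 * Illustrative sketch: for BCH_EXTENT_OVERLAP_MIDDLE above, the existing key
 * k is split around the insert: a copy of k is cut back so it preserves the
 * head, k itself is cut front so it preserves the tail, and the insert
 * covers the middle.  The toy below derives those three ranges from an old
 * range that strictly contains the new one; names are hypothetical.
 */
struct toy_piece { unsigned long long start, end; };

static void toy_split_middle(unsigned long long old_start,
			     unsigned long long old_end,
			     unsigned long long new_start,
			     unsigned long long new_end,
			     struct toy_piece *front,	/* preserved via the re-inserted copy of k */
			     struct toy_piece *middle,	/* covered by the insert */
			     struct toy_piece *back)	/* what k itself is trimmed down to */
{
	front->start	= old_start;
	front->end	= new_start;

	middle->start	= new_start;
	middle->end	= new_end;

	back->start	= new_end;
	back->end	= old_end;
}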
1381
1382 static enum btree_insert_ret
1383 bch2_delete_fixup_extent(struct extent_insert_state *s)
1384 {
1385         struct bch_fs *c = s->trans->c;
1386         struct btree_iter *iter = s->insert->iter;
1387         struct btree *b = iter->nodes[0];
1388         struct btree_node_iter *node_iter = &iter->node_iters[0];
1389         struct bkey_packed *_k;
1390         struct bkey unpacked;
1391         struct bkey_i *insert = s->insert->k;
1392         enum btree_insert_ret ret = BTREE_INSERT_OK;
1393
1394         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
1395
1396         s->whiteout     = *insert;
1397         s->do_journal   = false;
1398
1399         while (bkey_cmp(s->committed, insert->k.p) < 0 &&
1400                (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
1401                (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
1402                 struct bset_tree *t = bch2_bkey_to_bset(b, _k);
1403                 struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
1404                 enum bch_extent_overlap overlap;
1405
1406                 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
1407                 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1408
1409                 if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
1410                         break;
1411
1412                 if (bkey_whiteout(k.k)) {
1413                         s->committed = bpos_min(insert->k.p, k.k->p);
1414                         goto next;
1415                 }
1416
1417                 overlap = bch2_extent_overlap(&insert->k, k.k);
1418
1419                 ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
1420                 if (ret != BTREE_INSERT_OK)
1421                         goto stop;
1422
1423                 switch (extent_insert_advance_pos(s, k.s_c)) {
1424                 case BTREE_HOOK_DO_INSERT:
1425                         break;
1426                 case BTREE_HOOK_NO_INSERT:
1427                         continue;
1428                 case BTREE_HOOK_RESTART_TRANS:
1429                         ret = BTREE_INSERT_NEED_TRAVERSE;
1430                         goto stop;
1431                 }
1432
1433                 s->do_journal = true;
1434
1435                 if (overlap == BCH_EXTENT_OVERLAP_ALL) {
1436                         btree_keys_account_key_drop(&b->nr,
1437                                                 t - b->set, _k);
1438                         bch2_subtract_sectors(s, k.s_c,
1439                                              bkey_start_offset(k.k), k.k->size);
1440                         _k->type = KEY_TYPE_DISCARD;
1441                         reserve_whiteout(b, t, _k);
1442                 } else if (k.k->needs_whiteout ||
1443                            bset_written(b, bset(b, t))) {
1444                         struct bkey_i discard = *insert;
1445
1446                         switch (overlap) {
1447                         case BCH_EXTENT_OVERLAP_FRONT:
1448                                 bch2_cut_front(bkey_start_pos(k.k), &discard);
1449                                 break;
1450                         case BCH_EXTENT_OVERLAP_BACK:
1451                                 bch2_cut_back(k.k->p, &discard.k);
1452                                 break;
1453                         default:
1454                                 break;
1455                         }
1456
1457                         discard.k.needs_whiteout = true;
1458
1459                         ret = extent_squash(s, insert, t, _k, k, overlap);
1460                         BUG_ON(ret != BTREE_INSERT_OK);
1461
1462                         extent_bset_insert(c, iter, &discard);
1463                 } else {
1464                         ret = extent_squash(s, insert, t, _k, k, overlap);
1465                         BUG_ON(ret != BTREE_INSERT_OK);
1466                 }
1467 next:
1468                 bch2_cut_front(s->committed, insert);
1469                 bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1470         }
1471
1472         if (bkey_cmp(s->committed, insert->k.p) < 0 &&
1473             ret == BTREE_INSERT_OK &&
1474             extent_insert_advance_pos(s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
1475                 ret = BTREE_INSERT_NEED_TRAVERSE;
1476 stop:
1477         extent_insert_committed(s);
1478
1479         bch2_fs_usage_apply(c, &s->stats, s->trans->disk_res,
1480                            gc_pos_btree_node(b));
1481
1482         EBUG_ON(bkey_cmp(iter->pos, s->committed));
1483         EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
1484                 !!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));
1485
1486         bch2_cut_front(iter->pos, insert);
1487
1488         if (insert->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
1489                 ret = BTREE_INSERT_NEED_TRAVERSE;
1490
1491         EBUG_ON(insert->k.size && ret == BTREE_INSERT_OK);
1492
1493         return ret;
1494 }
1495
1496 /**
1497  * bch2_insert_fixup_extent - insert a new extent and deal with overlaps
1498  *
1499  * this may result in not actually doing the insert, or inserting some subset
1500  * of the insert key. For cmpxchg operations this is where that logic lives.
1501  *
1502  * All subsets of @insert that need to be inserted are inserted using
1503  * bch2_btree_insert_and_journal(). If the btree node or the disk reservation
1504  * fills up, this function stops early, setting @iter->pos to the end of the
1505  * prefix of @insert that actually got inserted.
1506  *
1507  * BSET INVARIANTS: this function is responsible for maintaining all the
1508  * invariants for bsets of extents in memory. Things get really hairy with 0
1509  * size extents.
1510  *
1511  * within one bset:
1512  *
1513  * bkey_start_pos(bkey_next(k)) >= k->p
1514  * or bkey_start_offset(bkey_next(k)) >= k->p.offset
1515  *
1516  * i.e. strict ordering, no overlapping extents.
1517  *
1518  * multiple bsets (i.e. full btree node):
1519  *
1520  * ∀ k, j
1521  *   k.size != 0 ∧ j.size != 0 →
1522  *     ¬ (k > bkey_start_pos(j) ∧ k < j)
1523  *
1524  * i.e. no two overlapping keys _of nonzero size_
1525  *
1526  * We can't realistically maintain this invariant for zero size keys because of
1527  * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
1528  * there may be another 0 size key between them in another bset, and it will
1529  * thus overlap with the merged key.
1530  *
1531  * In addition, iter->pos indicates how much has been processed.
1532  * If iter->pos is not the same as the end of @insert, then
1533  * key insertion needs to continue/be retried.
1534  */
1535 enum btree_insert_ret
1536 bch2_insert_fixup_extent(struct btree_insert *trans,
1537                          struct btree_insert_entry *insert)
1538 {
1539         struct bch_fs *c = trans->c;
1540         struct btree_iter *iter = insert->iter;
1541         struct btree *b = iter->nodes[0];
1542         struct btree_node_iter *node_iter = &iter->node_iters[0];
1543         struct bkey_packed *_k;
1544         struct bkey unpacked;
1545         enum btree_insert_ret ret = BTREE_INSERT_OK;
1546
1547         struct extent_insert_state s = {
1548                 .trans          = trans,
1549                 .insert         = insert,
1550                 .committed      = insert->iter->pos,
1551                 .deleting       = bkey_whiteout(&insert->k->k),
1552         };
1553
1554         EBUG_ON(iter->level);
1555         EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);
1556
1557         if (s.deleting)
1558                 return bch2_delete_fixup_extent(&s);
1559
1560         /*
1561          * As we process overlapping extents, we advance @iter->pos both to
1562          * signal to our caller (btree_insert_key()) how much of @insert->k has
1563          * been inserted, and also to keep @iter->pos consistent with
1564          * @insert->k and the node iterator that we're advancing:
1565          */
1566         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1567
1568         if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1569                 bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
1570                                 bkey_start_offset(&insert->k->k),
1571                                 insert->k->k.size);
1572
1573         while (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
1574                (ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK &&
1575                (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
1576                 struct bset_tree *t = bch2_bkey_to_bset(b, _k);
1577                 struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
1578                 enum bch_extent_overlap overlap;
1579
1580                 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1581                 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1582
1583                 if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
1584                         break;
1585
1586                 overlap = bch2_extent_overlap(&insert->k->k, k.k);
1587
1588                 ret = extent_insert_check_split_compressed(&s, k.s_c, overlap);
1589                 if (ret != BTREE_INSERT_OK)
1590                         goto stop;
1591
1592                 if (!k.k->size)
1593                         goto squash;
1594
1595                 /*
1596                  * Only advance the position & call the hook for nonzero size
1597                  * extents. If the hook returned BTREE_HOOK_NO_INSERT,
1598                  * @insert->k no longer overlaps with @k:
1599                  */
1600                 switch (extent_insert_advance_pos(&s, k.s_c)) {
1601                 case BTREE_HOOK_DO_INSERT:
1602                         break;
1603                 case BTREE_HOOK_NO_INSERT:
1604                         continue;
1605                 case BTREE_HOOK_RESTART_TRANS:
1606                         ret = BTREE_INSERT_NEED_TRAVERSE;
1607                         goto stop;
1608                 }
1609
1610                 if (k.k->size &&
1611                     (k.k->needs_whiteout || bset_written(b, bset(b, t))))
1612                         insert->k->k.needs_whiteout = true;
1613
1614                 if (overlap == BCH_EXTENT_OVERLAP_ALL &&
1615                     bkey_whiteout(k.k) &&
1616                     k.k->needs_whiteout) {
1617                         unreserve_whiteout(b, t, _k);
1618                         _k->needs_whiteout = false;
1619                 }
1620 squash:
1621                 ret = extent_squash(&s, insert->k, t, _k, k, overlap);
1622                 if (ret != BTREE_INSERT_OK)
1623                         goto stop;
1624         }
1625
1626         if (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
1627             ret == BTREE_INSERT_OK &&
1628             extent_insert_advance_pos(&s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
1629                 ret = BTREE_INSERT_NEED_TRAVERSE;
1630 stop:
1631         extent_insert_committed(&s);
1632         /*
1633          * Subtract any remaining sectors from @insert, if we bailed out early
1634          * and didn't fully insert @insert:
1635          */
1636         if (insert->k->k.size &&
1637             !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1638                 bch2_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
1639                                      bkey_start_offset(&insert->k->k),
1640                                      insert->k->k.size);
1641
1642         bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
1643                            gc_pos_btree_node(b));
1644
1645         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1646         EBUG_ON(bkey_cmp(iter->pos, s.committed));
1647         EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
1648                 !!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));
1649
1650         if (insert->k->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
1651                 ret = BTREE_INSERT_NEED_TRAVERSE;
1652
1653         EBUG_ON(insert->k->k.size && ret == BTREE_INSERT_OK);
1654
1655         return ret;
1656 }
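
/*
 * A minimal sketch (not part of the original source; helper name
 * hypothetical) of the within-bset invariant documented above: for two
 * consecutive keys in one bset, the next key must start at or after the
 * point where the previous key ends (a bkey's p is its end position).
 */
static inline bool example_extents_ordered(const struct bkey *k,
                                           const struct bkey *next)
{
        return bkey_cmp(bkey_start_pos(next), k->p) >= 0;
}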
1657
1658 static const char *bch2_extent_invalid(const struct bch_fs *c,
1659                                        struct bkey_s_c k)
1660 {
1661         if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
1662                 return "value too big";
1663
1664         if (!k.k->size)
1665                 return "zero key size";
1666
1667         switch (k.k->type) {
1668         case BCH_EXTENT:
1669         case BCH_EXTENT_CACHED: {
1670                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1671                 const union bch_extent_entry *entry;
1672                 const union bch_extent_crc *crc;
1673                 const struct bch_extent_ptr *ptr;
1674                 unsigned size_ondisk = e.k->size;
1675                 const char *reason;
1676
1677                 extent_for_each_entry(e, entry) {
1678                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
1679                                 return "invalid extent entry type";
1680
1681                         if (extent_entry_is_crc(entry)) {
1682                                 crc = entry_to_crc(entry);
1683
1684                                 if (crc_offset(crc) + e.k->size >
1685                                     crc_uncompressed_size(e.k, crc))
1686                                         return "checksum offset + key size > uncompressed size";
1687
1688                                 size_ondisk = crc_compressed_size(e.k, crc);
1689
1690                                 if (!bch2_checksum_type_valid(c, crc_csum_type(crc)))
1691                                         return "invalid checksum type";
1692
1693                                 if (crc_compression_type(crc) >= BCH_COMPRESSION_NR)
1694                                         return "invalid compression type";
1695                         } else {
1696                                 ptr = entry_to_ptr(entry);
1697
1698                                 reason = extent_ptr_invalid(c, e, &entry->ptr,
1699                                                             size_ondisk, false);
1700                                 if (reason)
1701                                         return reason;
1702                         }
1703                 }
1704
1705                 return NULL;
1706         }
1707
1708         case BCH_RESERVATION: {
1709                 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
1710
1711                 if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
1712                         return "incorrect value size";
1713
1714                 if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
1715                         return "invalid nr_replicas";
1716
1717                 return NULL;
1718         }
1719
1720         default:
1721                 return "invalid value type";
1722         }
1723 }
1724
1725 static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
1726                                           struct bkey_s_c_extent e)
1727 {
1728         const struct bch_extent_ptr *ptr;
1729         struct bch_dev *ca;
1730         struct bucket *g;
1731         unsigned seq, stale;
1732         char buf[160];
1733         bool bad;
1734         unsigned ptrs_per_tier[BCH_TIER_MAX];
1735         unsigned replicas = 0;
1736
1737         /*
1738          * XXX: we should be doing most/all of these checks at startup time,
1739          * where we check bch2_bkey_invalid() in btree_node_read_done()
1740          *
1741          * But note that we can't check for stale pointers or incorrect gc marks
1742          * until after journal replay is done (it might be an extent that's
1743          * going to get overwritten during replay)
1744          */
1745
1746         memset(ptrs_per_tier, 0, sizeof(ptrs_per_tier));
1747
1748         extent_for_each_ptr(e, ptr) {
1749                 ca = c->devs[ptr->dev];
1750                 g = PTR_BUCKET(ca, ptr);
1751                 replicas++;
1752                 ptrs_per_tier[ca->mi.tier]++;
1753
1754                 /*
1755                  * If journal replay hasn't finished, we might be seeing keys
1756                  * that will be overwritten by the time journal replay is done:
1757                  */
1758                 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1759                         continue;
1760
1761                 stale = 0;
1762
1763                 do {
1764                         struct bucket_mark mark;
1765
1766                         seq = read_seqcount_begin(&c->gc_pos_lock);
1767                         mark = READ_ONCE(g->mark);
1768
1769                         /* between mark and bucket gen */
1770                         smp_rmb();
1771
1772                         stale = ptr_stale(ca, ptr);
1773
1774                         bch2_fs_bug_on(stale && !ptr->cached, c,
1775                                          "stale dirty pointer");
1776
1777                         bch2_fs_bug_on(stale > 96, c,
1778                                          "key too stale: %i",
1779                                          stale);
1780
1781                         if (stale)
1782                                 break;
1783
1784                         bad = (mark.data_type != BUCKET_DATA ||
1785                                (gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
1786                                 !mark.owned_by_allocator &&
1787                                 !(ptr->cached
1788                                   ? mark.cached_sectors
1789                                   : mark.dirty_sectors)));
1790                 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
1791
1792                 if (bad)
1793                         goto bad_ptr;
1794         }
1795
1796         if (replicas > BCH_REPLICAS_MAX) {
1797                 bch2_bkey_val_to_text(c, btree_node_type(b), buf,
1798                                      sizeof(buf), e.s_c);
1799                 bch2_fs_bug(c,
1800                         "extent key bad (too many replicas: %u): %s",
1801                         replicas, buf);
1802                 return;
1803         }
1804
1805         if (!bkey_extent_is_cached(e.k) &&
1806             !bch2_sb_has_replicas(c, e, BCH_DATA_USER)) {
1807                 bch2_bkey_val_to_text(c, btree_node_type(b),
1808                                      buf, sizeof(buf), e.s_c);
1809                 bch2_fs_bug(c,
1810                         "extent key bad (replicas not marked in superblock):\n%s",
1811                         buf);
1812                 return;
1813         }
1814
1815         return;
1816
1817 bad_ptr:
1818         bch2_bkey_val_to_text(c, btree_node_type(b), buf,
1819                              sizeof(buf), e.s_c);
1820         bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
1821                    "gen %i last_gc %i mark 0x%08x",
1822                    buf, PTR_BUCKET_NR(ca, ptr), PTR_BUCKET(ca, ptr)->mark.gen,
1823                    ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
1824                    (unsigned) g->mark.counter);
1825         return;
1826 }
1827
1828 static void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b,
1829                                    struct bkey_s_c k)
1830 {
1831         switch (k.k->type) {
1832         case BCH_EXTENT:
1833         case BCH_EXTENT_CACHED:
1834                 bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
1835                 break;
1836         case BCH_RESERVATION:
1837                 break;
1838         default:
1839                 BUG();
1840         }
1841 }
1842
1843 static void bch2_extent_to_text(struct bch_fs *c, char *buf,
1844                                 size_t size, struct bkey_s_c k)
1845 {
1846         char *out = buf, *end = buf + size;
1847         const char *invalid;
1848
1849 #define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))
1850
1851         if (bkey_extent_is_data(k.k))
1852                 out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
1853
1854         invalid = bch2_extent_invalid(c, k);
1855         if (invalid)
1856                 p(" invalid: %s", invalid);
1857 #undef p
1858 }
1859
1860 static unsigned PTR_TIER(struct bch_fs *c,
1861                          const struct bch_extent_ptr *ptr)
1862 {
1863         return c->devs[ptr->dev]->mi.tier;
1864 }
1865
1866 static void bch2_extent_crc_init(union bch_extent_crc *crc,
1867                                  unsigned compressed_size,
1868                                  unsigned uncompressed_size,
1869                                  unsigned compression_type,
1870                                  unsigned nonce,
1871                                  struct bch_csum csum, unsigned csum_type)
1872 {
1873         if (bch_crc_bytes[csum_type]    <= 4 &&
1874             uncompressed_size           <= CRC32_SIZE_MAX &&
1875             nonce                       <= CRC32_NONCE_MAX) {
1876                 crc->crc32 = (struct bch_extent_crc32) {
1877                         .type = 1 << BCH_EXTENT_ENTRY_crc32,
1878                         ._compressed_size       = compressed_size - 1,
1879                         ._uncompressed_size     = uncompressed_size - 1,
1880                         .offset                 = 0,
1881                         .compression_type       = compression_type,
1882                         .csum_type              = csum_type,
1883                         .csum                   = *((__le32 *) &csum.lo),
1884                 };
1885                 return;
1886         }
1887
1888         if (bch_crc_bytes[csum_type]    <= 10 &&
1889             uncompressed_size           <= CRC64_SIZE_MAX &&
1890             nonce                       <= CRC64_NONCE_MAX) {
1891                 crc->crc64 = (struct bch_extent_crc64) {
1892                         .type = 1 << BCH_EXTENT_ENTRY_crc64,
1893                         ._compressed_size       = compressed_size - 1,
1894                         ._uncompressed_size     = uncompressed_size - 1,
1895                         .offset                 = 0,
1896                         .nonce                  = nonce,
1897                         .compression_type       = compression_type,
1898                         .csum_type              = csum_type,
1899                         .csum_lo                = csum.lo,
1900                         .csum_hi                = *((__le16 *) &csum.hi),
1901                 };
1902                 return;
1903         }
1904
1905         if (bch_crc_bytes[csum_type]    <= 16 &&
1906             uncompressed_size           <= CRC128_SIZE_MAX &&
1907             nonce                       <= CRC128_NONCE_MAX) {
1908                 crc->crc128 = (struct bch_extent_crc128) {
1909                         .type = 1 << BCH_EXTENT_ENTRY_crc128,
1910                         ._compressed_size       = compressed_size - 1,
1911                         ._uncompressed_size     = uncompressed_size - 1,
1912                         .offset                 = 0,
1913                         .nonce                  = nonce,
1914                         .compression_type       = compression_type,
1915                         .csum_type              = csum_type,
1916                         .csum                   = csum,
1917                 };
1918                 return;
1919         }
1920
1921         BUG();
1922 }
1923
1924 void bch2_extent_crc_append(struct bkey_i_extent *e,
1925                             unsigned compressed_size,
1926                             unsigned uncompressed_size,
1927                             unsigned compression_type,
1928                             unsigned nonce,
1929                             struct bch_csum csum, unsigned csum_type)
1930 {
1931         union bch_extent_crc *crc;
1932
1933         BUG_ON(compressed_size > uncompressed_size);
1934         BUG_ON(uncompressed_size != e->k.size);
1935         BUG_ON(!compressed_size || !uncompressed_size);
1936
1937         /*
1938          * Look up the last crc entry, so we can check if we need to add
1939          * another:
1940          */
1941         extent_for_each_crc(extent_i_to_s(e), crc)
1942                 ;
1943
1944         if (!crc && !csum_type && !compression_type)
1945                 return;
1946
1947         if (crc &&
1948             crc_compressed_size(&e->k, crc)     == compressed_size &&
1949             crc_uncompressed_size(&e->k, crc)   == uncompressed_size &&
1950             crc_offset(crc)                     == 0 &&
1951             crc_nonce(crc)                      == nonce &&
1952             crc_csum_type(crc)                  == csum_type &&
1953             crc_compression_type(crc)           == compression_type &&
1954             crc_csum(crc).lo                    == csum.lo &&
1955             crc_csum(crc).hi                    == csum.hi)
1956                 return;
1957
1958         bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)),
1959                             compressed_size,
1960                             uncompressed_size,
1961                             compression_type,
1962                             nonce, csum, csum_type);
1963         __extent_entry_push(e);
1964 }
1965
1966 /*
1967  * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
1968  *
1969  * Returns true if @k should be dropped entirely
1970  *
1971  * For existing keys, only called when btree nodes are being rewritten, not when
1972  * they're merely being compacted/resorted in memory.
1973  */
1974 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
1975 {
1976         struct bkey_s_extent e;
1977
1978         switch (k.k->type) {
1979         case KEY_TYPE_ERROR:
1980                 return false;
1981
1982         case KEY_TYPE_DELETED:
1983         case KEY_TYPE_COOKIE:
1984                 return true;
1985
1986         case KEY_TYPE_DISCARD:
1987                 return bversion_zero(k.k->version);
1988
1989         case BCH_EXTENT:
1990         case BCH_EXTENT_CACHED:
1991                 e = bkey_s_to_extent(k);
1992
1993                 bch2_extent_drop_stale(c, e);
1994
1995                 if (!bkey_val_u64s(e.k)) {
1996                         if (bkey_extent_is_cached(e.k)) {
1997                                 k.k->type = KEY_TYPE_DISCARD;
1998                                 if (bversion_zero(k.k->version))
1999                                         return true;
2000                         } else {
2001                                 k.k->type = KEY_TYPE_ERROR;
2002                         }
2003                 }
2004
2005                 return false;
2006         case BCH_RESERVATION:
2007                 return false;
2008         default:
2009                 BUG();
2010         }
2011 }
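
/*
 * A minimal usage sketch (hypothetical caller, not from the original file):
 * bch2_extent_normalize() returns true when the key should be dropped
 * entirely, so a node rewrite keeps only the keys for which it returns
 * false.
 */
static inline bool example_keep_on_rewrite(struct bch_fs *c, struct bkey_s k)
{
        return !bch2_extent_normalize(c, k);
}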
2012
2013 void bch2_extent_mark_replicas_cached(struct bch_fs *c,
2014                                       struct bkey_s_extent e,
2015                                       unsigned nr_cached)
2016 {
2017         struct bch_extent_ptr *ptr;
2018         bool have_higher_tier;
2019         unsigned tier = 0;
2020
2021         if (!nr_cached)
2022                 return;
2023
2024         do {
2025                 have_higher_tier = false;
2026
2027                 extent_for_each_ptr(e, ptr) {
2028                         if (!ptr->cached &&
2029                             PTR_TIER(c, ptr) == tier) {
2030                                 ptr->cached = true;
2031                                 nr_cached--;
2032                                 if (!nr_cached)
2033                                         return;
2034                         }
2035
2036                         if (PTR_TIER(c, ptr) > tier)
2037                                 have_higher_tier = true;
2038                 }
2039
2040                 tier++;
2041         } while (have_higher_tier);
2042 }
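
/*
 * Illustrative sketch (hypothetical caller and helper name): given a target
 * number of dirty replicas, count the dirty pointers and hand the surplus to
 * bch2_extent_mark_replicas_cached(), which demotes them to cached starting
 * from the lowest tier.
 */
static void example_demote_extra_replicas(struct bch_fs *c,
                                          struct bkey_s_extent e,
                                          unsigned target_dirty)
{
        struct bch_extent_ptr *ptr;
        unsigned dirty = 0;

        extent_for_each_ptr(e, ptr)
                if (!ptr->cached)
                        dirty++;

        if (dirty > target_dirty)
                bch2_extent_mark_replicas_cached(c, e, dirty - target_dirty);
}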
2043
2044 /*
2045  * This picks a non-stale pointer, preferably from a device other than
2046  * @avoid. @avoid can be NULL, meaning pick any. If there are no non-stale
2047  * pointers to other devices, it will still pick a pointer from @avoid.
2048  * Note that it prefers lower-numbered pointers to higher-numbered pointers,
2049  * as the pointers are sorted by tier, hence preferring pointers to tier 0
2050  * over pointers to tier 1.
2051  */
2052 void bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
2053                           struct bch_devs_mask *avoid,
2054                           struct extent_pick_ptr *ret)
2055 {
2056         struct bkey_s_c_extent e;
2057
2058         switch (k.k->type) {
2059         case KEY_TYPE_DELETED:
2060         case KEY_TYPE_DISCARD:
2061         case KEY_TYPE_COOKIE:
2062                 ret->ca = NULL;
2063                 return;
2064
2065         case KEY_TYPE_ERROR:
2066                 ret->ca = ERR_PTR(-EIO);
2067                 return;
2068
2069         case BCH_EXTENT:
2070         case BCH_EXTENT_CACHED:
2071                 e = bkey_s_c_to_extent(k);
2072                 ret->ca = NULL;
2073
2074                 extent_pick_read_device(c, bkey_s_c_to_extent(k), avoid, ret);
2075
2076                 if (!ret->ca && !bkey_extent_is_cached(e.k))
2077                         ret->ca = ERR_PTR(-EIO);
2078                 return;
2079
2080         case BCH_RESERVATION:
2081                 ret->ca = NULL;
2082                 return;
2083
2084         default:
2085                 BUG();
2086         }
2087 }
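
/*
 * A minimal usage sketch (hypothetical caller): the three outcomes of
 * bch2_extent_pick_ptr() - an error pointer for unreadable data, NULL for
 * a hole/reservation, or a device to read from. Passing NULL for @avoid
 * means any device may be picked.
 */
static void example_pick_for_read(struct bch_fs *c, struct bkey_s_c k)
{
        struct extent_pick_ptr pick;

        bch2_extent_pick_ptr(c, k, NULL, &pick);

        if (IS_ERR(pick.ca))
                return;         /* no usable copy: surface a read error */

        if (!pick.ca)
                return;         /* hole or reservation: nothing to read */

        /* pick.ca is the chosen device; issue the read against it here */
}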
2088
2089 static enum merge_result bch2_extent_merge(struct bch_fs *c,
2090                                            struct btree *bk,
2091                                            struct bkey_i *l, struct bkey_i *r)
2092 {
2093         struct bkey_s_extent el, er;
2094         union bch_extent_entry *en_l, *en_r;
2095
2096         if (key_merging_disabled(c))
2097                 return BCH_MERGE_NOMERGE;
2098
2099         /*
2100          * Generic header checks
2101          * Assumes left and right are in order
2102          * Left and right must be exactly aligned
2103          */
2104
2105         if (l->k.u64s           != r->k.u64s ||
2106             l->k.type           != r->k.type ||
2107             bversion_cmp(l->k.version, r->k.version) ||
2108             bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
2109                 return BCH_MERGE_NOMERGE;
2110
2111         switch (l->k.type) {
2112         case KEY_TYPE_DELETED:
2113         case KEY_TYPE_DISCARD:
2114         case KEY_TYPE_ERROR:
2115                 /* These types are mergeable, and no val to check */
2116                 break;
2117
2118         case BCH_EXTENT:
2119         case BCH_EXTENT_CACHED:
2120                 el = bkey_i_to_s_extent(l);
2121                 er = bkey_i_to_s_extent(r);
2122
2123                 extent_for_each_entry(el, en_l) {
2124                         struct bch_extent_ptr *lp, *rp;
2125                         struct bch_dev *ca;
2126
2127                         en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
2128
2129                         if ((extent_entry_type(en_l) !=
2130                              extent_entry_type(en_r)) ||
2131                             extent_entry_is_crc(en_l))
2132                                 return BCH_MERGE_NOMERGE;
2133
2134                         lp = &en_l->ptr;
2135                         rp = &en_r->ptr;
2136
2137                         if (lp->offset + el.k->size     != rp->offset ||
2138                             lp->dev                     != rp->dev ||
2139                             lp->gen                     != rp->gen)
2140                                 return BCH_MERGE_NOMERGE;
2141
2142                         /* We don't allow extents to straddle buckets: */
2143                         ca = c->devs[lp->dev];
2144
2145                         if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
2146                                 return BCH_MERGE_NOMERGE;
2147                 }
2148
2149                 break;
2150         case BCH_RESERVATION: {
2151                 struct bkey_i_reservation *li = bkey_i_to_reservation(l);
2152                 struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
2153
2154                 if (li->v.generation != ri->v.generation ||
2155                     li->v.nr_replicas != ri->v.nr_replicas)
2156                         return BCH_MERGE_NOMERGE;
2157                 break;
2158         }
2159         default:
2160                 return BCH_MERGE_NOMERGE;
2161         }
2162
2163         l->k.needs_whiteout |= r->k.needs_whiteout;
2164
2165         /* Keys with no pointers aren't restricted to one bucket and could
2166          * overflow KEY_SIZE
2167          */
2168         if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
2169                 bch2_key_resize(&l->k, KEY_SIZE_MAX);
2170                 bch2_cut_front(l->k.p, r);
2171                 return BCH_MERGE_PARTIAL;
2172         }
2173
2174         bch2_key_resize(&l->k, l->k.size + r->k.size);
2175
2176         return BCH_MERGE_MERGE;
2177 }
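
/*
 * A minimal sketch (helper name hypothetical) of the alignment requirement
 * checked at the top of bch2_extent_merge(): two extents can only merge when
 * the left one ends exactly where the right one starts.
 */
static inline bool example_extents_adjacent(const struct bkey *l,
                                            const struct bkey *r)
{
        return !bkey_cmp(l->p, bkey_start_pos(r));
}

/*
 * Worked example for the BCH_MERGE_PARTIAL path above, with hypothetical
 * numbers: if KEY_SIZE_MAX were 1024 and l/r had sizes 1000/100, l would be
 * resized to 1024 and r cut at the front to its remaining 76 sectors, to be
 * merged (or kept) separately.
 */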
2178
2179 static void extent_i_save(struct btree *b, struct bkey_packed *dst,
2180                           struct bkey_i *src)
2181 {
2182         struct bkey_format *f = &b->format;
2183         struct bkey_i *dst_unpacked;
2184
2185         BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k));
2186
2187         /*
2188          * We don't want the bch2_verify_key_order() call in extent_save(),
2189          * because we may be out of order with deleted keys that are about to be
2190          * removed by extent_bset_insert()
2191          */
2192
2193         if ((dst_unpacked = packed_to_bkey(dst)))
2194                 bkey_copy(dst_unpacked, src);
2195         else
2196                 BUG_ON(!bch2_bkey_pack(dst, src, f));
2197 }
2198
2199 static bool extent_merge_one_overlapping(struct btree_iter *iter,
2200                                          struct bpos new_pos,
2201                                          struct bset_tree *t,
2202                                          struct bkey_packed *k, struct bkey uk,
2203                                          bool check, bool could_pack)
2204 {
2205         struct btree *b = iter->nodes[0];
2206         struct btree_node_iter *node_iter = &iter->node_iters[0];
2207
2208         BUG_ON(!bkey_deleted(k));
2209
2210         if (check) {
2211                 return !bkey_packed(k) || could_pack;
2212         } else {
2213                 uk.p = new_pos;
2214                 extent_save(b, node_iter, k, &uk);
2215                 bch2_bset_fix_invalidated_key(b, t, k);
2216                 bch2_btree_node_iter_fix(iter, b, node_iter, t,
2217                                         k, k->u64s, k->u64s);
2218                 return true;
2219         }
2220 }
2221
2222 static bool extent_merge_do_overlapping(struct btree_iter *iter,
2223                                         struct bkey *m, bool back_merge)
2224 {
2225         struct btree *b = iter->nodes[0];
2226         struct btree_node_iter *node_iter = &iter->node_iters[0];
2227         struct bset_tree *t;
2228         struct bkey_packed *k;
2229         struct bkey uk;
2230         struct bpos new_pos = back_merge ? m->p : bkey_start_pos(m);
2231         bool could_pack = bkey_pack_pos((void *) &uk, new_pos, b);
2232         bool check = true;
2233
2234         /*
2235          * @m is the new merged extent:
2236          *
2237          * The merge took place in the last bset; we know there can't be any 0
2238          * size extents overlapping with m there because if so they would have
2239          * been between the two extents we merged.
2240          *
2241          * But in the other bsets, we have to check for and fix such extents:
2242          */
2243 do_fixup:
2244         for_each_bset(b, t) {
2245                 if (t == bset_tree_last(b))
2246                         break;
2247
2248                 /*
2249                  * If we don't find this bset in the iterator, we already got
2250                  * to the end of that bset, so start searching from the end.
2251                  */
2252                 k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
2253
2254                 if (k == btree_bkey_last(b, t))
2255                         k = bch2_bkey_prev_all(b, t, k);
2256                 if (!k)
2257                         continue;
2258
2259                 if (back_merge) {
2260                         /*
2261                          * Back merge: 0 size extents will be before the key
2262                          * that was just inserted (and thus the iterator
2263                          * position) - walk backwards to find them
2264                          */
2265                         for (;
2266                              k &&
2267                              (uk = bkey_unpack_key(b, k),
2268                               bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
2269                              k = bch2_bkey_prev_all(b, t, k)) {
2270                                 if (bkey_cmp(uk.p, m->p) >= 0)
2271                                         continue;
2272
2273                                 if (!extent_merge_one_overlapping(iter, new_pos,
2274                                                 t, k, uk, check, could_pack))
2275                                         return false;
2276                         }
2277                 } else {
2278                         /* Front merge - walk forwards */
2279                         for (;
2280                              k != btree_bkey_last(b, t) &&
2281                              (uk = bkey_unpack_key(b, k),
2282                               bkey_cmp(uk.p, m->p) < 0);
2283                              k = bkey_next(k)) {
2284                                 if (bkey_cmp(uk.p,
2285                                              bkey_start_pos(m)) <= 0)
2286                                         continue;
2287
2288                                 if (!extent_merge_one_overlapping(iter, new_pos,
2289                                                 t, k, uk, check, could_pack))
2290                                         return false;
2291                         }
2292                 }
2293         }
2294
2295         if (check) {
2296                 check = false;
2297                 goto do_fixup;
2298         }
2299
2300         return true;
2301 }
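
/*
 * A minimal sketch (helper name hypothetical) of the condition the loops
 * above look for: a deleted 0 size key at position @p has to be moved iff it
 * now falls strictly inside the merged extent @m.
 */
static inline bool example_whiteout_needs_fixup(const struct bkey *m,
                                                struct bpos p)
{
        return bkey_cmp(p, bkey_start_pos(m)) > 0 &&
               bkey_cmp(p, m->p) < 0;
}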
2302
2303 /*
2304  * When merging an extent that we're inserting into a btree node, the new merged
2305  * extent could overlap with an existing 0 size extent - if we don't fix that,
2306  * it'll break the btree node iterator, so this code finds those 0 size extents
2307  * and shifts them out of the way.
2308  *
2309  * Also unpacks and repacks.
2310  */
2311 static bool bch2_extent_merge_inline(struct bch_fs *c,
2312                                      struct btree_iter *iter,
2313                                      struct bkey_packed *l,
2314                                      struct bkey_packed *r,
2315                                      bool back_merge)
2316 {
2317         struct btree *b = iter->nodes[0];
2318         struct btree_node_iter *node_iter = &iter->node_iters[0];
2319         const struct bkey_format *f = &b->format;
2320         struct bset_tree *t = bset_tree_last(b);
2321         struct bkey_packed *m;
2322         BKEY_PADDED(k) li;
2323         BKEY_PADDED(k) ri;
2324         struct bkey_i *mi;
2325         struct bkey tmp;
2326
2327         /*
2328          * We need to save copies of both l and r, because we might get a
2329          * partial merge (which modifies both) and then fail to repack
2330          */
2331         bch2_bkey_unpack(b, &li.k, l);
2332         bch2_bkey_unpack(b, &ri.k, r);
2333
2334         m = back_merge ? l : r;
2335         mi = back_merge ? &li.k : &ri.k;
2336
2337         /* l & r should be in last bset: */
2338         EBUG_ON(bch2_bkey_to_bset(b, m) != t);
2339
2340         switch (bch2_extent_merge(c, b, &li.k, &ri.k)) {
2341         case BCH_MERGE_NOMERGE:
2342                 return false;
2343         case BCH_MERGE_PARTIAL:
2344                 if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &mi->k, f))
2345                         return false;
2346
2347                 if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
2348                         return false;
2349
2350                 extent_i_save(b, m, mi);
2351                 bch2_bset_fix_invalidated_key(b, t, m);
2352
2353                 /*
2354                  * Update iterator to reflect what we just inserted - otherwise,
2355                  * the iter_fix() call is going to put us _before_ the key we
2356                  * just partially merged with:
2357                  */
2358                 if (back_merge)
2359                         bch2_btree_iter_set_pos_same_leaf(iter, li.k.k.p);
2360
2361                 bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
2362                                         t, m, m->u64s, m->u64s);
2363
2364                 if (!back_merge)
2365                         bkey_copy(packed_to_bkey(l), &li.k);
2366                 else
2367                         bkey_copy(packed_to_bkey(r), &ri.k);
2368                 return false;
2369         case BCH_MERGE_MERGE:
2370                 if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &li.k.k, f))
2371                         return false;
2372
2373                 if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
2374                         return false;
2375
2376                 extent_i_save(b, m, &li.k);
2377                 bch2_bset_fix_invalidated_key(b, t, m);
2378
2379                 bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
2380                                         t, m, m->u64s, m->u64s);
2381                 return true;
2382         default:
2383                 BUG();
2384         }
2385 }
2386
2387 const struct bkey_ops bch2_bkey_extent_ops = {
2388         .key_invalid    = bch2_extent_invalid,
2389         .key_debugcheck = bch2_extent_debugcheck,
2390         .val_to_text    = bch2_extent_to_text,
2391         .swab           = bch2_ptr_swab,
2392         .key_normalize  = bch2_ptr_normalize,
2393         .key_merge      = bch2_extent_merge,
2394         .is_extents     = true,
2395 };