1 /*
2  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3  *
4  * Code for managing the extent btree and dynamically updating the writeback
5  * dirty sector count.
6  */
7
8 #include "bcachefs.h"
9 #include "bkey_methods.h"
10 #include "btree_gc.h"
11 #include "btree_update.h"
12 #include "btree_update_interior.h"
13 #include "buckets.h"
14 #include "checksum.h"
15 #include "debug.h"
16 #include "dirent.h"
17 #include "disk_groups.h"
18 #include "error.h"
19 #include "extents.h"
20 #include "inode.h"
21 #include "journal.h"
22 #include "replicas.h"
23 #include "super.h"
24 #include "super-io.h"
25 #include "util.h"
26 #include "xattr.h"
27
28 #include <trace/events/bcachefs.h>
29
30 static void sort_key_next(struct btree_node_iter_large *iter,
31                           struct btree *b,
32                           struct btree_node_iter_set *i)
33 {
34         i->k += __btree_node_offset_to_key(b, i->k)->u64s;
35
36         if (i->k == i->end)
37                 *i = iter->data[--iter->used];
38 }
39
40 /*
41  * Returns true if l > r - unless l == r, in which case returns true if l is
42  * older than r.
43  *
44  * Necessary for btree_sort_fixup() - if there are multiple keys that compare
45  * equal in different sets, we have to process them newest to oldest.
46  */
47 #define key_sort_cmp(h, l, r)                                           \
48 ({                                                                      \
49         bkey_cmp_packed(b,                                              \
50                         __btree_node_offset_to_key(b, (l).k),           \
51                         __btree_node_offset_to_key(b, (r).k))           \
52                                                                         \
53         ?: (l).k - (r).k;                                               \
54 })
55
56 static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
57                                         struct btree *b)
58 {
59         struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
60         struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
61
62         if (bkey_whiteout(k))
63                 return true;
64
65         if (iter->used < 2)
66                 return false;
67
68         if (iter->used > 2 &&
69             key_sort_cmp(iter, r[0], r[1]) >= 0)
70                 r++;
71
72         /*
73          * key_sort_cmp() ensures that when keys compare equal the older key
74          * comes first; so if l->k compares equal to r->k then l->k is older and
75          * should be dropped.
76          */
77         return !bkey_cmp_packed(b,
78                                 __btree_node_offset_to_key(b, l->k),
79                                 __btree_node_offset_to_key(b, r->k));
80 }
81
82 struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
83                                         struct btree *b,
84                                         struct btree_node_iter_large *iter)
85 {
86         struct bkey_packed *out = dst->start;
87         struct btree_nr_keys nr;
88
89         memset(&nr, 0, sizeof(nr));
90
91         heap_resort(iter, key_sort_cmp, NULL);
92
93         while (!bch2_btree_node_iter_large_end(iter)) {
94                 if (!should_drop_next_key(iter, b)) {
95                         struct bkey_packed *k =
96                                 __btree_node_offset_to_key(b, iter->data->k);
97
98                         bkey_copy(out, k);
99                         btree_keys_account_key_add(&nr, 0, out);
100                         out = bkey_next(out);
101                 }
102
103                 sort_key_next(iter, b, iter->data);
104                 heap_sift_down(iter, 0, key_sort_cmp, NULL);
105         }
106
107         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
108         return nr;
109 }
110
111 /* Common among btree and extent ptrs */
112
113 const struct bch_extent_ptr *
114 bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
115 {
116         const struct bch_extent_ptr *ptr;
117
118         extent_for_each_ptr(e, ptr)
119                 if (ptr->dev == dev)
120                         return ptr;
121
122         return NULL;
123 }
124
125 void bch2_extent_drop_device(struct bkey_s_extent e, unsigned dev)
126 {
127         struct bch_extent_ptr *ptr;
128
129         bch2_extent_drop_ptrs(e, ptr, ptr->dev == dev);
130 }
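/*
 * Usage sketch for the two helpers above (the caller-side code is an
 * assumption for illustration, not taken from this file): check whether an
 * extent already has a pointer on a device and, if so, drop it.
 * bch2_extent_drop_device() is simply a no-op when there is no pointer to the
 * device:
 *
 *	if (bch2_extent_has_device(bkey_i_to_s_c_extent(&e->k_i), ca->dev_idx))
 *		bch2_extent_drop_device(extent_i_to_s(e), ca->dev_idx);
 */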
131
132 const struct bch_extent_ptr *
133 bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
134 {
135         const struct bch_extent_ptr *ptr;
136
137         extent_for_each_ptr(e, ptr) {
138                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
139
140                 if (ca->mi.group &&
141                     ca->mi.group - 1 == group)
142                         return ptr;
143         }
144
145         return NULL;
146 }
147
148 const struct bch_extent_ptr *
149 bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
150 {
151         const struct bch_extent_ptr *ptr;
152
153         extent_for_each_ptr(e, ptr)
154                 if (bch2_dev_in_target(c, ptr->dev, target) &&
155                     (!ptr->cached ||
156                      !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
157                         return ptr;
158
159         return NULL;
160 }
161
162 unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
163 {
164         const struct bch_extent_ptr *ptr;
165         unsigned nr_ptrs = 0;
166
167         extent_for_each_ptr(e, ptr)
168                 nr_ptrs++;
169
170         return nr_ptrs;
171 }
172
173 unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
174 {
175         struct bkey_s_c_extent e;
176         const struct bch_extent_ptr *ptr;
177         unsigned nr_ptrs = 0;
178
179         switch (k.k->type) {
180         case BCH_EXTENT:
181         case BCH_EXTENT_CACHED:
182                 e = bkey_s_c_to_extent(k);
183
184                 extent_for_each_ptr(e, ptr)
185                         nr_ptrs += !ptr->cached;
186                 break;
187
188         case BCH_RESERVATION:
189                 nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
190                 break;
191         }
192
193         return nr_ptrs;
194 }
195
196 unsigned bch2_extent_ptr_durability(struct bch_fs *c,
197                                     const struct bch_extent_ptr *ptr)
198 {
199         struct bch_dev *ca;
200
201         if (ptr->cached)
202                 return 0;
203
204         ca = bch_dev_bkey_exists(c, ptr->dev);
205
206         if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
207                 return 0;
208
209         return ca->mi.durability;
210 }
211
212 unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
213 {
214         const struct bch_extent_ptr *ptr;
215         unsigned durability = 0;
216
217         extent_for_each_ptr(e, ptr)
218                 durability += bch2_extent_ptr_durability(c, ptr);
219
220         return durability;
221 }
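/*
 * Worked example for the two durability helpers above (the numbers are
 * illustrative, not taken from this file): for an extent with
 *
 *   - a cached pointer                                  -> contributes 0
 *   - a dirty pointer on a device in state FAILED       -> contributes 0
 *   - a dirty pointer on a device with mi.durability 2  -> contributes 2
 *
 * bch2_extent_durability() returns 2; in general it is the sum of
 * ca->mi.durability over the extent's non-cached pointers to non-failed
 * devices.
 */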
222
223 unsigned bch2_extent_is_compressed(struct bkey_s_c k)
224 {
225         unsigned ret = 0;
226
227         switch (k.k->type) {
228         case BCH_EXTENT:
229         case BCH_EXTENT_CACHED: {
230                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
231                 const union bch_extent_entry *entry;
232                 struct extent_ptr_decoded p;
233
234                 extent_for_each_ptr_decode(e, p, entry)
235                         if (!p.ptr.cached &&
236                             p.crc.compression_type != BCH_COMPRESSION_NONE &&
237                             p.crc.compressed_size < p.crc.live_size)
238                                 ret += p.crc.compressed_size;
239         }
240         }
241
242         return ret;
243 }
244
245 bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
246                              struct bch_extent_ptr m, u64 offset)
247 {
248         const union bch_extent_entry *entry;
249         struct extent_ptr_decoded p;
250
251         extent_for_each_ptr_decode(e, p, entry)
252                 if (p.ptr.dev   == m.dev &&
253                     p.ptr.gen   == m.gen &&
254                     (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
255                     (s64) m.offset  - offset)
256                         return true;
257
258         return false;
259 }
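/*
 * The check above can be read as one equation (a restatement of the code, for
 * clarity): pointer @m, claimed to hold the data at logical offset @offset,
 * matches the decoded pointer p iff
 *
 *   m.offset == p.ptr.offset + p.crc.offset + (offset - bkey_start_offset(e.k))
 *
 * i.e. p.ptr.offset + p.crc.offset is the device offset of the extent's first
 * live sector (for uncompressed data), and the logical distance from the start
 * of the extent is added to both sides.
 */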
260
261 union bch_extent_entry *bch2_extent_drop_ptr(struct bkey_s_extent e,
262                                              struct bch_extent_ptr *ptr)
263 {
264         union bch_extent_entry *dst;
265         union bch_extent_entry *src;
266
267         EBUG_ON(ptr < &e.v->start->ptr ||
268                 ptr >= &extent_entry_last(e)->ptr);
269         EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
270
271         src = to_entry(ptr + 1);
272
273         if (src != extent_entry_last(e) &&
274             extent_entry_type(src) == BCH_EXTENT_ENTRY_ptr) {
275                 dst = to_entry(ptr);
276         } else {
277                 extent_for_each_entry(e, dst) {
278                         if (dst == to_entry(ptr))
279                                 break;
280
281                         if (extent_entry_next(dst) == to_entry(ptr) &&
282                             extent_entry_is_crc(dst))
283                                 break;
284                 }
285         }
286
287         memmove_u64s_down(dst, src,
288                           (u64 *) extent_entry_last(e) - (u64 *) src);
289         e.k->u64s -= (u64 *) src - (u64 *) dst;
290
291         return dst;
292 }
293
294 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
295                                   struct bch_extent_crc_unpacked n)
296 {
297         return !u.compression_type &&
298                 u.csum_type &&
299                 u.uncompressed_size > u.live_size &&
300                 bch2_csum_type_is_encryption(u.csum_type) ==
301                 bch2_csum_type_is_encryption(n.csum_type);
302 }
303
304 bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
305                                  struct bch_extent_crc_unpacked n)
306 {
307         struct bch_extent_crc_unpacked crc;
308         const union bch_extent_entry *i;
309
310         if (!n.csum_type)
311                 return false;
312
313         extent_for_each_crc(e, crc, i)
314                 if (can_narrow_crc(crc, n))
315                         return true;
316
317         return false;
318 }
319
320 /*
321  * We're writing another replica for this extent, so while we've got the data in
322  * memory we'll be computing a new checksum for the currently live data.
323  *
324  * If there are other replicas we aren't moving, and they are checksummed but
325  * not compressed, we can modify them to point to only the data that is
326  * currently live (so that readers won't have to bounce) while we've got the
327  * checksum we need:
328  */
329 bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
330                              struct bch_extent_crc_unpacked n)
331 {
332         struct bch_extent_crc_unpacked u;
333         struct extent_ptr_decoded p;
334         union bch_extent_entry *i;
335         bool ret = false;
336
337         /* Find a checksum entry that covers only live data: */
338         if (!n.csum_type) {
339                 extent_for_each_crc(extent_i_to_s(e), u, i)
340                         if (!u.compression_type &&
341                             u.csum_type &&
342                             u.live_size == u.uncompressed_size) {
343                                 n = u;
344                                 goto found;
345                         }
346                 return false;
347         }
348 found:
349         BUG_ON(n.compression_type);
350         BUG_ON(n.offset);
351         BUG_ON(n.live_size != e->k.size);
352
353 restart_narrow_pointers:
354         extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
355                 if (can_narrow_crc(p.crc, n)) {
356                         bch2_extent_drop_ptr(extent_i_to_s(e), &i->ptr);
357                         p.ptr.offset += p.crc.offset;
358                         p.crc = n;
359                         bch2_extent_ptr_decoded_append(e, &p);
360                         ret = true;
361                         goto restart_narrow_pointers;
362                 }
363
364         return ret;
365 }
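/*
 * Illustration of the narrowing above (the sizes are made up): a pointer whose
 * crc entry covers 128 uncompressed, checksummed sectors with crc.offset == 32
 * and crc.live_size == 64 passes can_narrow_crc() against a new checksum @n
 * computed over just the 64 live sectors.  The pointer is dropped and
 * re-appended with ptr.offset advanced by the old crc.offset and its crc
 * replaced by @n, so readers no longer have to read and checksum the 64 dead
 * sectors around the live region.
 */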
366
367 /* returns true if not equal */
368 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
369                                          struct bch_extent_crc_unpacked r)
370 {
371         return (l.csum_type             != r.csum_type ||
372                 l.compression_type      != r.compression_type ||
373                 l.compressed_size       != r.compressed_size ||
374                 l.uncompressed_size     != r.uncompressed_size ||
375                 l.offset                != r.offset ||
376                 l.live_size             != r.live_size ||
377                 l.nonce                 != r.nonce ||
378                 bch2_crc_cmp(l.csum, r.csum));
379 }
380
381 static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
382 {
383         struct bch_extent_ptr *ptr;
384
385         bch2_extent_drop_ptrs(e, ptr,
386                 ptr->cached &&
387                 ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
388 }
389
390 bool bch2_ptr_normalize(struct bch_fs *c, struct btree *b, struct bkey_s k)
391 {
392         return bch2_extent_normalize(c, k);
393 }
394
395 void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
396 {
397         switch (k->type) {
398         case BCH_EXTENT:
399         case BCH_EXTENT_CACHED: {
400                 union bch_extent_entry *entry;
401                 u64 *d = (u64 *) bkeyp_val(f, k);
402                 unsigned i;
403
404                 for (i = 0; i < bkeyp_val_u64s(f, k); i++)
405                         d[i] = swab64(d[i]);
406
407                 for (entry = (union bch_extent_entry *) d;
408                      entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
409                      entry = extent_entry_next(entry)) {
410                         switch (extent_entry_type(entry)) {
411                         case BCH_EXTENT_ENTRY_ptr:
412                                 break;
413                         case BCH_EXTENT_ENTRY_crc32:
414                                 entry->crc32.csum = swab32(entry->crc32.csum);
415                                 break;
416                         case BCH_EXTENT_ENTRY_crc64:
417                                 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
418                                 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
419                                 break;
420                         case BCH_EXTENT_ENTRY_crc128:
421                                 entry->crc128.csum.hi = (__force __le64)
422                                         swab64((__force u64) entry->crc128.csum.hi);
423                                 entry->crc128.csum.lo = (__force __le64)
424                                         swab64((__force u64) entry->crc128.csum.lo);
425                                 break;
426                         }
427                 }
428                 break;
429         }
430         }
431 }
432
433 static const char *extent_ptr_invalid(const struct bch_fs *c,
434                                       struct bkey_s_c_extent e,
435                                       const struct bch_extent_ptr *ptr,
436                                       unsigned size_ondisk,
437                                       bool metadata)
438 {
439         const struct bch_extent_ptr *ptr2;
440         struct bch_dev *ca;
441
442         if (ptr->dev >= c->sb.nr_devices ||
443             !c->devs[ptr->dev])
444                 return "pointer to invalid device";
445
446         ca = bch_dev_bkey_exists(c, ptr->dev);
447         if (!ca)
448                 return "pointer to invalid device";
449
450         extent_for_each_ptr(e, ptr2)
451                 if (ptr != ptr2 && ptr->dev == ptr2->dev)
452                         return "multiple pointers to same device";
453
454         if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
455                 return "offset past end of device";
456
457         if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
458                 return "offset before first bucket";
459
460         if (bucket_remainder(ca, ptr->offset) +
461             size_ondisk > ca->mi.bucket_size)
462                 return "spans multiple buckets";
463
464         return NULL;
465 }
466
467 static void extent_print_ptrs(struct printbuf *out, struct bch_fs *c,
468                               struct bkey_s_c_extent e)
469 {
470         const union bch_extent_entry *entry;
471         struct bch_extent_crc_unpacked crc;
472         const struct bch_extent_ptr *ptr;
473         struct bch_dev *ca;
474         bool first = true;
475
476         extent_for_each_entry(e, entry) {
477                 if (!first)
478                         pr_buf(out, " ");
479
480                 switch (__extent_entry_type(entry)) {
481                 case BCH_EXTENT_ENTRY_crc32:
482                 case BCH_EXTENT_ENTRY_crc64:
483                 case BCH_EXTENT_ENTRY_crc128:
484                         crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
485
486                         pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
487                                crc.compressed_size,
488                                crc.uncompressed_size,
489                                crc.offset, crc.nonce,
490                                crc.csum_type,
491                                crc.compression_type);
492                         break;
493                 case BCH_EXTENT_ENTRY_ptr:
494                         ptr = entry_to_ptr(entry);
495                         ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
496                                 ? bch_dev_bkey_exists(c, ptr->dev)
497                                 : NULL;
498
499                         pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
500                                (u64) ptr->offset, ptr->gen,
501                                ptr->cached ? " cached" : "",
502                                ca && ptr_stale(ca, ptr)
503                                ? " stale" : "");
504                         break;
505                 default:
506                         pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
507                         goto out;
508                 }
509
510                 first = false;
511         }
512 out:
513         if (bkey_extent_is_cached(e.k))
514                 pr_buf(out, " cached");
515 }
516
517 static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
518                                                    unsigned dev)
519 {
520         struct bch_dev_io_failures *i;
521
522         for (i = f->devs; i < f->devs + f->nr; i++)
523                 if (i->dev == dev)
524                         return i;
525
526         return NULL;
527 }
528
529 void bch2_mark_io_failure(struct bch_io_failures *failed,
530                           struct extent_ptr_decoded *p)
531 {
532         struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
533
534         if (!f) {
535                 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
536
537                 f = &failed->devs[failed->nr++];
538                 f->dev          = p->ptr.dev;
539                 f->nr_failed    = 1;
540                 f->nr_retries   = 0;
541         } else {
542                 f->nr_failed++;
543         }
544 }
545
546 /*
547  * returns true if p1 is better than p2:
548  */
549 static inline bool ptr_better(struct bch_fs *c,
550                               const struct extent_ptr_decoded p1,
551                               const struct extent_ptr_decoded p2)
552 {
553         struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
554         struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
555
556         u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
557         u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
558
559         /* Pick at random, biased in favor of the faster device: */
560
561         return bch2_rand_range(l1 + l2) > l1;
562 }
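/*
 * Assuming bch2_rand_range(n) returns a value uniformly distributed in [0, n)
 * (an assumption about that helper, not something defined in this file), the
 * test above returns true - i.e. prefers p1 - with probability roughly
 * l2 / (l1 + l2).  For example, with l1 = 1ms and l2 = 3ms of measured read
 * latency, p1 is chosen about 75% of the time.
 */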
563
564 static int extent_pick_read_device(struct bch_fs *c,
565                                    struct bkey_s_c_extent e,
566                                    struct bch_io_failures *failed,
567                                    struct extent_ptr_decoded *pick)
568 {
569         const union bch_extent_entry *entry;
570         struct extent_ptr_decoded p;
571         struct bch_dev_io_failures *f;
572         struct bch_dev *ca;
573         int ret = 0;
574
575         extent_for_each_ptr_decode(e, p, entry) {
576                 ca = bch_dev_bkey_exists(c, p.ptr.dev);
577
578                 if (p.ptr.cached && ptr_stale(ca, &p.ptr))
579                         continue;
580
581                 f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
582                 if (f && f->nr_failed >= f->nr_retries)
583                         continue;
584
585                 if (ret && !ptr_better(c, p, *pick))
586                         continue;
587
588                 *pick = p;
589                 ret = 1;
590         }
591
592         return ret;
593 }
594
595 /* Btree ptrs */
596
597 const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
598 {
599         if (bkey_extent_is_cached(k.k))
600                 return "cached";
601
602         if (k.k->size)
603                 return "nonzero key size";
604
605         if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
606                 return "value too big";
607
608         switch (k.k->type) {
609         case BCH_EXTENT: {
610                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
611                 const union bch_extent_entry *entry;
612                 const struct bch_extent_ptr *ptr;
613                 const char *reason;
614
615                 extent_for_each_entry(e, entry) {
616                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
617                                 return "invalid extent entry type";
618
619                         if (extent_entry_is_crc(entry))
620                                 return "has crc field";
621                 }
622
623                 extent_for_each_ptr(e, ptr) {
624                         reason = extent_ptr_invalid(c, e, ptr,
625                                                     c->opts.btree_node_size,
626                                                     true);
627                         if (reason)
628                                 return reason;
629                 }
630
631                 return NULL;
632         }
633
634         default:
635                 return "invalid value type";
636         }
637 }
638
639 void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
640                                struct bkey_s_c k)
641 {
642         struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
643         const struct bch_extent_ptr *ptr;
644         unsigned seq;
645         const char *err;
646         char buf[160];
647         struct bucket_mark mark;
648         struct bch_dev *ca;
649         unsigned replicas = 0;
650         bool bad;
651
652         extent_for_each_ptr(e, ptr) {
653                 ca = bch_dev_bkey_exists(c, ptr->dev);
654                 replicas++;
655
656                 if (!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags))
657                         continue;
658
659                 err = "stale";
660                 if (ptr_stale(ca, ptr))
661                         goto err;
662
663                 do {
664                         seq = read_seqcount_begin(&c->gc_pos_lock);
665                         mark = ptr_bucket_mark(ca, ptr);
666
667                         bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
668                                 (mark.data_type != BCH_DATA_BTREE ||
669                                  mark.dirty_sectors < c->opts.btree_node_size);
670                 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
671
672                 err = "inconsistent";
673                 if (bad)
674                         goto err;
675         }
676
677         if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
678             !bch2_bkey_replicas_marked(c, btree_node_type(b), e.s_c)) {
679                 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b), k);
680                 bch2_fs_bug(c,
681                         "btree key bad (replicas not marked in superblock):\n%s",
682                         buf);
683                 return;
684         }
685
686         return;
687 err:
688         bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b), k);
689         bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
690                     err, buf, PTR_BUCKET_NR(ca, ptr),
691                     mark.gen, (unsigned) mark.v.counter);
692 }
693
694 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
695                             struct bkey_s_c k)
696 {
697         const char *invalid;
698
699         if (bkey_extent_is_data(k.k))
700                 extent_print_ptrs(out, c, bkey_s_c_to_extent(k));
701
702         invalid = bch2_btree_ptr_invalid(c, k);
703         if (invalid)
704                 pr_buf(out, " invalid: %s", invalid);
705 }
706
707 int bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b,
708                         struct bch_io_failures *failed,
709                         struct extent_ptr_decoded *pick)
710 {
711         return extent_pick_read_device(c, bkey_i_to_s_c_extent(&b->key),
712                                        failed, pick);
713 }
714
715 /* Extents */
716
717 static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
718 {
719         u64 len = 0;
720
721         if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
722                 return false;
723
724         EBUG_ON(bkey_cmp(where, k.k->p) > 0);
725
726         len = k.k->p.offset - where.offset;
727
728         BUG_ON(len > k.k->size);
729
730         /*
731          * Don't readjust offset if the key size is now 0, because that could
732          * cause offset to point to the next bucket:
733          */
734         if (!len)
735                 k.k->type = KEY_TYPE_DELETED;
736         else if (bkey_extent_is_data(k.k)) {
737                 struct bkey_s_extent e = bkey_s_to_extent(k);
738                 union bch_extent_entry *entry;
739                 bool seen_crc = false;
740
741                 extent_for_each_entry(e, entry) {
742                         switch (extent_entry_type(entry)) {
743                         case BCH_EXTENT_ENTRY_ptr:
744                                 if (!seen_crc)
745                                         entry->ptr.offset += e.k->size - len;
746                                 break;
747                         case BCH_EXTENT_ENTRY_crc32:
748                                 entry->crc32.offset += e.k->size - len;
749                                 break;
750                         case BCH_EXTENT_ENTRY_crc64:
751                                 entry->crc64.offset += e.k->size - len;
752                                 break;
753                         case BCH_EXTENT_ENTRY_crc128:
754                                 entry->crc128.offset += e.k->size - len;
755                                 break;
756                         }
757
758                         if (extent_entry_is_crc(entry))
759                                 seen_crc = true;
760                 }
761         }
762
763         k.k->size = len;
764
765         return true;
766 }
767
768 bool bch2_cut_front(struct bpos where, struct bkey_i *k)
769 {
770         return __bch2_cut_front(where, bkey_i_to_s(k));
771 }
772
773 bool bch2_cut_back(struct bpos where, struct bkey *k)
774 {
775         u64 len = 0;
776
777         if (bkey_cmp(where, k->p) >= 0)
778                 return false;
779
780         EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
781
782         len = where.offset - bkey_start_offset(k);
783
784         BUG_ON(len > k->size);
785
786         k->p = where;
787         k->size = len;
788
789         if (!len)
790                 k->type = KEY_TYPE_DELETED;
791
792         return true;
793 }
794
795 /**
796  * bch2_key_resize - adjust size of @k
797  *
798  * bkey_start_offset(k) will be preserved, modifies where the extent ends
799  */
800 void bch2_key_resize(struct bkey *k,
801                     unsigned new_size)
802 {
803         k->p.offset -= k->size;
804         k->p.offset += new_size;
805         k->size = new_size;
806 }
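/*
 * Worked example for the trimming helpers above (offsets are sectors within
 * one inode, chosen for illustration): start from an extent spanning [0, 128),
 * i.e. bkey_start_offset() == 0, k->k.p.offset == 128, size == 128.
 *
 *   bch2_cut_front(POS(inode, 32), k)    -> [32, 128), size 96; pointer/crc
 *                                           offsets are advanced by the 32
 *                                           dropped sectors
 *   bch2_cut_back(POS(inode, 96), &k->k) -> [32, 96), size 64; pointers are
 *                                           untouched, the tail is simply no
 *                                           longer referenced
 *   bch2_key_resize(&k->k, 16)           -> [32, 48); the start offset is
 *                                           preserved, only the end moves
 */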
807
808 /*
809  * In extent_sort_fix_overlapping(), insert_fixup_extent(),
810  * extent_merge_inline() - we're modifying keys in place that are packed. To do
811  * that we have to unpack the key, modify the unpacked key - then this
812  * copies/repacks the unpacked to the original as necessary.
813  */
814 static void extent_save(struct btree *b, struct bkey_packed *dst,
815                         struct bkey *src)
816 {
817         struct bkey_format *f = &b->format;
818         struct bkey_i *dst_unpacked;
819
820         if ((dst_unpacked = packed_to_bkey(dst)))
821                 dst_unpacked->k = *src;
822         else
823                 BUG_ON(!bch2_bkey_pack_key(dst, src, f));
824 }
825
826 static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
827                           struct bkey_i *src)
828 {
829         struct bkey_format *f = &b->format;
830         struct bkey_i *dst_unpacked;
831         struct bkey_packed tmp;
832
833         if ((dst_unpacked = packed_to_bkey(dst)))
834                 dst_unpacked->k = src->k;
835         else if (bch2_bkey_pack_key(&tmp, &src->k, f))
836                 memcpy_u64s(dst, &tmp, f->key_u64s);
837         else
838                 return false;
839
840         memcpy_u64s(bkeyp_val(f, dst), &src->v, bkey_val_u64s(&src->k));
841         return true;
842 }
843
844 /*
845  * If keys compare equal, compare by pointer order:
846  *
847  * Necessary for sort_fix_overlapping() - if there are multiple keys that
848  * compare equal in different sets, we have to process them newest to oldest.
849  */
850 #define extent_sort_cmp(h, l, r)                                        \
851 ({                                                                      \
852         struct bkey _ul = bkey_unpack_key(b,                            \
853                                 __btree_node_offset_to_key(b, (l).k));  \
854         struct bkey _ur = bkey_unpack_key(b,                            \
855                                 __btree_node_offset_to_key(b, (r).k));  \
856                                                                         \
857         bkey_cmp(bkey_start_pos(&_ul),                                  \
858                  bkey_start_pos(&_ur)) ?: (r).k - (l).k;                \
859 })
860
861 static inline void extent_sort_sift(struct btree_node_iter_large *iter,
862                                     struct btree *b, size_t i)
863 {
864         heap_sift_down(iter, i, extent_sort_cmp, NULL);
865 }
866
867 static inline void extent_sort_next(struct btree_node_iter_large *iter,
868                                     struct btree *b,
869                                     struct btree_node_iter_set *i)
870 {
871         sort_key_next(iter, b, i);
872         heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
873 }
874
875 static void extent_sort_append(struct bch_fs *c,
876                                struct btree *b,
877                                struct btree_nr_keys *nr,
878                                struct bkey_packed *start,
879                                struct bkey_packed **prev,
880                                struct bkey_packed *k)
881 {
882         struct bkey_format *f = &b->format;
883         BKEY_PADDED(k) tmp;
884
885         if (bkey_whiteout(k))
886                 return;
887
888         bch2_bkey_unpack(b, &tmp.k, k);
889
890         if (*prev &&
891             bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
892                 return;
893
894         if (*prev) {
895                 bch2_bkey_pack(*prev, (void *) *prev, f);
896
897                 btree_keys_account_key_add(nr, 0, *prev);
898                 *prev = bkey_next(*prev);
899         } else {
900                 *prev = start;
901         }
902
903         bkey_copy(*prev, &tmp.k);
904 }
905
906 struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
907                                         struct bset *dst,
908                                         struct btree *b,
909                                         struct btree_node_iter_large *iter)
910 {
911         struct bkey_format *f = &b->format;
912         struct btree_node_iter_set *_l = iter->data, *_r;
913         struct bkey_packed *prev = NULL, *out, *lk, *rk;
914         struct bkey l_unpacked, r_unpacked;
915         struct bkey_s l, r;
916         struct btree_nr_keys nr;
917
918         memset(&nr, 0, sizeof(nr));
919
920         heap_resort(iter, extent_sort_cmp, NULL);
921
922         while (!bch2_btree_node_iter_large_end(iter)) {
923                 lk = __btree_node_offset_to_key(b, _l->k);
924
925                 if (iter->used == 1) {
926                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
927                         extent_sort_next(iter, b, _l);
928                         continue;
929                 }
930
931                 _r = iter->data + 1;
932                 if (iter->used > 2 &&
933                     extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
934                         _r++;
935
936                 rk = __btree_node_offset_to_key(b, _r->k);
937
938                 l = __bkey_disassemble(b, lk, &l_unpacked);
939                 r = __bkey_disassemble(b, rk, &r_unpacked);
940
941                 /* If current key and next key don't overlap, just append */
942                 if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
943                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
944                         extent_sort_next(iter, b, _l);
945                         continue;
946                 }
947
948                 /* Skip 0 size keys */
949                 if (!r.k->size) {
950                         extent_sort_next(iter, b, _r);
951                         continue;
952                 }
953
954                 /*
955                  * overlap: keep the newer key and trim the older key so they
956                  * don't overlap. comparing pointers tells us which one is
957                  * newer, since the bsets are appended one after the other.
958                  */
959
960                 /* can't happen because of comparison func */
961                 BUG_ON(_l->k < _r->k &&
962                        !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));
963
964                 if (_l->k > _r->k) {
965                         /* l wins, trim r */
966                         if (bkey_cmp(l.k->p, r.k->p) >= 0) {
967                                 sort_key_next(iter, b, _r);
968                         } else {
969                                 __bch2_cut_front(l.k->p, r);
970                                 extent_save(b, rk, r.k);
971                         }
972
973                         extent_sort_sift(iter, b, _r - iter->data);
974                 } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
975                         BKEY_PADDED(k) tmp;
976
977                         /*
978                          * r wins, but it overlaps in the middle of l - split l:
979                          */
980                         bkey_reassemble(&tmp.k, l.s_c);
981                         bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);
982
983                         __bch2_cut_front(r.k->p, l);
984                         extent_save(b, lk, l.k);
985
986                         extent_sort_sift(iter, b, 0);
987
988                         extent_sort_append(c, b, &nr, dst->start, &prev,
989                                            bkey_to_packed(&tmp.k));
990                 } else {
991                         bch2_cut_back(bkey_start_pos(r.k), l.k);
992                         extent_save(b, lk, l.k);
993                 }
994         }
995
996         if (prev) {
997                 bch2_bkey_pack(prev, (void *) prev, f);
998                 btree_keys_account_key_add(&nr, 0, prev);
999                 out = bkey_next(prev);
1000         } else {
1001                 out = dst->start;
1002         }
1003
1004         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
1005         return nr;
1006 }
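/*
 * Worked example of the overlap handling above (offsets are illustrative):
 * suppose an older bset contains A = [0, 128) and a newer bset contains
 * B = [32, 96).  A sorts first (smaller start), B is newer and ends before A
 * does, so the "r wins, split l" branch runs:
 *
 *   - a copy of A cut back to [0, 32) is appended to the output,
 *   - A itself is cut front to [96, 128) and re-sifted into the heap,
 *   - B is left intact and is appended once it reaches the front.
 *
 * The output ends up as [0, 32), [32, 96), [96, 128) with no overlaps.
 */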
1007
1008 struct extent_insert_state {
1009         struct btree_insert             *trans;
1010         struct btree_insert_entry       *insert;
1011         struct bpos                     committed;
1012         struct bch_fs_usage             stats;
1013
1014         /* for deleting: */
1015         struct bkey_i                   whiteout;
1016         bool                            update_journal;
1017         bool                            update_btree;
1018         bool                            deleting;
1019 };
1020
1021 static void bch2_add_sectors(struct extent_insert_state *s,
1022                              struct bkey_s_c k, u64 offset, s64 sectors)
1023 {
1024         struct bch_fs *c = s->trans->c;
1025         struct btree *b = s->insert->iter->l[0].b;
1026
1027         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
1028
1029         if (!sectors)
1030                 return;
1031
1032         bch2_mark_key(c, BKEY_TYPE_EXTENTS, k, sectors > 0, sectors,
1033                       gc_pos_btree_node(b), &s->stats,
1034                       s->trans->journal_res.seq, 0);
1035 }
1036
1037 static void bch2_subtract_sectors(struct extent_insert_state *s,
1038                                  struct bkey_s_c k, u64 offset, s64 sectors)
1039 {
1040         bch2_add_sectors(s, k, offset, -sectors);
1041 }
1042
1043 /* These wrappers subtract exactly the sectors that we're removing from @k */
1044 static void bch2_cut_subtract_back(struct extent_insert_state *s,
1045                                   struct bpos where, struct bkey_s k)
1046 {
1047         bch2_subtract_sectors(s, k.s_c, where.offset,
1048                              k.k->p.offset - where.offset);
1049         bch2_cut_back(where, k.k);
1050 }
1051
1052 static void bch2_cut_subtract_front(struct extent_insert_state *s,
1053                                    struct bpos where, struct bkey_s k)
1054 {
1055         bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
1056                              where.offset - bkey_start_offset(k.k));
1057         __bch2_cut_front(where, k);
1058 }
1059
1060 static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
1061 {
1062         if (k.k->size)
1063                 bch2_subtract_sectors(s, k.s_c,
1064                                      bkey_start_offset(k.k), k.k->size);
1065         k.k->size = 0;
1066         k.k->type = KEY_TYPE_DELETED;
1067 }
1068
1069 static bool bch2_extent_merge_inline(struct bch_fs *,
1070                                      struct btree_iter *,
1071                                      struct bkey_packed *,
1072                                      struct bkey_packed *,
1073                                      bool);
1074
1075 static void verify_extent_nonoverlapping(struct btree *b,
1076                                          struct btree_node_iter *_iter,
1077                                          struct bkey_i *insert)
1078 {
1079 #ifdef CONFIG_BCACHEFS_DEBUG
1080         struct btree_node_iter iter;
1081         struct bkey_packed *k;
1082         struct bkey uk;
1083
1084         iter = *_iter;
1085         k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_DISCARD);
1086         BUG_ON(k &&
1087                (uk = bkey_unpack_key(b, k),
1088                 bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));
1089
1090         iter = *_iter;
1091         k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_DISCARD);
1092 #if 0
1093         BUG_ON(k &&
1094                (uk = bkey_unpack_key(b, k),
1095                 bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
1096 #else
1097         if (k &&
1098             (uk = bkey_unpack_key(b, k),
1099              bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
1100                 char buf1[100];
1101                 char buf2[100];
1102
1103                 bch2_bkey_to_text(&PBUF(buf1), &insert->k);
1104                 bch2_bkey_to_text(&PBUF(buf2), &uk);
1105
1106                 bch2_dump_btree_node(b);
1107                 panic("insert > next :\n"
1108                       "insert %s\n"
1109                       "next   %s\n",
1110                       buf1, buf2);
1111         }
1112 #endif
1113
1114 #endif
1115 }
1116
1117 static void verify_modified_extent(struct btree_iter *iter,
1118                                    struct bkey_packed *k)
1119 {
1120         bch2_btree_iter_verify(iter, iter->l[0].b);
1121         bch2_verify_insert_pos(iter->l[0].b, k, k, k->u64s);
1122 }
1123
1124 static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
1125                                struct bkey_i *insert)
1126 {
1127         struct btree_iter_level *l = &iter->l[0];
1128         struct btree_node_iter node_iter;
1129         struct bkey_packed *k;
1130
1131         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
1132
1133         EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
1134         verify_extent_nonoverlapping(l->b, &l->iter, insert);
1135
1136         node_iter = l->iter;
1137         k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_DISCARD);
1138         if (k && !bkey_written(l->b, k) &&
1139             bch2_extent_merge_inline(c, iter, k, bkey_to_packed(insert), true))
1140                 return;
1141
1142         node_iter = l->iter;
1143         k = bch2_btree_node_iter_peek_filter(&node_iter, l->b, KEY_TYPE_DISCARD);
1144         if (k && !bkey_written(l->b, k) &&
1145             bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
1146                 return;
1147
1148         k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
1149
1150         bch2_bset_insert(l->b, &l->iter, k, insert, 0);
1151         bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
1152         bch2_btree_iter_verify(iter, l->b);
1153 }
1154
1155 static void extent_insert_committed(struct extent_insert_state *s)
1156 {
1157         struct bch_fs *c = s->trans->c;
1158         struct btree_iter *iter = s->insert->iter;
1159         struct bkey_i *insert = s->insert->k;
1160         BKEY_PADDED(k) split;
1161
1162         EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
1163         EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
1164
1165         bkey_copy(&split.k, insert);
1166         if (s->deleting)
1167                 split.k.k.type = KEY_TYPE_DISCARD;
1168
1169         if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1170                 bch2_cut_subtract_back(s, s->committed,
1171                                        bkey_i_to_s(&split.k));
1172         else
1173                 bch2_cut_back(s->committed, &split.k.k);
1174
1175         if (!bkey_cmp(s->committed, iter->pos))
1176                 return;
1177
1178         bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1179
1180         if (s->update_btree) {
1181                 if (debug_check_bkeys(c))
1182                         bch2_bkey_debugcheck(c, iter->l[0].b,
1183                                              bkey_i_to_s_c(&split.k));
1184
1185                 EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
1186
1187                 extent_bset_insert(c, iter, &split.k);
1188         }
1189
1190         if (s->update_journal) {
1191                 bkey_copy(&split.k, !s->deleting ? insert : &s->whiteout);
1192                 if (s->deleting)
1193                         split.k.k.type = KEY_TYPE_DISCARD;
1194
1195                 bch2_cut_back(s->committed, &split.k.k);
1196
1197                 EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
1198
1199                 bch2_btree_journal_key(s->trans, iter, &split.k);
1200         }
1201
1202         bch2_cut_front(s->committed, insert);
1203
1204         insert->k.needs_whiteout        = false;
1205         s->trans->did_work              = true;
1206 }
1207
1208 void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
1209 {
1210         struct btree *b = iter->l[0].b;
1211
1212         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1213
1214         bch2_cut_back(b->key.k.p, &k->k);
1215
1216         BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
1217 }
1218
1219 enum btree_insert_ret
1220 bch2_extent_can_insert(struct btree_insert *trans,
1221                        struct btree_insert_entry *insert,
1222                        unsigned *u64s)
1223 {
1224         struct btree_iter_level *l = &insert->iter->l[0];
1225         struct btree_node_iter node_iter = l->iter;
1226         enum bch_extent_overlap overlap;
1227         struct bkey_packed *_k;
1228         struct bkey unpacked;
1229         struct bkey_s_c k;
1230         int sectors;
1231
1232         BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
1233                !bch2_extent_is_atomic(&insert->k->k, insert->iter));
1234
1235         /*
1236          * We avoid creating whiteouts whenever possible when deleting, but
1237          * those optimizations mean we may potentially insert two whiteouts
1238          * instead of one (when we overlap with the front of one extent and the
1239          * back of another):
1240          */
1241         if (bkey_whiteout(&insert->k->k))
1242                 *u64s += BKEY_U64s;
1243
1244         _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
1245                                               KEY_TYPE_DISCARD);
1246         if (!_k)
1247                 return BTREE_INSERT_OK;
1248
1249         k = bkey_disassemble(l->b, _k, &unpacked);
1250
1251         overlap = bch2_extent_overlap(&insert->k->k, k.k);
1252
1253         /* account for having to split existing extent: */
1254         if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
1255                 *u64s += _k->u64s;
1256
1257         if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
1258             (sectors = bch2_extent_is_compressed(k))) {
1259                 int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
1260
1261                 if (trans->flags & BTREE_INSERT_NOFAIL)
1262                         flags |= BCH_DISK_RESERVATION_NOFAIL;
1263
1264                 switch (bch2_disk_reservation_add(trans->c,
1265                                 trans->disk_res,
1266                                 sectors, flags)) {
1267                 case 0:
1268                         break;
1269                 case -ENOSPC:
1270                         return BTREE_INSERT_ENOSPC;
1271                 case -EINTR:
1272                         return BTREE_INSERT_NEED_GC_LOCK;
1273                 default:
1274                         BUG();
1275                 }
1276         }
1277
1278         return BTREE_INSERT_OK;
1279 }
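/*
 * Numerical sketch of the reservation above (the size is made up): if the
 * insert lands in the middle of an existing compressed extent for which
 * bch2_extent_is_compressed() reports 32 sectors, that extent has to be split
 * into two keys, which may be charged for up to those 32 additional sectors on
 * disk - so 32 sectors are reserved up front, and with BTREE_INSERT_NOFAIL the
 * reservation is not allowed to fail.
 */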
1280
1281 static void
1282 extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
1283               struct bkey_packed *_k, struct bkey_s k,
1284               enum bch_extent_overlap overlap)
1285 {
1286         struct bch_fs *c = s->trans->c;
1287         struct btree_iter *iter = s->insert->iter;
1288         struct btree_iter_level *l = &iter->l[0];
1289
1290         switch (overlap) {
1291         case BCH_EXTENT_OVERLAP_FRONT:
1292                 /* insert overlaps with start of k: */
1293                 bch2_cut_subtract_front(s, insert->k.p, k);
1294                 BUG_ON(bkey_deleted(k.k));
1295                 extent_save(l->b, _k, k.k);
1296                 verify_modified_extent(iter, _k);
1297                 break;
1298
1299         case BCH_EXTENT_OVERLAP_BACK:
1300                 /* insert overlaps with end of k: */
1301                 bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
1302                 BUG_ON(bkey_deleted(k.k));
1303                 extent_save(l->b, _k, k.k);
1304
1305                 /*
1306                  * As the auxiliary tree is indexed by the end of the
1307                  * key and we've just changed the end, update the
1308                  * auxiliary tree.
1309                  */
1310                 bch2_bset_fix_invalidated_key(l->b, _k);
1311                 bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1312                                          _k, _k->u64s, _k->u64s);
1313                 verify_modified_extent(iter, _k);
1314                 break;
1315
1316         case BCH_EXTENT_OVERLAP_ALL: {
1317                 /* The insert key completely covers k, invalidate k */
1318                 if (!bkey_whiteout(k.k))
1319                         btree_account_key_drop(l->b, _k);
1320
1321                 bch2_drop_subtract(s, k);
1322
1323                 if (_k >= btree_bset_last(l->b)->start) {
1324                         unsigned u64s = _k->u64s;
1325
1326                         bch2_bset_delete(l->b, _k, _k->u64s);
1327                         bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1328                                                  _k, u64s, 0);
1329                         bch2_btree_iter_verify(iter, l->b);
1330                 } else {
1331                         extent_save(l->b, _k, k.k);
1332                         bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1333                                                  _k, _k->u64s, _k->u64s);
1334                         verify_modified_extent(iter, _k);
1335                 }
1336
1337                 break;
1338         }
1339         case BCH_EXTENT_OVERLAP_MIDDLE: {
1340                 BKEY_PADDED(k) split;
1341                 /*
1342                  * The insert key falls 'in the middle' of k
1343                  * The insert key splits k in 3:
1344                  * - start only in k, preserve
1345                  * - middle common section, invalidate in k
1346                  * - end only in k, preserve
1347                  *
1348                  * We update the old key to preserve the start,
1349                  * insert will be the new common section,
1350                  * we manually insert the end that we are preserving.
1351                  *
1352                  * modify k _before_ doing the insert (which will move
1353                  * what k points to)
1354                  */
1355                 bkey_reassemble(&split.k, k.s_c);
1356                 split.k.k.needs_whiteout |= bkey_written(l->b, _k);
1357
1358                 bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
1359                 BUG_ON(bkey_deleted(&split.k.k));
1360
1361                 bch2_cut_subtract_front(s, insert->k.p, k);
1362                 BUG_ON(bkey_deleted(k.k));
1363                 extent_save(l->b, _k, k.k);
1364                 verify_modified_extent(iter, _k);
1365
1366                 bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
1367                                 bkey_start_offset(&split.k.k),
1368                                 split.k.k.size);
1369                 extent_bset_insert(c, iter, &split.k);
1370                 break;
1371         }
1372         }
1373 }
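/*
 * The four cases above, drawn out (k is the existing key, i the insert; ranges
 * are [start, end)):
 *
 *   FRONT:   k:        |--------|      i covers the start of k: k is cut
 *            i:   |--------|           front to i's end position
 *
 *   BACK:    k:   |--------|           i covers the end of k: k is cut back
 *            i:        |--------|      to i's start position
 *
 *   ALL:     k:      |----|            i covers all of k: k is dropped (or
 *            i:   |----------|         deleted from the bset outright)
 *
 *   MIDDLE:  k:   |----------|         i lands inside k: a copy of k keeps
 *            i:      |----|            the head, k itself keeps the tail,
 *                                      and i ends up in between
 */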
1374
1375 static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
1376 {
1377         struct btree_iter *iter = s->insert->iter;
1378         struct btree_iter_level *l = &iter->l[0];
1379         struct bkey_packed *_k;
1380         struct bkey unpacked;
1381         struct bkey_i *insert = s->insert->k;
1382
1383         while (bkey_cmp(s->committed, insert->k.p) < 0 &&
1384                (_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
1385                                                       KEY_TYPE_DISCARD))) {
1386                 struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
1387                 enum bch_extent_overlap overlap = bch2_extent_overlap(&insert->k, k.k);
1388
1389                 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1390
1391                 if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
1392                         break;
1393
1394                 s->committed = bpos_min(s->insert->k->k.p, k.k->p);
1395
1396                 if (!bkey_whiteout(k.k))
1397                         s->update_journal = true;
1398
1399                 if (!s->update_journal) {
1400                         bch2_cut_front(s->committed, insert);
1401                         bch2_cut_front(s->committed, &s->whiteout);
1402                         bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1403                         goto next;
1404                 }
1405
1406                 /*
1407                  * When deleting, if possible just do it by switching the type
1408                  * of the key we're deleting, instead of creating and inserting
1409                  * a new whiteout:
1410                  */
1411                 if (s->deleting &&
1412                     !s->update_btree &&
1413                     !bkey_cmp(insert->k.p, k.k->p) &&
1414                     !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
1415                         if (!bkey_whiteout(k.k)) {
1416                                 btree_account_key_drop(l->b, _k);
1417                                 bch2_subtract_sectors(s, k.s_c,
1418                                                       bkey_start_offset(k.k), k.k->size);
1419                                 _k->type = KEY_TYPE_DISCARD;
1420                                 reserve_whiteout(l->b, _k);
1421                         }
1422                         break;
1423                 }
1424
1425                 if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
1426                         insert->k.needs_whiteout = true;
1427                         s->update_btree = true;
1428                 }
1429
1430                 if (s->update_btree &&
1431                     overlap == BCH_EXTENT_OVERLAP_ALL &&
1432                     bkey_whiteout(k.k) &&
1433                     k.k->needs_whiteout) {
1434                         unreserve_whiteout(l->b, _k);
1435                         _k->needs_whiteout = false;
1436                 }
1437
1438                 extent_squash(s, insert, _k, k, overlap);
1439
1440                 if (!s->update_btree)
1441                         bch2_cut_front(s->committed, insert);
1442 next:
1443                 if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
1444                     overlap == BCH_EXTENT_OVERLAP_MIDDLE)
1445                         break;
1446         }
1447
1448         if (bkey_cmp(s->committed, insert->k.p) < 0)
1449                 s->committed = bpos_min(s->insert->k->k.p, l->b->key.k.p);
1450
1451          * We may have skipped past some deleted extents greater than the insert
1452          * key before we got to a non-deleted extent and knew we could bail out;
1453          * rewind the iterator a bit if necessary:
1454          * rewind the iterator a bit if necessary:
1455          */
1456         {
1457                 struct btree_node_iter node_iter = l->iter;
1458
1459                 while ((_k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
1460                        bkey_cmp_left_packed(l->b, _k, &s->committed) > 0)
1461                         l->iter = node_iter;
1462         }
1463 }
1464
1465 /**
1466  * bch2_insert_fixup_extent - insert a new extent and deal with overlaps
1467  *
1468  * this may result in not actually doing the insert, or inserting some subset
1469  * of the insert key. For cmpxchg operations this is where that logic lives.
1470  *
1471  * All subsets of @insert that need to be inserted are inserted and journalled
1472  * as we go. If we reach the end of the leaf node before all of @insert has
1473  * been processed, this function returns BTREE_INSERT_NEED_TRAVERSE, with
1474  * @iter->pos set to the prefix of @insert that actually got inserted.
1475  *
1476  * BSET INVARIANTS: this function is responsible for maintaining all the
1477  * invariants for bsets of extents in memory. Things get really hairy with 0
1478  * size extents:
1479  *
1480  * within one bset:
1481  *
1482  * bkey_start_pos(bkey_next(k)) >= k
1483  * or bkey_start_offset(bkey_next(k)) >= k->offset
1484  *
1485  * i.e. strict ordering, no overlapping extents.
1486  *
1487  * multiple bsets (i.e. full btree node):
1488  *
1489  * ∀ k, j
1490  *   k.size != 0 ∧ j.size != 0 →
1491  *     ¬ (k > bkey_start_pos(j) ∧ k < j)
1492  *
1493  * i.e. no two overlapping keys _of nonzero size_
1494  *
1495  * We can't realistically maintain this invariant for zero size keys because of
1496  * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
1497  * there may be another 0 size key between them in another bset, and it will
1498  * thus overlap with the merged key.
1499  *
1500  * In addition, iter->pos indicates how much of @insert has been processed.
1501  * If iter->pos has not yet reached the end of @insert, key insertion needs
1502  * to continue/be retried.
1503  */
1504 enum btree_insert_ret
1505 bch2_insert_fixup_extent(struct btree_insert *trans,
1506                          struct btree_insert_entry *insert)
1507 {
1508         struct bch_fs *c        = trans->c;
1509         struct btree_iter *iter = insert->iter;
1510         struct btree *b         = iter->l[0].b;
1511         struct extent_insert_state s = {
1512                 .trans          = trans,
1513                 .insert         = insert,
1514                 .committed      = iter->pos,
1515
1516                 .whiteout       = *insert->k,
1517                 .update_journal = !bkey_whiteout(&insert->k->k),
1518                 .update_btree   = !bkey_whiteout(&insert->k->k),
1519                 .deleting       = bkey_whiteout(&insert->k->k),
1520         };
1521
1522         EBUG_ON(iter->level);
1523         EBUG_ON(!insert->k->k.size);
1524
1525         /*
1526          * As we process overlapping extents, we advance @iter->pos both to
1527          * signal to our caller (btree_insert_key()) how much of @insert->k has
1528          * been inserted, and also to keep @iter->pos consistent with
1529          * @insert->k and the node iterator that we're advancing:
1530          */
1531         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1532
1533         if (!s.deleting &&
1534             !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1535                 bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
1536                                 bkey_start_offset(&insert->k->k),
1537                                 insert->k->k.size);
1538
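             /*
              * Walk the existing extents overlapping with @insert, resolving
              * overlaps, then insert/journal whatever has been committed:
              */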
1539         __bch2_insert_fixup_extent(&s);
1540
1541         extent_insert_committed(&s);
1542
1543         bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
1544                            gc_pos_btree_node(b));
1545
1546         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1547         EBUG_ON(bkey_cmp(iter->pos, s.committed));
1548
1549         if (insert->k->k.size) {
1550                 /* got to the end of this leaf node */
1551                 BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
1552                 return BTREE_INSERT_NEED_TRAVERSE;
1553         }
1554
1555         return BTREE_INSERT_OK;
1556 }
1557
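     /* Returns an error string if @k isn't a valid extent btree key, NULL otherwise: */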
1558 const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
1559 {
1560         if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
1561                 return "value too big";
1562
1563         if (!k.k->size)
1564                 return "zero key size";
1565
1566         switch (k.k->type) {
1567         case BCH_EXTENT:
1568         case BCH_EXTENT_CACHED: {
1569                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1570                 const union bch_extent_entry *entry;
1571                 struct bch_extent_crc_unpacked crc;
1572                 const struct bch_extent_ptr *ptr;
1573                 unsigned size_ondisk = e.k->size;
1574                 const char *reason;
1575                 unsigned nonce = UINT_MAX;
1576
1577                 extent_for_each_entry(e, entry) {
1578                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
1579                                 return "invalid extent entry type";
1580
1581                         if (extent_entry_is_crc(entry)) {
1582                                 crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
1583
1584                                 if (crc.offset + e.k->size >
1585                                     crc.uncompressed_size)
1586                                         return "checksum offset + key size > uncompressed size";
1587
1588                                 size_ondisk = crc.compressed_size;
1589
1590                                 if (!bch2_checksum_type_valid(c, crc.csum_type))
1591                                         return "invalid checksum type";
1592
1593                                 if (crc.compression_type >= BCH_COMPRESSION_NR)
1594                                         return "invalid compression type";
1595
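                                     /* all crc entries in an encrypted extent must agree on the nonce: */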
1596                                 if (bch2_csum_type_is_encryption(crc.csum_type)) {
1597                                         if (nonce == UINT_MAX)
1598                                                 nonce = crc.offset + crc.nonce;
1599                                         else if (nonce != crc.offset + crc.nonce)
1600                                                 return "incorrect nonce";
1601                                 }
1602                         } else {
1603                                 ptr = entry_to_ptr(entry);
1604
1605                                 reason = extent_ptr_invalid(c, e, &entry->ptr,
1606                                                             size_ondisk, false);
1607                                 if (reason)
1608                                         return reason;
1609                         }
1610                 }
1611
1612                 return NULL;
1613         }
1614
1615         case BCH_RESERVATION: {
1616                 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
1617
1618                 if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
1619                         return "incorrect value size";
1620
1621                 if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
1622                         return "invalid nr_replicas";
1623
1624                 return NULL;
1625         }
1626
1627         default:
1628                 return "invalid value type";
1629         }
1630 }
1631
1632 static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
1633                                           struct bkey_s_c_extent e)
1634 {
1635         const struct bch_extent_ptr *ptr;
1636         struct bch_dev *ca;
1637         struct bucket_mark mark;
1638         unsigned seq, stale;
1639         char buf[160];
1640         bool bad;
1641         unsigned replicas = 0;
1642
1643         /*
1644          * XXX: we should be doing most/all of these checks at startup time,
1645          * where we check bch2_bkey_invalid() in btree_node_read_done()
1646          *
1647          * But note that we can't check for stale pointers or incorrect gc marks
1648          * until after journal replay is done (it might be an extent that's
1649          * going to get overwritten during replay)
1650          */
1651
1652         extent_for_each_ptr(e, ptr) {
1653                 ca = bch_dev_bkey_exists(c, ptr->dev);
1654                 replicas++;
1655
1656                 /*
1657                  * If journal replay hasn't finished, we might be seeing keys
1658                  * that will be overwritten by the time journal replay is done:
1659                  */
1660                 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1661                         continue;
1662
1663                 stale = 0;
1664
1665                 do {
1666                         seq = read_seqcount_begin(&c->gc_pos_lock);
1667                         mark = ptr_bucket_mark(ca, ptr);
1668
1669                         /* between mark and bucket gen */
1670                         smp_rmb();
1671
1672                         stale = ptr_stale(ca, ptr);
1673
1674                         bch2_fs_bug_on(stale && !ptr->cached, c,
1675                                          "stale dirty pointer");
1676
1677                         bch2_fs_bug_on(stale > 96, c,
1678                                          "key too stale: %i",
1679                                          stale);
1680
1681                         if (stale)
1682                                 break;
1683
1684                         bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
1685                                 (mark.data_type != BCH_DATA_USER ||
1686                                  !(ptr->cached
1687                                    ? mark.cached_sectors
1688                                    : mark.dirty_sectors));
1689                 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
1690
1691                 if (bad)
1692                         goto bad_ptr;
1693         }
1694
1695         if (replicas > BCH_REPLICAS_MAX) {
1696                 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1697                                       e.s_c);
1698                 bch2_fs_bug(c,
1699                         "extent key bad (too many replicas: %u): %s",
1700                         replicas, buf);
1701                 return;
1702         }
1703
1704         if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
1705             !bch2_bkey_replicas_marked(c, btree_node_type(b), e.s_c)) {
1706                 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1707                                       e.s_c);
1708                 bch2_fs_bug(c,
1709                         "extent key bad (replicas not marked in superblock):\n%s",
1710                         buf);
1711                 return;
1712         }
1713
1714         return;
1715
1716 bad_ptr:
1717         bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1718                               e.s_c);
1719         bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
1720                    "gen %i type %u", buf,
1721                    PTR_BUCKET_NR(ca, ptr), mark.gen, mark.data_type);
1722 }
1723
1724 void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
1725 {
1726         switch (k.k->type) {
1727         case BCH_EXTENT:
1728         case BCH_EXTENT_CACHED:
1729                 bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
1730                 break;
1731         case BCH_RESERVATION:
1732                 break;
1733         default:
1734                 BUG();
1735         }
1736 }
1737
1738 void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
1739                          struct bkey_s_c k)
1740 {
1741         const char *invalid;
1742
1743         if (bkey_extent_is_data(k.k))
1744                 extent_print_ptrs(out, c, bkey_s_c_to_extent(k));
1745
1746         invalid = bch2_extent_invalid(c, k);
1747         if (invalid)
1748                 pr_buf(out, " invalid: %s", invalid);
1749 }
1750
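     /*
      * Initialize the smallest on-disk crc entry variant (crc32, crc64 or
      * crc128) that can represent @new:
      */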
1751 static void bch2_extent_crc_init(union bch_extent_crc *crc,
1752                                  struct bch_extent_crc_unpacked new)
1753 {
1754 #define common_fields(_crc)                                             \
1755                 .csum_type              = _crc.csum_type,               \
1756                 .compression_type       = _crc.compression_type,        \
1757                 ._compressed_size       = _crc.compressed_size - 1,     \
1758                 ._uncompressed_size     = _crc.uncompressed_size - 1,   \
1759                 .offset                 = _crc.offset
1760
1761         if (bch_crc_bytes[new.csum_type]        <= 4 &&
1762             new.uncompressed_size               <= CRC32_SIZE_MAX &&
1763             new.nonce                           <= CRC32_NONCE_MAX) {
1764                 crc->crc32 = (struct bch_extent_crc32) {
1765                         .type = 1 << BCH_EXTENT_ENTRY_crc32,
1766                         common_fields(new),
1767                         .csum                   = *((__le32 *) &new.csum.lo),
1768                 };
1769                 return;
1770         }
1771
1772         if (bch_crc_bytes[new.csum_type]        <= 10 &&
1773             new.uncompressed_size               <= CRC64_SIZE_MAX &&
1774             new.nonce                           <= CRC64_NONCE_MAX) {
1775                 crc->crc64 = (struct bch_extent_crc64) {
1776                         .type = 1 << BCH_EXTENT_ENTRY_crc64,
1777                         common_fields(new),
1778                         .nonce                  = new.nonce,
1779                         .csum_lo                = new.csum.lo,
1780                         .csum_hi                = *((__le16 *) &new.csum.hi),
1781                 };
1782                 return;
1783         }
1784
1785         if (bch_crc_bytes[new.csum_type]        <= 16 &&
1786             new.uncompressed_size               <= CRC128_SIZE_MAX &&
1787             new.nonce                           <= CRC128_NONCE_MAX) {
1788                 crc->crc128 = (struct bch_extent_crc128) {
1789                         .type = 1 << BCH_EXTENT_ENTRY_crc128,
1790                         common_fields(new),
1791                         .nonce                  = new.nonce,
1792                         .csum                   = new.csum,
1793                 };
1794                 return;
1795         }
1796 #undef common_fields
1797         BUG();
1798 }
1799
1800 void bch2_extent_crc_append(struct bkey_i_extent *e,
1801                             struct bch_extent_crc_unpacked new)
1802 {
1803         bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
1804         __extent_entry_push(e);
1805 }
1806
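     /*
      * Append a decoded pointer to @e: if an existing crc entry matches
      * @p->crc the pointer is inserted right after it, otherwise a new crc
      * entry is appended first:
      */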
1807 void bch2_extent_ptr_decoded_append(struct bkey_i_extent *e,
1808                                     struct extent_ptr_decoded *p)
1809 {
1810         struct bch_extent_crc_unpacked crc;
1811         union bch_extent_entry *pos;
1812
1813         extent_for_each_crc(extent_i_to_s(e), crc, pos)
1814                 if (!bch2_crc_unpacked_cmp(crc, p->crc))
1815                         goto found;
1816
1817         bch2_extent_crc_append(e, p->crc);
1818         pos = extent_entry_last(extent_i_to_s(e));
1819 found:
1820         p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
1821         __extent_entry_insert(e, pos, to_entry(&p->ptr));
1822 }
1823
1824 /*
1825  * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
1826  *
1827  * Returns true if @k should be dropped entirely
1828  *
1829  * For existing keys, only called when btree nodes are being rewritten, not when
1830  * they're merely being compacted/resorted in memory.
1831  */
1832 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
1833 {
1834         struct bkey_s_extent e;
1835
1836         switch (k.k->type) {
1837         case KEY_TYPE_ERROR:
1838                 return false;
1839
1840         case KEY_TYPE_DELETED:
1841                 return true;
1842         case KEY_TYPE_DISCARD:
1843                 return bversion_zero(k.k->version);
1844         case KEY_TYPE_COOKIE:
1845                 return false;
1846
1847         case BCH_EXTENT:
1848         case BCH_EXTENT_CACHED:
1849                 e = bkey_s_to_extent(k);
1850
1851                 bch2_extent_drop_stale(c, e);
1852
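                     /*
                      * All the pointers were dropped as stale: cached data can
                      * simply be discarded, but losing dirty data is an error:
                      */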
1853                 if (!bkey_val_u64s(e.k)) {
1854                         if (bkey_extent_is_cached(e.k)) {
1855                                 k.k->type = KEY_TYPE_DISCARD;
1856                                 if (bversion_zero(k.k->version))
1857                                         return true;
1858                         } else {
1859                                 k.k->type = KEY_TYPE_ERROR;
1860                         }
1861                 }
1862
1863                 return false;
1864         case BCH_RESERVATION:
1865                 return false;
1866         default:
1867                 BUG();
1868         }
1869 }
1870
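     /*
      * Mark extra replicas as cached so that only @nr_desired_replicas worth
      * of durability remains dirty, preferring to demote pointers on devices
      * outside @target first:
      */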
1871 void bch2_extent_mark_replicas_cached(struct bch_fs *c,
1872                                       struct bkey_s_extent e,
1873                                       unsigned target,
1874                                       unsigned nr_desired_replicas)
1875 {
1876         struct bch_extent_ptr *ptr;
1877         int extra = bch2_extent_durability(c, e.c) - nr_desired_replicas;
1878
1879         if (target && extra > 0)
1880                 extent_for_each_ptr(e, ptr) {
1881                         int n = bch2_extent_ptr_durability(c, ptr);
1882
1883                         if (n && n <= extra &&
1884                             !bch2_dev_in_target(c, ptr->dev, target)) {
1885                                 ptr->cached = true;
1886                                 extra -= n;
1887                         }
1888                 }
1889
1890         if (extra > 0)
1891                 extent_for_each_ptr(e, ptr) {
1892                         int n = bch2_extent_ptr_durability(c, ptr);
1893
1894                         if (n && n <= extra) {
1895                                 ptr->cached = true;
1896                                 extra -= n;
1897                         }
1898                 }
1899 }
1900
1901 /*
1902  * This picks a non-stale pointer, preferably from a device other than those in
1903  * @failed. @failed can be NULL, meaning pick any. If there are no non-stale
1904  * pointers to other devices, it will still pick a pointer from a failed device.
1905  */
1906 int bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
1907                          struct bch_io_failures *failed,
1908                          struct extent_ptr_decoded *pick)
1909 {
1910         int ret;
1911
1912         switch (k.k->type) {
1913         case KEY_TYPE_ERROR:
1914                 return -EIO;
1915
1916         case BCH_EXTENT:
1917         case BCH_EXTENT_CACHED:
1918                 ret = extent_pick_read_device(c, bkey_s_c_to_extent(k),
1919                                               failed, pick);
1920
1921                 if (!ret && !bkey_extent_is_cached(k.k))
1922                         ret = -EIO;
1923
1924                 return ret;
1925
1926         default:
1927                 return 0;
1928         }
1929 }
1930
1931 enum merge_result bch2_extent_merge(struct bch_fs *c, struct btree *b,
1932                                     struct bkey_i *l, struct bkey_i *r)
1933 {
1934         struct bkey_s_extent el, er;
1935         union bch_extent_entry *en_l, *en_r;
1936
1937         if (key_merging_disabled(c))
1938                 return BCH_MERGE_NOMERGE;
1939
1940         /*
1941          * Generic header checks
1942          * Assumes left and right are in order
1943          * Left and right must be exactly aligned
1944          */
1945
1946         if (l->k.u64s           != r->k.u64s ||
1947             l->k.type           != r->k.type ||
1948             bversion_cmp(l->k.version, r->k.version) ||
1949             bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
1950                 return BCH_MERGE_NOMERGE;
1951
1952         switch (l->k.type) {
1953         case KEY_TYPE_DISCARD:
1954         case KEY_TYPE_ERROR:
1955                 /* These types are mergeable, and no val to check */
1956                 break;
1957
1958         case BCH_EXTENT:
1959         case BCH_EXTENT_CACHED:
1960                 el = bkey_i_to_s_extent(l);
1961                 er = bkey_i_to_s_extent(r);
1962
1963                 extent_for_each_entry(el, en_l) {
1964                         struct bch_extent_ptr *lp, *rp;
1965                         struct bch_dev *ca;
1966
1967                         en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
1968
1969                         if ((extent_entry_type(en_l) !=
1970                              extent_entry_type(en_r)) ||
1971                             extent_entry_is_crc(en_l))
1972                                 return BCH_MERGE_NOMERGE;
1973
1974                         lp = &en_l->ptr;
1975                         rp = &en_r->ptr;
1976
1977                         if (lp->offset + el.k->size     != rp->offset ||
1978                             lp->dev                     != rp->dev ||
1979                             lp->gen                     != rp->gen)
1980                                 return BCH_MERGE_NOMERGE;
1981
1982                         /* We don't allow extents to straddle buckets: */
1983                         ca = bch_dev_bkey_exists(c, lp->dev);
1984
1985                         if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
1986                                 return BCH_MERGE_NOMERGE;
1987                 }
1988
1989                 break;
1990         case BCH_RESERVATION: {
1991                 struct bkey_i_reservation *li = bkey_i_to_reservation(l);
1992                 struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
1993
1994                 if (li->v.generation != ri->v.generation ||
1995                     li->v.nr_replicas != ri->v.nr_replicas)
1996                         return BCH_MERGE_NOMERGE;
1997                 break;
1998         }
1999         default:
2000                 return BCH_MERGE_NOMERGE;
2001         }
2002
2003         l->k.needs_whiteout |= r->k.needs_whiteout;
2004
2005         /* Keys with no pointers aren't restricted to one bucket and could
2006          * overflow KEY_SIZE
2007          */
2008         if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
2009                 bch2_key_resize(&l->k, KEY_SIZE_MAX);
2010                 bch2_cut_front(l->k.p, r);
2011                 return BCH_MERGE_PARTIAL;
2012         }
2013
2014         bch2_key_resize(&l->k, l->k.size + r->k.size);
2015
2016         return BCH_MERGE_MERGE;
2017 }
2018
2019 /*
2020  * When merging an extent that we're inserting into a btree node, the new merged
2021  * extent could overlap with an existing 0 size extent - if we don't fix that,
2022  * it'll break the btree node iterator so this code finds those 0 size extents
2023  * and shifts them out of the way.
2024  *
2025  * Also unpacks and repacks.
2026  */
2027 static bool bch2_extent_merge_inline(struct bch_fs *c,
2028                                      struct btree_iter *iter,
2029                                      struct bkey_packed *l,
2030                                      struct bkey_packed *r,
2031                                      bool back_merge)
2032 {
2033         struct btree *b = iter->l[0].b;
2034         struct btree_node_iter *node_iter = &iter->l[0].iter;
2035         BKEY_PADDED(k) li, ri;
2036         struct bkey_packed *m   = back_merge ? l : r;
2037         struct bkey_i *mi       = back_merge ? &li.k : &ri.k;
2038         struct bset_tree *t     = bch2_bkey_to_bset(b, m);
2039         enum merge_result ret;
2040
2041         EBUG_ON(bkey_written(b, m));
2042
2043         /*
2044          * We need to save copies of both l and r, because we might get a
2045          * partial merge (which modifies both) and then fail to repack
2046          */
2047         bch2_bkey_unpack(b, &li.k, l);
2048         bch2_bkey_unpack(b, &ri.k, r);
2049
2050         ret = bch2_extent_merge(c, b, &li.k, &ri.k);
2051         if (ret == BCH_MERGE_NOMERGE)
2052                 return false;
2053
2054         /*
2055          * check if we overlap with deleted extents - would break the sort
2056          * order:
2057          */
2058         if (back_merge) {
2059                 struct bkey_packed *n = bkey_next(m);
2060
2061                 if (n != btree_bkey_last(b, t) &&
2062                     bkey_cmp_left_packed(b, n, &li.k.k.p) <= 0 &&
2063                     bkey_deleted(n))
2064                         return false;
2065         } else if (ret == BCH_MERGE_MERGE) {
2066                 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
2067
2068                 if (prev &&
2069                     bkey_cmp_left_packed_byval(b, prev,
2070                                 bkey_start_pos(&li.k.k)) > 0)
2071                         return false;
2072         }
2073
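             /*
              * A partial merge modifies both keys: save @m's updated copy back
              * into the node, and copy the other (unpacked) key's updated
              * version back into the caller's key:
              */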
2074         if (ret == BCH_MERGE_PARTIAL) {
2075                 if (!extent_i_save(b, m, mi))
2076                         return false;
2077
2078                 if (!back_merge)
2079                         bkey_copy(packed_to_bkey(l), &li.k);
2080                 else
2081                         bkey_copy(packed_to_bkey(r), &ri.k);
2082         } else {
2083                 if (!extent_i_save(b, m, &li.k))
2084                         return false;
2085         }
2086
2087         bch2_bset_fix_invalidated_key(b, m);
2088         bch2_btree_node_iter_fix(iter, b, node_iter,
2089                                  m, m->u64s, m->u64s);
2090         verify_modified_extent(iter, m);
2091
2092         return ret == BCH_MERGE_MERGE;
2093 }
2094
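     /*
      * Returns 0 if every extent in [pos, pos + size) is fully allocated,
      * -ENOSPC otherwise:
      */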
2095 int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size)
2096 {
2097         struct btree_iter iter;
2098         struct bpos end = pos;
2099         struct bkey_s_c k;
2100         int ret = 0;
2101
2102         end.offset += size;
2103
2104         for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
2105                              BTREE_ITER_SLOTS, k) {
2106                 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2107                         break;
2108
2109                 if (!bch2_extent_is_fully_allocated(k)) {
2110                         ret = -ENOSPC;
2111                         break;
2112                 }
2113         }
2114         bch2_btree_iter_unlock(&iter);
2115
2116         return ret;
2117 }