1 /*
2  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3  *
4  * Code for managing the extent btree and dynamically updating the writeback
5  * dirty sector count.
6  */
7
8 #include "bcache.h"
9 #include "bkey_methods.h"
10 #include "btree_gc.h"
11 #include "btree_update.h"
12 #include "checksum.h"
13 #include "debug.h"
14 #include "dirent.h"
15 #include "error.h"
16 #include "extents.h"
17 #include "inode.h"
18 #include "journal.h"
19 #include "super-io.h"
20 #include "writeback.h"
21 #include "xattr.h"
22
23 #include <trace/events/bcache.h>
24
25 static enum merge_result bch_extent_merge(struct cache_set *, struct btree *,
26                                           struct bkey_i *, struct bkey_i *);
27
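/*
 * Advance iterator set @i to its next key; if the set is exhausted, drop it
 * from the heap by replacing it with the last set.
 */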
28 static void sort_key_next(struct btree_node_iter *iter,
29                           struct btree *b,
30                           struct btree_node_iter_set *i)
31 {
32         i->k += __btree_node_offset_to_key(b, i->k)->u64s;
33
34         if (i->k == i->end)
35                 *i = iter->data[--iter->used];
36 }
37
38 /*
39  * Returns true if l > r - unless l == r, in which case returns true if l is
40  * older than r.
41  *
42  * Necessary for btree_sort_fixup() - if there are multiple keys that compare
43  * equal in different sets, we have to process them newest to oldest.
44  */
45 #define key_sort_cmp(l, r)                                              \
46 ({                                                                      \
47         int _c = bkey_cmp_packed(b,                                     \
48                                  __btree_node_offset_to_key(b, (l).k),  \
49                                  __btree_node_offset_to_key(b, (r).k)); \
50                                                                         \
51         _c ? _c > 0 : (l).k > (r).k;                                    \
52 })
53
54 static inline bool should_drop_next_key(struct btree_node_iter *iter,
55                                         struct btree *b)
56 {
57         struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
58         struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
59
60         if (bkey_whiteout(k))
61                 return true;
62
63         if (iter->used < 2)
64                 return false;
65
66         if (iter->used > 2 &&
67             key_sort_cmp(r[0], r[1]))
68                 r++;
69
70         /*
71          * key_sort_cmp() ensures that when keys compare equal the older key
72          * comes first; so if l->k compares equal to r->k then l->k is older and
73          * should be dropped.
74          */
75         return !bkey_cmp_packed(b,
76                                 __btree_node_offset_to_key(b, l->k),
77                                 __btree_node_offset_to_key(b, r->k));
78 }
79
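/*
 * Merge the keys from all of the node's bsets, in heap order, into @dst -
 * dropping whiteouts and keys that are superseded by a newer duplicate.
 */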
80 struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst,
81                                                   struct btree *b,
82                                                   struct btree_node_iter *iter)
83 {
84         struct bkey_packed *out = dst->start;
85         struct btree_nr_keys nr;
86
87         memset(&nr, 0, sizeof(nr));
88
89         heap_resort(iter, key_sort_cmp);
90
91         while (!bch_btree_node_iter_end(iter)) {
92                 if (!should_drop_next_key(iter, b)) {
93                         struct bkey_packed *k =
94                                 __btree_node_offset_to_key(b, iter->data->k);
95
96                         bkey_copy(out, k);
97                         btree_keys_account_key_add(&nr, 0, out);
98                         out = bkey_next(out);
99                 }
100
101                 sort_key_next(iter, b, iter->data);
102                 heap_sift(iter, 0, key_sort_cmp);
103         }
104
105         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
106         return nr;
107 }
108
109 /* Common among btree and extent ptrs */
110
111 const struct bch_extent_ptr *
112 bch_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
113 {
114         const struct bch_extent_ptr *ptr;
115
116         extent_for_each_ptr(e, ptr)
117                 if (ptr->dev == dev)
118                         return ptr;
119
120         return NULL;
121 }
122
123 unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent e)
124 {
125         const struct bch_extent_ptr *ptr;
126         unsigned nr_ptrs = 0;
127
128         extent_for_each_ptr(e, ptr)
129                 nr_ptrs++;
130
131         return nr_ptrs;
132 }
133
134 unsigned bch_extent_nr_dirty_ptrs(struct bkey_s_c k)
135 {
136         struct bkey_s_c_extent e;
137         const struct bch_extent_ptr *ptr;
138         unsigned nr_ptrs = 0;
139
140         switch (k.k->type) {
141         case BCH_EXTENT:
142         case BCH_EXTENT_CACHED:
143                 e = bkey_s_c_to_extent(k);
144
145                 extent_for_each_ptr(e, ptr)
146                         nr_ptrs += !ptr->cached;
147                 break;
148
149         case BCH_RESERVATION:
150                 nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
151                 break;
152         }
153
154         return nr_ptrs;
155 }
156
157 /* returns true if equal */
158 static bool crc_cmp(union bch_extent_crc *l, union bch_extent_crc *r)
159 {
160         return extent_crc_type(l) == extent_crc_type(r) &&
161                 !memcmp(l, r, extent_entry_bytes(to_entry(l)));
162 }
163
164 /* Increment pointers after @crc by crc's offset until the next crc entry: */
165 void bch_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc)
166 {
167         union bch_extent_entry *entry;
168
169         extent_for_each_entry_from(e, entry, extent_entry_next(to_entry(crc))) {
170                 if (!extent_entry_is_ptr(entry))
171                         return;
172
173                 entry->ptr.offset += crc_offset(crc);
174         }
175 }
176
177 /*
178  * We're writing another replica for this extent, so while we've got the data in
179  * memory we'll be computing a new checksum for the currently live data.
180  *
181  * If there are other replicas we aren't moving, and they are checksummed but
182  * not compressed, we can modify them to point to only the data that is
183  * currently live (so that readers won't have to bounce) while we've got the
184  * checksum we need:
185  *
186  * XXX: to guard against data being corrupted while in memory, instead of
187  * recomputing the checksum here, it would be better in the read path, instead
188  * of computing the checksum of the entire extent:
189  *
190  * | extent                              |
191  *
192  * compute the checksums of the live and dead data separately
193  * | dead data || live data || dead data |
194  *
195  * and then verify that crc_dead1 + crc_live + crc_dead2 == orig_crc, and then
196  * use crc_live here (that we verified was correct earlier)
197  *
198  * note: doesn't work with encryption
199  */
200 void bch_extent_narrow_crcs(struct bkey_s_extent e)
201 {
202         union bch_extent_crc *crc;
203         bool have_wide = false, have_narrow = false;
204         struct bch_csum csum = { 0 };
205         unsigned csum_type = 0;
206
207         extent_for_each_crc(e, crc) {
208                 if (crc_compression_type(crc) ||
209                     bch_csum_type_is_encryption(crc_csum_type(crc)))
210                         continue;
211
212                 if (crc_uncompressed_size(e.k, crc) != e.k->size) {
213                         have_wide = true;
214                 } else {
215                         have_narrow = true;
216                         csum = crc_csum(crc);
217                         csum_type = crc_csum_type(crc);
218                 }
219         }
220
221         if (!have_wide || !have_narrow)
222                 return;
223
224         extent_for_each_crc(e, crc) {
225                 if (crc_compression_type(crc))
226                         continue;
227
228                 if (crc_uncompressed_size(e.k, crc) != e.k->size) {
229                         switch (extent_crc_type(crc)) {
230                         case BCH_EXTENT_CRC_NONE:
231                                 BUG();
232                         case BCH_EXTENT_CRC32:
233                                 if (bch_crc_bytes[csum_type] > 4)
234                                         continue;
235
236                                 bch_extent_crc_narrow_pointers(e, crc);
237                                 crc->crc32._compressed_size     = e.k->size - 1;
238                                 crc->crc32._uncompressed_size   = e.k->size - 1;
239                                 crc->crc32.offset               = 0;
240                                 crc->crc32.csum_type            = csum_type;
241                                 crc->crc32.csum                 = csum.lo;
242                                 break;
243                         case BCH_EXTENT_CRC64:
244                                 if (bch_crc_bytes[csum_type] > 10)
245                                         continue;
246
247                                 bch_extent_crc_narrow_pointers(e, crc);
248                                 crc->crc64._compressed_size     = e.k->size - 1;
249                                 crc->crc64._uncompressed_size   = e.k->size - 1;
250                                 crc->crc64.offset               = 0;
251                                 crc->crc64.csum_type            = csum_type;
252                                 crc->crc64.csum_lo              = csum.lo;
253                                 crc->crc64.csum_hi              = csum.hi;
254                                 break;
255                         case BCH_EXTENT_CRC128:
256                                 if (bch_crc_bytes[csum_type] > 16)
257                                         continue;
258
259                                 bch_extent_crc_narrow_pointers(e, crc);
260                                 crc->crc128._compressed_size    = e.k->size - 1;
261                                 crc->crc128._uncompressed_size  = e.k->size - 1;
262                                 crc->crc128.offset              = 0;
263                                 crc->crc128.csum_type           = csum_type;
264                                 crc->crc128.csum                = csum;
265                                 break;
266                         }
267                 }
268         }
269 }
270
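/*
 * Drop crc entries that are no longer needed: entries not followed by any
 * pointer before the next crc entry (or the end of the extent), entries
 * identical to the previous crc entry, and leading null (no checksum, no
 * compression) entries.
 */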
271 void bch_extent_drop_redundant_crcs(struct bkey_s_extent e)
272 {
273         union bch_extent_entry *entry = e.v->start;
274         union bch_extent_crc *crc, *prev = NULL;
275
276         while (entry != extent_entry_last(e)) {
277                 union bch_extent_entry *next = extent_entry_next(entry);
278                 size_t crc_u64s = extent_entry_u64s(entry);
279
280                 if (!extent_entry_is_crc(entry))
281                         goto next;
282
283                 crc = entry_to_crc(entry);
284
285                 if (next == extent_entry_last(e)) {
286                         /* crc entry with no pointers after it: */
287                         goto drop;
288                 }
289
290                 if (extent_entry_is_crc(next)) {
291                         /* no pointers before next crc entry: */
292                         goto drop;
293                 }
294
295                 if (prev && crc_cmp(crc, prev)) {
296                         /* identical to previous crc entry: */
297                         goto drop;
298                 }
299
300                 if (!prev &&
301                     !crc_csum_type(crc) &&
302                     !crc_compression_type(crc)) {
303                         /* null crc entry: */
304                         bch_extent_crc_narrow_pointers(e, crc);
305                         goto drop;
306                 }
307
308                 prev = crc;
309 next:
310                 entry = next;
311                 continue;
312 drop:
313                 memmove_u64s_down(crc, next,
314                                   (u64 *) extent_entry_last(e) - (u64 *) next);
315                 e.k->u64s -= crc_u64s;
316         }
317
318         EBUG_ON(bkey_val_u64s(e.k) && !bch_extent_nr_ptrs(e.c));
319 }
320
321 static bool should_drop_ptr(const struct cache_set *c,
322                             struct bkey_s_c_extent e,
323                             const struct bch_extent_ptr *ptr)
324 {
325         struct cache *ca;
326
327         return (ca = PTR_CACHE(c, ptr)) && ptr_stale(ca, ptr);
328 }
329
330 static void bch_extent_drop_stale(struct cache_set *c, struct bkey_s_extent e)
331 {
332         struct bch_extent_ptr *ptr = &e.v->start->ptr;
333         bool dropped = false;
334
335         rcu_read_lock();
336         while ((ptr = extent_ptr_next(e, ptr)))
337                 if (should_drop_ptr(c, e.c, ptr)) {
338                         __bch_extent_drop_ptr(e, ptr);
339                         dropped = true;
340                 } else
341                         ptr++;
342         rcu_read_unlock();
343
344         if (dropped)
345                 bch_extent_drop_redundant_crcs(e);
346 }
347
348 static bool bch_ptr_normalize(struct cache_set *c, struct btree *bk,
349                               struct bkey_s k)
350 {
351         return bch_extent_normalize(c, k);
352 }
353
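/*
 * Byteswap an extent value in place - wired up as the .swab op in
 * bch_bkey_btree_ops below.
 */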
354 static void bch_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
355 {
356         switch (k->type) {
357         case BCH_EXTENT:
358         case BCH_EXTENT_CACHED: {
359                 union bch_extent_entry *entry;
360                 u64 *d = (u64 *) bkeyp_val(f, k);
361                 unsigned i;
362
363                 for (i = 0; i < bkeyp_val_u64s(f, k); i++)
364                         d[i] = swab64(d[i]);
365
366                 for (entry = (union bch_extent_entry *) d;
367                      entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
368                      entry = extent_entry_next(entry)) {
369                         switch (extent_entry_type(entry)) {
370                         case BCH_EXTENT_ENTRY_crc32:
371                                 entry->crc32.csum = swab32(entry->crc32.csum);
372                                 break;
373                         case BCH_EXTENT_ENTRY_crc64:
374                                 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
375                                 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
376                                 break;
377                         case BCH_EXTENT_ENTRY_crc128:
378                                 entry->crc128.csum.hi = swab64(entry->crc128.csum.hi);
379                                 entry->crc128.csum.lo = swab64(entry->crc128.csum.lo);
380                                 break;
381                         case BCH_EXTENT_ENTRY_ptr:
382                                 break;
383                         }
384                 }
385                 break;
386         }
387         }
388 }
389
390 static const char *extent_ptr_invalid(struct bkey_s_c_extent e,
391                                       const struct cache_member_rcu *mi,
392                                       const struct bch_extent_ptr *ptr,
393                                       unsigned size_ondisk)
394 {
395         const struct bch_extent_ptr *ptr2;
396         const struct cache_member_cpu *m = mi->m + ptr->dev;
397
398         if (ptr->dev >= mi->nr_devices || !m->valid)
399                 return "pointer to invalid device";
400
401         extent_for_each_ptr(e, ptr2)
402                 if (ptr != ptr2 && ptr->dev == ptr2->dev)
403                         return "multiple pointers to same device";
404
405         if (ptr->offset + size_ondisk > m->bucket_size * m->nbuckets)
406                 return "offset past end of device";
407
408         if (ptr->offset < m->bucket_size * m->first_bucket)
409                 return "offset before first bucket";
410
411         if ((ptr->offset & (m->bucket_size - 1)) + size_ondisk > m->bucket_size)
412                 return "spans multiple buckets";
413
414         return NULL;
415 }
416
417 static size_t extent_print_ptrs(struct cache_set *c, char *buf,
418                                 size_t size, struct bkey_s_c_extent e)
419 {
420         char *out = buf, *end = buf + size;
421         const union bch_extent_entry *entry;
422         const union bch_extent_crc *crc;
423         const struct bch_extent_ptr *ptr;
424         struct cache *ca;
425         bool first = true;
426
427 #define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))
428
429         rcu_read_lock();
430         extent_for_each_entry(e, entry) {
431                 if (!first)
432                         p(" ");
433
434                 switch (__extent_entry_type(entry)) {
435                 case BCH_EXTENT_ENTRY_crc32:
436                 case BCH_EXTENT_ENTRY_crc64:
437                 case BCH_EXTENT_ENTRY_crc128:
438                         crc = entry_to_crc(entry);
439
440                         p("crc: c_size %u size %u offset %u csum %u compress %u",
441                           crc_compressed_size(e.k, crc),
442                           crc_uncompressed_size(e.k, crc),
443                           crc_offset(crc), crc_csum_type(crc),
444                           crc_compression_type(crc));
445                         break;
446                 case BCH_EXTENT_ENTRY_ptr:
447                         ptr = entry_to_ptr(entry);
448
449                         p("ptr: %u:%llu gen %u%s", ptr->dev,
450                           (u64) ptr->offset, ptr->gen,
451                           (ca = PTR_CACHE(c, ptr)) && ptr_stale(ca, ptr)
452                           ? " stale" : "");
453                         break;
454                 default:
455                         p("(invalid extent entry %.16llx)", *((u64 *) entry));
456                         goto out;
457                 }
458
459                 first = false;
460         }
461 out:
462         rcu_read_unlock();
463
464         if (bkey_extent_is_cached(e.k))
465                 p(" cached");
466 #undef p
467         return out - buf;
468 }
469
470 /* Btree ptrs */
471
472 static const char *bch_btree_ptr_invalid(const struct cache_set *c,
473                                          struct bkey_s_c k)
474 {
475         if (bkey_extent_is_cached(k.k))
476                 return "cached";
477
478         if (k.k->size)
479                 return "nonzero key size";
480
481         if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
482                 return "value too big";
483
484         switch (k.k->type) {
485         case BCH_EXTENT: {
486                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
487                 const union bch_extent_entry *entry;
488                 const struct bch_extent_ptr *ptr;
489                 const union bch_extent_crc *crc;
490                 struct cache_member_rcu *mi;
491                 const char *reason;
492
493                 extent_for_each_entry(e, entry)
494                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
495                                 return "invalid extent entry type";
496
497                 mi = cache_member_info_get(c);
498
499                 extent_for_each_ptr_crc(e, ptr, crc) {
500                         reason = extent_ptr_invalid(e, mi, ptr,
501                                                 c->sb.btree_node_size);
502
503                         if (reason) {
504                                 cache_member_info_put();
505                                 return reason;
506                         }
507                 }
508
509                 cache_member_info_put();
510
511                 if (crc)
512                         return "has crc field";
513
514                 return NULL;
515         }
516
517         default:
518                 return "invalid value type";
519         }
520 }
521
522 static void btree_ptr_debugcheck(struct cache_set *c, struct btree *b,
523                                  struct bkey_s_c k)
524 {
525         struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
526         const struct bch_extent_ptr *ptr;
527         unsigned seq;
528         const char *err;
529         char buf[160];
530         struct bucket *g;
531         struct cache *ca;
532         unsigned replicas = 0;
533         bool bad;
534
535         rcu_read_lock();
536
537         extent_for_each_online_device(c, e, ptr, ca) {
538                 replicas++;
539
540                 if ((ca = PTR_CACHE(c, ptr))) {
541                         g = PTR_BUCKET(ca, ptr);
542
543                         err = "stale";
544                         if (ptr_stale(ca, ptr))
545                                 goto err;
546
547                         do {
548                                 seq = read_seqcount_begin(&c->gc_pos_lock);
549                                 bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
550                                        !g->mark.is_metadata;
551                         } while (read_seqcount_retry(&c->gc_pos_lock, seq));
552
553                         err = "inconsistent";
554                         if (bad)
555                                 goto err;
556                 }
557         }
558
559         rcu_read_unlock();
560
561         if (replicas < c->sb.meta_replicas_have) {
562                 bch_bkey_val_to_text(c, btree_node_type(b),
563                                      buf, sizeof(buf), k);
564                 bch_fs_bug(c,
565                         "btree key bad (too few replicas, %u < %u): %s",
566                         replicas, c->sb.meta_replicas_have, buf);
567                 return;
568         }
569
570         return;
571 err:
572         bch_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
573         bch_fs_bug(c, "%s btree pointer %s: bucket %zi prio %i "
574                       "gen %i last_gc %i mark %08x",
575                       err, buf, PTR_BUCKET_NR(ca, ptr),
576                       g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
577                       ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
578                       (unsigned) g->mark.counter);
579         rcu_read_unlock();
580 }
581
582 static void bch_btree_ptr_to_text(struct cache_set *c, char *buf,
583                                   size_t size, struct bkey_s_c k)
584 {
585         char *out = buf, *end = buf + size;
586         const char *invalid;
587
588 #define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))
589
590         if (bkey_extent_is_data(k.k))
591                 out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
592
593         invalid = bch_btree_ptr_invalid(c, k);
594         if (invalid)
595                 p(" invalid: %s", invalid);
596 #undef p
597 }
598
599 struct extent_pick_ptr
600 bch_btree_pick_ptr(struct cache_set *c, const struct btree *b)
601 {
602         struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
603         const union bch_extent_crc *crc;
604         const struct bch_extent_ptr *ptr;
605         struct cache *ca;
606
607         rcu_read_lock();
608
609         extent_for_each_online_device_crc(c, e, crc, ptr, ca) {
610                 struct btree *root = btree_node_root(c, b);
611
612                 if (bch_fs_inconsistent_on(crc, c,
613                                 "btree node pointer with crc at btree %u level %u/%u bucket %zu",
614                                 b->btree_id, b->level, root ? root->level : -1,
615                                 PTR_BUCKET_NR(ca, ptr)))
616                         break;
617
618                 if (bch_dev_inconsistent_on(ptr_stale(ca, ptr), ca,
619                                 "stale btree node pointer at btree %u level %u/%u bucket %zu",
620                                 b->btree_id, b->level, root ? root->level : -1,
621                                 PTR_BUCKET_NR(ca, ptr)))
622                         continue;
623
624                 percpu_ref_get(&ca->ref);
625                 rcu_read_unlock();
626
627                 return (struct extent_pick_ptr) { .ptr = *ptr, .ca = ca };
628         }
629
630         rcu_read_unlock();
631
632         return (struct extent_pick_ptr) { .ca = NULL, };
633 }
634
635 const struct bkey_ops bch_bkey_btree_ops = {
636         .key_invalid    = bch_btree_ptr_invalid,
637         .key_debugcheck = btree_ptr_debugcheck,
638         .val_to_text    = bch_btree_ptr_to_text,
639         .swab           = bch_ptr_swab,
640 };
641
642 /* Extents */
643
644 static bool __bch_cut_front(struct bpos where, struct bkey_s k)
645 {
646         u64 len = 0;
647
648         if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
649                 return false;
650
651         EBUG_ON(bkey_cmp(where, k.k->p) > 0);
652
653         len = k.k->p.offset - where.offset;
654
655         BUG_ON(len > k.k->size);
656
657         /*
658          * Don't readjust offset if the key size is now 0, because that could
659          * cause offset to point to the next bucket:
660          */
661         if (!len)
662                 __set_bkey_deleted(k.k);
663         else if (bkey_extent_is_data(k.k)) {
664                 struct bkey_s_extent e = bkey_s_to_extent(k);
665                 struct bch_extent_ptr *ptr;
666                 union bch_extent_crc *crc, *prev_crc = NULL;
667
668                 extent_for_each_ptr_crc(e, ptr, crc) {
669                         switch (extent_crc_type(crc)) {
670                         case BCH_EXTENT_CRC_NONE:
671                                 ptr->offset += e.k->size - len;
672                                 break;
673                         case BCH_EXTENT_CRC32:
674                                 if (prev_crc != crc)
675                                         crc->crc32.offset += e.k->size - len;
676                                 break;
677                         case BCH_EXTENT_CRC64:
678                                 if (prev_crc != crc)
679                                         crc->crc64.offset += e.k->size - len;
680                                 break;
681                         case BCH_EXTENT_CRC128:
682                                 if (prev_crc != crc)
683                                         crc->crc128.offset += e.k->size - len;
684                                 break;
685                         }
686                         prev_crc = crc;
687                 }
688         }
689
690         k.k->size = len;
691
692         return true;
693 }
694
695 bool bch_cut_front(struct bpos where, struct bkey_i *k)
696 {
697         return __bch_cut_front(where, bkey_i_to_s(k));
698 }
699
700 bool bch_cut_back(struct bpos where, struct bkey *k)
701 {
702         u64 len = 0;
703
704         if (bkey_cmp(where, k->p) >= 0)
705                 return false;
706
707         EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
708
709         len = where.offset - bkey_start_offset(k);
710
711         BUG_ON(len > k->size);
712
713         k->p = where;
714         k->size = len;
715
716         if (!len)
717                 __set_bkey_deleted(k);
718
719         return true;
720 }
721
722 /**
723  * bch_key_resize - adjust size of @k
724  *
725  * bkey_start_offset(k) will be preserved, modifies where the extent ends
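 *
 * For example, a key covering sectors [16, 32) resized to 8 sectors ends up
 * covering [16, 24): p.offset goes from 32 to 24 and k->size becomes 8.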
726  */
727 void bch_key_resize(struct bkey *k,
728                     unsigned new_size)
729 {
730         k->p.offset -= k->size;
731         k->p.offset += new_size;
732         k->size = new_size;
733 }
734
735 /*
736  * In extent_sort_fix_overlapping(), insert_fixup_extent(),
737  * extent_merge_inline() - we're modifying keys in place that are packed. To do
738  * that we have to unpack the key, modify the unpacked key - then this
739  * copies/repacks the unpacked key back into the original as necessary.
740  */
741 static bool __extent_save(struct btree *b, struct btree_node_iter *iter,
742                           struct bkey_packed *dst, struct bkey *src)
743 {
744         struct bkey_format *f = &b->format;
745         struct bkey_i *dst_unpacked;
746         bool ret;
747
748         if ((dst_unpacked = packed_to_bkey(dst))) {
749                 dst_unpacked->k = *src;
750                 ret = true;
751         } else {
752                 ret = bkey_pack_key(dst, src, f);
753         }
754
755         if (ret && iter)
756                 bch_verify_key_order(b, iter, dst);
757
758         return ret;
759 }
760
761 static void extent_save(struct btree *b, struct btree_node_iter *iter,
762                         struct bkey_packed *dst, struct bkey *src)
763 {
764         BUG_ON(!__extent_save(b, iter, dst, src));
765 }
766
767 /*
768  * Returns true if l > r - unless l == r, in which case returns true if l is
769  * older than r.
770  *
771  * Necessary for sort_fix_overlapping() - if there are multiple keys that
772  * compare equal in different sets, we have to process them newest to oldest.
773  */
774 #define extent_sort_cmp(l, r)                                           \
775 ({                                                                      \
776         struct bkey _ul = bkey_unpack_key(b,                            \
777                                 __btree_node_offset_to_key(b, (l).k));  \
778         struct bkey _ur = bkey_unpack_key(b,                            \
779                                 __btree_node_offset_to_key(b, (r).k));  \
780                                                                         \
781         int _c = bkey_cmp(bkey_start_pos(&_ul), bkey_start_pos(&_ur));  \
782         _c ? _c > 0 : (l).k < (r).k;                                    \
783 })
784
785 static inline void extent_sort_sift(struct btree_node_iter *iter,
786                                     struct btree *b, size_t i)
787 {
788         heap_sift(iter, i, extent_sort_cmp);
789 }
790
791 static inline void extent_sort_next(struct btree_node_iter *iter,
792                                     struct btree *b,
793                                     struct btree_node_iter_set *i)
794 {
795         sort_key_next(iter, b, i);
796         heap_sift(iter, i - iter->data, extent_sort_cmp);
797 }
798
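/*
 * Append @k to the output bset: first try to merge it into the previous key;
 * if that fails, pack and account the previous key and start a new one. The
 * key under construction is kept unpacked in *prev so later keys can still be
 * merged into it.
 */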
799 static void extent_sort_append(struct cache_set *c,
800                                struct btree *b,
801                                struct btree_nr_keys *nr,
802                                struct bkey_packed *start,
803                                struct bkey_packed **prev,
804                                struct bkey_packed *k)
805 {
806         struct bkey_format *f = &b->format;
807         BKEY_PADDED(k) tmp;
808
809         if (bkey_whiteout(k))
810                 return;
811
812         bkey_unpack(b, &tmp.k, k);
813
814         if (*prev &&
815             bch_extent_merge(c, b, (void *) *prev, &tmp.k))
816                 return;
817
818         if (*prev) {
819                 bkey_pack(*prev, (void *) *prev, f);
820
821                 btree_keys_account_key_add(nr, 0, *prev);
822                 *prev = bkey_next(*prev);
823         } else {
824                 *prev = start;
825         }
826
827         bkey_copy(*prev, &tmp.k);
828 }
829
830 struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c,
831                                         struct bset *dst,
832                                         struct btree *b,
833                                         struct btree_node_iter *iter)
834 {
835         struct bkey_format *f = &b->format;
836         struct btree_node_iter_set *_l = iter->data, *_r;
837         struct bkey_packed *prev = NULL, *out, *lk, *rk;
838         struct bkey l_unpacked, r_unpacked;
839         struct bkey_s l, r;
840         struct btree_nr_keys nr;
841
842         memset(&nr, 0, sizeof(nr));
843
844         heap_resort(iter, extent_sort_cmp);
845
846         while (!bch_btree_node_iter_end(iter)) {
847                 lk = __btree_node_offset_to_key(b, _l->k);
848
849                 if (iter->used == 1) {
850                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
851                         extent_sort_next(iter, b, _l);
852                         continue;
853                 }
854
855                 _r = iter->data + 1;
856                 if (iter->used > 2 &&
857                     extent_sort_cmp(_r[0], _r[1]))
858                         _r++;
859
860                 rk = __btree_node_offset_to_key(b, _r->k);
861
862                 l = __bkey_disassemble(b, lk, &l_unpacked);
863                 r = __bkey_disassemble(b, rk, &r_unpacked);
864
865                 /* If current key and next key don't overlap, just append */
866                 if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
867                         extent_sort_append(c, b, &nr, dst->start, &prev, lk);
868                         extent_sort_next(iter, b, _l);
869                         continue;
870                 }
871
872                 /* Skip 0 size keys */
873                 if (!r.k->size) {
874                         extent_sort_next(iter, b, _r);
875                         continue;
876                 }
877
878                 /*
879                  * overlap: keep the newer key and trim the older key so they
880                  * don't overlap. comparing pointers tells us which one is
881                  * newer, since the bsets are appended one after the other.
882                  */
883
884                 /* can't happen because of comparison func */
885                 BUG_ON(_l->k < _r->k &&
886                        !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));
887
888                 if (_l->k > _r->k) {
889                         /* l wins, trim r */
890                         if (bkey_cmp(l.k->p, r.k->p) >= 0) {
891                                 sort_key_next(iter, b, _r);
892                         } else {
893                                 __bch_cut_front(l.k->p, r);
894                                 extent_save(b, NULL, rk, r.k);
895                         }
896
897                         extent_sort_sift(iter, b, _r - iter->data);
898                 } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
899                         BKEY_PADDED(k) tmp;
900
901                         /*
902                          * r wins, but it overlaps in the middle of l - split l:
903                          */
904                         bkey_reassemble(&tmp.k, l.s_c);
905                         bch_cut_back(bkey_start_pos(r.k), &tmp.k.k);
906
907                         __bch_cut_front(r.k->p, l);
908                         extent_save(b, NULL, lk, l.k);
909
910                         extent_sort_sift(iter, b, 0);
911
912                         extent_sort_append(c, b, &nr, dst->start, &prev,
913                                            bkey_to_packed(&tmp.k));
914                 } else {
915                         bch_cut_back(bkey_start_pos(r.k), l.k);
916                         extent_save(b, NULL, lk, l.k);
917                 }
918         }
919
920         if (prev) {
921                 bkey_pack(prev, (void *) prev, f);
922                 btree_keys_account_key_add(&nr, 0, prev);
923                 out = bkey_next(prev);
924         } else {
925                 out = dst->start;
926         }
927
928         dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
929         return nr;
930 }
931
932 struct extent_insert_state {
933         struct btree_insert             *trans;
934         struct btree_insert_entry       *insert;
935         struct bpos                     committed;
936         struct bucket_stats_cache_set   stats;
937
938         /* for deleting: */
939         struct bkey_i                   whiteout;
940         bool                            do_journal;
941         bool                            deleting;
942 };
943
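/*
 * Account @sectors of @k starting at @offset: updates bucket/cache set usage
 * via bch_mark_key(), and the writeback dirty sector count for non-cached
 * data. Negative @sectors releases the accounting.
 */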
944 static void bch_add_sectors(struct extent_insert_state *s,
945                             struct bkey_s_c k, u64 offset, s64 sectors)
946 {
947         struct cache_set *c = s->trans->c;
948         struct btree *b = s->insert->iter->nodes[0];
949
950         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
951
952         if (!sectors)
953                 return;
954
955         bch_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
956                      &s->stats, s->trans->journal_res.seq);
957
958         if (bkey_extent_is_data(k.k) &&
959             !bkey_extent_is_cached(k.k))
960                 bcache_dev_sectors_dirty_add(c, k.k->p.inode, offset, sectors);
961 }
962
963 static void bch_subtract_sectors(struct extent_insert_state *s,
964                                  struct bkey_s_c k, u64 offset, s64 sectors)
965 {
966         bch_add_sectors(s, k, offset, -sectors);
967 }
968
969 /* These wrappers subtract exactly the sectors that we're removing from @k */
970 static void bch_cut_subtract_back(struct extent_insert_state *s,
971                                   struct bpos where, struct bkey_s k)
972 {
973         bch_subtract_sectors(s, k.s_c, where.offset,
974                              k.k->p.offset - where.offset);
975         bch_cut_back(where, k.k);
976 }
977
978 static void bch_cut_subtract_front(struct extent_insert_state *s,
979                                    struct bpos where, struct bkey_s k)
980 {
981         bch_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
982                              where.offset - bkey_start_offset(k.k));
983         __bch_cut_front(where, k);
984 }
985
986 static void bch_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
987 {
988         if (k.k->size)
989                 bch_subtract_sectors(s, k.s_c,
990                                      bkey_start_offset(k.k), k.k->size);
991         k.k->size = 0;
992         __set_bkey_deleted(k.k);
993 }
994
995 /*
996  * Note: If this returns true because only some pointers matched,
997  * we can lose some caching that had happened in the interim.
998  * Because cache promotion only promotes the part of the extent
999  * actually read, and not the whole extent, and due to the key
1000  * splitting done in bch_extent_insert_fixup, preserving such
1001  * caching is difficult.
1002  */
1003 static bool bch_extent_cmpxchg_cmp(struct bkey_s_c l, struct bkey_s_c r)
1004 {
1005         struct bkey_s_c_extent le, re;
1006         const struct bch_extent_ptr *lp, *rp;
1007         s64 offset;
1008
1009         BUG_ON(!l.k->size || !r.k->size);
1010
1011         if (l.k->type != r.k->type ||
1012             bversion_cmp(l.k->version, r.k->version))
1013                 return false;
1014
1015         switch (l.k->type) {
1016         case KEY_TYPE_COOKIE:
1017                 return !memcmp(bkey_s_c_to_cookie(l).v,
1018                                bkey_s_c_to_cookie(r).v,
1019                                sizeof(struct bch_cookie));
1020
1021         case BCH_EXTENT:
1022         case BCH_EXTENT_CACHED:
1023                 le = bkey_s_c_to_extent(l);
1024                 re = bkey_s_c_to_extent(r);
1025
1026                 /*
1027                  * bkey_cmpxchg() handles partial matches - when either l or r
1028                  * has been trimmed - so we need just to handle l or r not
1029                  * starting at the same place when checking for a match here.
1030                  *
1031                  * If the starts of the keys are different, we just apply that
1032                  * offset to the device pointer offsets when checking those -
1033                  * matching how bch_cut_front() adjusts device pointer offsets
1034                  * when adjusting the start of a key:
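                 *
                 * For example, if l has been trimmed so that it starts 8
                 * sectors past the start of r, then offset is 8 and each
                 * pointer in l should be at rp->offset + 8.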
1035                  */
1036                 offset = bkey_start_offset(l.k) - bkey_start_offset(r.k);
1037
1038                 /*
1039                  * XXX: perhaps we only raced with copygc or tiering replacing
1040                  * one of the pointers: it should suffice to find _any_ matching
1041                  * pointer
1042                  */
1043
1044                 if (bkey_val_u64s(le.k) != bkey_val_u64s(re.k))
1045                         return false;
1046
1047                 extent_for_each_ptr(le, lp) {
1048                         const union bch_extent_entry *entry =
1049                                 vstruct_idx(re.v, (u64 *) lp - le.v->_data);
1050
1051                         if (!extent_entry_is_ptr(entry))
1052                                 return false;
1053
1054                         rp = &entry->ptr;
1055
1056                         if (lp->offset  != rp->offset + offset ||
1057                             lp->dev     != rp->dev ||
1058                             lp->gen     != rp->gen)
1059                                 return false;
1060                 }
1061
1062                 return true;
1063         default:
1064                 return false;
1065         }
1066
1067 }
1068
1069 /*
1070  * Returns true on success, false on failure (and false means @new no longer
1071  * overlaps with @k)
1072  *
1073  * If returned true, we may have inserted up to one key in @b.
1074  * If returned false, we may have inserted up to two keys in @b.
1075  *
1076  * On return, there is room in @res for at least one more key of the same size
1077  * as @new.
1078  */
1079 enum extent_insert_hook_ret bch_extent_cmpxchg(struct extent_insert_hook *hook,
1080                                                struct bpos committed_pos,
1081                                                struct bpos next_pos,
1082                                                struct bkey_s_c k,
1083                                                const struct bkey_i *new)
1084 {
1085         struct bch_replace_info *replace = container_of(hook,
1086                                         struct bch_replace_info, hook);
1087         struct bkey_i *old = &replace->key;
1088
1089         EBUG_ON(bkey_cmp(committed_pos, bkey_start_pos(&new->k)) < 0);
1090
1091         /* must have something to compare against */
1092         EBUG_ON(!bkey_val_u64s(&old->k));
1093
1094         /* new must be a subset of old */
1095         EBUG_ON(bkey_cmp(new->k.p, old->k.p) > 0 ||
1096                 bkey_cmp(bkey_start_pos(&new->k), bkey_start_pos(&old->k)) < 0);
1097
1098         if (k.k && bch_extent_cmpxchg_cmp(k, bkey_i_to_s_c(old))) {
1099                 replace->successes++;
1100                 return BTREE_HOOK_DO_INSERT;
1101         } else {
1102                 replace->failures++;
1103                 return BTREE_HOOK_NO_INSERT;
1104         }
1105 }
1106
1107 static bool bch_extent_merge_inline(struct cache_set *,
1108                                     struct btree_iter *,
1109                                     struct bkey_packed *,
1110                                     struct bkey_packed *,
1111                                     bool);
1112
1113 #define MAX_LOCK_HOLD_TIME      (5 * NSEC_PER_MSEC)
1114
1115 static enum btree_insert_ret
1116 extent_insert_should_stop(struct extent_insert_state *s)
1117 {
1118         struct btree *b = s->insert->iter->nodes[0];
1119
1120         /*
1121          * Check if we have sufficient space in both the btree node and the
1122          * journal reservation:
1123          *
1124          * Each insert checks for room in the journal entry, but we check for
1125          * room in the btree node up-front. In the worst case, bkey_cmpxchg()
1126  * will insert two keys, and one iteration of the insert loop will insert one
1127          * key, so we need room for three keys.
1128          */
1129         if (!bch_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
1130                 return BTREE_INSERT_BTREE_NODE_FULL;
1131         else if (!journal_res_insert_fits(s->trans, s->insert))
1132                 return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */
1133         else
1134                 return BTREE_INSERT_OK;
1135 }
1136
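/*
 * Insert @insert into the last bset of the btree node at the iterator's
 * position, first trying to merge it with the neighbouring live keys; any
 * deleted keys in between are overwritten.
 */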
1137 static void extent_bset_insert(struct cache_set *c, struct btree_iter *iter,
1138                                struct bkey_i *insert)
1139 {
1140         struct btree *b = iter->nodes[0];
1141         struct btree_node_iter *node_iter = &iter->node_iters[0];
1142         struct bset_tree *t = bset_tree_last(b);
1143         struct bkey_packed *where =
1144                 bch_btree_node_iter_bset_pos(node_iter, b, t);
1145         struct bkey_packed *prev = bkey_prev(b, t, where);
1146         struct bkey_packed *next_live_key = where;
1147         unsigned clobber_u64s;
1148
1149         if (prev)
1150                 where = bkey_next(prev);
1151
1152         while (next_live_key != btree_bkey_last(b, t) &&
1153                bkey_deleted(next_live_key))
1154                 next_live_key = bkey_next(next_live_key);
1155
1156         /*
1157          * Everything between where and next_live_key now consists of deleted keys, and
1158          * is overwritten:
1159          */
1160         clobber_u64s = (u64 *) next_live_key - (u64 *) where;
1161
1162         if (prev &&
1163             bch_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
1164                 goto drop_deleted_keys;
1165
1166         if (next_live_key != btree_bkey_last(b, t) &&
1167             bch_extent_merge_inline(c, iter, bkey_to_packed(insert),
1168                                     next_live_key, false))
1169                 goto drop_deleted_keys;
1170
1171         bch_bset_insert(b, node_iter, where, insert, clobber_u64s);
1172         bch_btree_node_iter_fix(iter, b, node_iter, t, where,
1173                                 clobber_u64s, where->u64s);
1174         return;
1175 drop_deleted_keys:
1176         bch_bset_delete(b, where, clobber_u64s);
1177         bch_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0);
1178 }
1179
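/*
 * Journal (and, unless we're deleting, insert into the btree node) the part of
 * the new key that lies before s->committed, then trim that part off the front
 * of the key still being inserted.
 */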
1180 static void extent_insert_committed(struct extent_insert_state *s)
1181 {
1182         struct cache_set *c = s->trans->c;
1183         struct btree_iter *iter = s->insert->iter;
1184         struct bkey_i *insert = !s->deleting
1185                 ? s->insert->k
1186                 : &s->whiteout;
1187         BKEY_PADDED(k) split;
1188
1189         EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
1190         EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
1191
1192         if (!bkey_cmp(s->committed, bkey_start_pos(&insert->k)))
1193                 return;
1194
1195         if (s->deleting && !s->do_journal) {
1196                 bch_cut_front(s->committed, insert);
1197                 goto done;
1198         }
1199
1200         EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
1201
1202         bkey_copy(&split.k, insert);
1203
1204         if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY) &&
1205             bkey_cmp(s->committed, insert->k.p) &&
1206             bkey_extent_is_compressed(bkey_i_to_s_c(insert))) {
1207                 /* XXX: possibly need to increase our reservation? */
1208                 bch_cut_subtract_back(s, s->committed,
1209                                       bkey_i_to_s(&split.k));
1210                 bch_cut_front(s->committed, insert);
1211                 bch_add_sectors(s, bkey_i_to_s_c(insert),
1212                                 bkey_start_offset(&insert->k),
1213                                 insert->k.size);
1214         } else {
1215                 bch_cut_back(s->committed, &split.k.k);
1216                 bch_cut_front(s->committed, insert);
1217         }
1218
1219         if (debug_check_bkeys(c))
1220                 bkey_debugcheck(c, iter->nodes[iter->level],
1221                                 bkey_i_to_s_c(&split.k));
1222
1223         bch_btree_journal_key(s->trans, iter, &split.k);
1224
1225         if (!s->deleting)
1226                 extent_bset_insert(c, iter, &split.k);
1227 done:
1228         bch_btree_iter_set_pos_same_leaf(iter, s->committed);
1229
1230         insert->k.needs_whiteout        = false;
1231         s->do_journal                   = false;
1232         s->trans->did_work              = true;
1233 }
1234
1235 static enum extent_insert_hook_ret
1236 __extent_insert_advance_pos(struct extent_insert_state *s,
1237                             struct bpos next_pos,
1238                             struct bkey_s_c k)
1239 {
1240         struct extent_insert_hook *hook = s->trans->hook;
1241         enum extent_insert_hook_ret ret;
1242 #if 0
1243         /*
1244          * Currently disabled for encryption - broken with fcollapse. Will have
1245          * to reenable when versions are exposed for send/receive - versions
1246          * will have to be monotonic then:
1247          */
1248         if (k.k && k.k->size &&
1249             !bversion_zero(s->insert->k->k.version) &&
1250             bversion_cmp(k.k->version, s->insert->k->k.version) > 0) {
1251                 ret = BTREE_HOOK_NO_INSERT;
1252         } else
1253 #endif
1254         if (hook)
1255                 ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
1256         else
1257                 ret = BTREE_HOOK_DO_INSERT;
1258
1259         EBUG_ON(bkey_deleted(&s->insert->k->k) || !s->insert->k->k.size);
1260
1261         switch (ret) {
1262         case BTREE_HOOK_DO_INSERT:
1263                 break;
1264         case BTREE_HOOK_NO_INSERT:
1265                 extent_insert_committed(s);
1266                 bch_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k));
1267
1268                 bch_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos);
1269                 break;
1270         case BTREE_HOOK_RESTART_TRANS:
1271                 return ret;
1272         }
1273
1274         s->committed = next_pos;
1275         return ret;
1276 }
1277
1278 /*
1279  * Update iter->pos, marking how much of @insert we've processed, and call hook
1280  * fn:
1281  */
1282 static enum extent_insert_hook_ret
1283 extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
1284 {
1285         struct btree *b = s->insert->iter->nodes[0];
1286         struct bpos next_pos = bpos_min(s->insert->k->k.p,
1287                                         k.k ? k.k->p : b->key.k.p);
1288
1289         /* hole? */
1290         if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
1291                 bool have_uncommitted = bkey_cmp(s->committed,
1292                                 bkey_start_pos(&s->insert->k->k)) > 0;
1293
1294                 switch (__extent_insert_advance_pos(s, bkey_start_pos(k.k),
1295                                                     bkey_s_c_null)) {
1296                 case BTREE_HOOK_DO_INSERT:
1297                         break;
1298                 case BTREE_HOOK_NO_INSERT:
1299                         /*
1300                          * we had to split @insert and insert the committed
1301                          * part - need to bail out and recheck journal
1302                          * reservation/btree node before we advance pos past @k:
1303                          */
1304                         if (have_uncommitted)
1305                                 return BTREE_HOOK_NO_INSERT;
1306                         break;
1307                 case BTREE_HOOK_RESTART_TRANS:
1308                         return BTREE_HOOK_RESTART_TRANS;
1309                 }
1310         }
1311
1312         /* avoid redundant calls to hook fn: */
1313         if (!bkey_cmp(s->committed, next_pos))
1314                 return BTREE_HOOK_DO_INSERT;
1315
1316         return __extent_insert_advance_pos(s, next_pos, k);
1317 }
1318
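/*
 * Overwriting the middle of a compressed extent splits it into two keys that
 * still reference the full compressed payload on disk, which can increase the
 * sectors we have accounted for - so we may need to add to our disk
 * reservation here (and can get -ENOSPC).
 */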
1319 static enum btree_insert_ret
1320 extent_insert_check_split_compressed(struct extent_insert_state *s,
1321                                      struct bkey_s_c k,
1322                                      enum bch_extent_overlap overlap)
1323 {
1324         struct cache_set *c = s->trans->c;
1325         unsigned sectors;
1326
1327         if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
1328             (sectors = bkey_extent_is_compressed(k))) {
1329                 int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
1330
1331                 if (s->trans->flags & BTREE_INSERT_NOFAIL)
1332                         flags |= BCH_DISK_RESERVATION_NOFAIL;
1333
1334                 switch (bch_disk_reservation_add(c,
1335                                 s->trans->disk_res,
1336                                 sectors, flags)) {
1337                 case 0:
1338                         break;
1339                 case -ENOSPC:
1340                         return BTREE_INSERT_ENOSPC;
1341                 case -EINTR:
1342                         return BTREE_INSERT_NEED_GC_LOCK;
1343                 default:
1344                         BUG();
1345                 }
1346         }
1347
1348         return BTREE_INSERT_OK;
1349 }
1350
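/*
 * Trim, split or drop the existing key @k according to how the key being
 * inserted overlaps it, updating sector accounting and the node iterator as
 * needed.
 */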
1351 static enum btree_insert_ret
1352 extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
1353               struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
1354               enum bch_extent_overlap overlap)
1355 {
1356         struct cache_set *c = s->trans->c;
1357         struct btree_iter *iter = s->insert->iter;
1358         struct btree *b = iter->nodes[0];
1359         struct btree_node_iter *node_iter = &iter->node_iters[0];
1360
1361         switch (overlap) {
1362         case BCH_EXTENT_OVERLAP_FRONT:
1363                 /* insert overlaps with start of k: */
1364                 bch_cut_subtract_front(s, insert->k.p, k);
1365                 BUG_ON(bkey_deleted(k.k));
1366                 extent_save(b, node_iter, _k, k.k);
1367                 break;
1368
1369         case BCH_EXTENT_OVERLAP_BACK:
1370                 /* insert overlaps with end of k: */
1371                 bch_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
1372                 BUG_ON(bkey_deleted(k.k));
1373                 extent_save(b, node_iter, _k, k.k);
1374
1375                 /*
1376                  * As the auxiliary tree is indexed by the end of the
1377                  * key and we've just changed the end, update the
1378                  * auxiliary tree.
1379                  */
1380                 bch_bset_fix_invalidated_key(b, t, _k);
1381                 bch_btree_node_iter_fix(iter, b, node_iter, t,
1382                                         _k, _k->u64s, _k->u64s);
1383                 break;
1384
1385         case BCH_EXTENT_OVERLAP_ALL: {
1386                 struct bpos orig_pos = k.k->p;
1387
1388                 /* The insert key completely covers k, invalidate k */
1389                 if (!bkey_whiteout(k.k))
1390                         btree_keys_account_key_drop(&b->nr,
1391                                                 t - b->set, _k);
1392
1393                 bch_drop_subtract(s, k);
1394                 k.k->p = bkey_start_pos(&insert->k);
1395                 if (!__extent_save(b, node_iter, _k, k.k)) {
1396                         /*
1397                          * Couldn't repack: we aren't necessarily able
1398                          * to repack if the new key is outside the range
1399                          * of the old extent, so we have to split
1400                          * @insert:
1401                          */
1402                         k.k->p = orig_pos;
1403                         extent_save(b, node_iter, _k, k.k);
1404
1405                         if (extent_insert_advance_pos(s, k.s_c) ==
1406                             BTREE_HOOK_RESTART_TRANS)
1407                                 return BTREE_INSERT_NEED_TRAVERSE;
1408
1409                         extent_insert_committed(s);
1410                         /*
1411                          * We split and inserted up to k.k->p - that
1412                          * has to coincide with iter->pos, so that we
1413                          * don't have anything more we have to insert
1414                          * until we recheck our journal reservation:
1415                          */
1416                         EBUG_ON(bkey_cmp(s->committed, k.k->p));
1417                 } else {
1418                         bch_bset_fix_invalidated_key(b, t, _k);
1419                         bch_btree_node_iter_fix(iter, b, node_iter, t,
1420                                                 _k, _k->u64s, _k->u64s);
1421                 }
1422
1423                 break;
1424         }
1425         case BCH_EXTENT_OVERLAP_MIDDLE: {
1426                 BKEY_PADDED(k) split;
1427                 /*
1428                  * The insert key falls 'in the middle' of k
1429                  * The insert key splits k in 3:
1430                  * - start only in k, preserve
1431                  * - middle common section, invalidate in k
1432                  * - end only in k, preserve
1433                  *
1434                  * We update the old key in place to preserve the end,
1435                  * insert will be the new common section,
1436                  * we manually insert a copy preserving the start.
1437                  *
1438                  * Modify k _before_ doing the insert (which will move
1439                  * what k points to).
1440                  */
1441                 bkey_reassemble(&split.k, k.s_c);
1442                 split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
1443
1444                 bch_cut_back(bkey_start_pos(&insert->k), &split.k.k);
1445                 BUG_ON(bkey_deleted(&split.k.k));
1446
1447                 bch_cut_subtract_front(s, insert->k.p, k);
1448                 BUG_ON(bkey_deleted(k.k));
1449                 extent_save(b, node_iter, _k, k.k);
1450
1451                 bch_add_sectors(s, bkey_i_to_s_c(&split.k),
1452                                 bkey_start_offset(&split.k.k),
1453                                 split.k.k.size);
1454                 extent_bset_insert(c, iter, &split.k);
1455                 break;
1456         }
1457         }
1458
1459         return BTREE_INSERT_OK;
1460 }
1461
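/*
 * Handle inserting a whiteout (deletion) key: walk the existing extents
 * overlapping [iter->pos, insert->k.p), trimming or dropping them and
 * inserting whiteout keys where the overwritten extents need them.
 */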
1462 static enum btree_insert_ret
1463 bch_delete_fixup_extent(struct extent_insert_state *s)
1464 {
1465         struct cache_set *c = s->trans->c;
1466         struct btree_iter *iter = s->insert->iter;
1467         struct btree *b = iter->nodes[0];
1468         struct btree_node_iter *node_iter = &iter->node_iters[0];
1469         struct bkey_packed *_k;
1470         struct bkey unpacked;
1471         struct bkey_i *insert = s->insert->k;
1472         enum btree_insert_ret ret = BTREE_INSERT_OK;
1473
1474         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
1475
1476         s->whiteout     = *insert;
1477         s->do_journal   = false;
1478
1479         while (bkey_cmp(s->committed, insert->k.p) < 0 &&
1480                (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
1481                (_k = bch_btree_node_iter_peek_all(node_iter, b))) {
1482                 struct bset_tree *t = bch_bkey_to_bset(b, _k);
1483                 struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
1484                 enum bch_extent_overlap overlap;
1485
1486                 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
1487                 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1488
1489                 if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
1490                         break;
1491
1492                 if (bkey_whiteout(k.k)) {
1493                         s->committed = bpos_min(insert->k.p, k.k->p);
1494                         goto next;
1495                 }
1496
1497                 overlap = bch_extent_overlap(&insert->k, k.k);
1498
1499                 ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
1500                 if (ret != BTREE_INSERT_OK)
1501                         goto stop;
1502
1503                 switch (extent_insert_advance_pos(s, k.s_c)) {
1504                 case BTREE_HOOK_DO_INSERT:
1505                         break;
1506                 case BTREE_HOOK_NO_INSERT:
1507                         continue;
1508                 case BTREE_HOOK_RESTART_TRANS:
1509                         ret = BTREE_INSERT_NEED_TRAVERSE;
1510                         goto stop;
1511                 }
1512
1513                 s->do_journal = true;
1514
1515                 if (overlap == BCH_EXTENT_OVERLAP_ALL) {
1516                         btree_keys_account_key_drop(&b->nr,
1517                                                 t - b->set, _k);
1518                         bch_subtract_sectors(s, k.s_c,
1519                                              bkey_start_offset(k.k), k.k->size);
1520                         _k->type = KEY_TYPE_DISCARD;
1521                         reserve_whiteout(b, t, _k);
1522                 } else if (k.k->needs_whiteout ||
1523                            bset_written(b, bset(b, t))) {
1524                         struct bkey_i discard = *insert;
1525
1526                         switch (overlap) {
1527                         case BCH_EXTENT_OVERLAP_FRONT:
1528                                 bch_cut_front(bkey_start_pos(k.k), &discard);
1529                                 break;
1530                         case BCH_EXTENT_OVERLAP_BACK:
1531                                 bch_cut_back(k.k->p, &discard.k);
1532                                 break;
1533                         default:
1534                                 break;
1535                         }
1536
1537                         discard.k.needs_whiteout = true;
1538
1539                         ret = extent_squash(s, insert, t, _k, k, overlap);
1540                         BUG_ON(ret != BTREE_INSERT_OK);
1541
1542                         extent_bset_insert(c, iter, &discard);
1543                 } else {
1544                         ret = extent_squash(s, insert, t, _k, k, overlap);
1545                         BUG_ON(ret != BTREE_INSERT_OK);
1546                 }
1547 next:
1548                 bch_cut_front(s->committed, insert);
1549                 bch_btree_iter_set_pos_same_leaf(iter, s->committed);
1550         }
1551
1552         if (bkey_cmp(s->committed, insert->k.p) < 0 &&
1553             ret == BTREE_INSERT_OK &&
1554             extent_insert_advance_pos(s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
1555                 ret = BTREE_INSERT_NEED_TRAVERSE;
1556 stop:
1557         extent_insert_committed(s);
1558
1559         bch_fs_stats_apply(c, &s->stats, s->trans->disk_res,
1560                            gc_pos_btree_node(b));
1561
1562         EBUG_ON(bkey_cmp(iter->pos, s->committed));
1563         EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) != iter->at_end_of_leaf);
1564
1565         bch_cut_front(iter->pos, insert);
1566
1567         if (insert->k.size && iter->at_end_of_leaf)
1568                 ret = BTREE_INSERT_NEED_TRAVERSE;
1569
1570         EBUG_ON(insert->k.size && ret == BTREE_INSERT_OK);
1571
1572         return ret;
1573 }
1574
1575 /**
1576  * bch_insert_fixup_extent - insert a new extent and deal with overlaps
1577  *
1578  * This may result in not actually doing the insert, or inserting some subset
1579  * of the insert key. For cmpxchg operations this is where that logic lives.
1580  *
1581  * All subsets of @insert that need to be inserted are inserted using
1582  * bch_btree_insert_and_journal(). If @b or @res fills up, this function
1583  * returns early, setting @iter->pos to reflect the prefix of @insert that
1584  * actually got inserted.
1585  *
1586  * BSET INVARIANTS: this function is responsible for maintaining all the
1587  * invariants for bsets of extents in memory. Things get really hairy with 0
1588  * size extents:
1589  *
1590  * within one bset:
1591  *
1592  * bkey_start_pos(bkey_next(k)) >= k->p
1593  * or bkey_start_offset(bkey_next(k)) >= k->offset
1594  *
1595  * i.e. strict ordering, no overlapping extents.
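 *
 * (For example - using the convention that a key's position is its end and
 * bkey_start_pos() is that position minus the size - extents covering [0, 8)
 * and [8, 16) satisfy this, while [0, 8) followed by [4, 12) would not.)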
1596  *
1597  * multiple bsets (i.e. full btree node):
1598  *
1599  * ∀ k, j
1600  *   k.size != 0 ∧ j.size != 0 →
1601  *     ¬ (k > bkey_start_pos(j) ∧ k < j)
1602  *
1603  * i.e. no two overlapping keys _of nonzero size_
1604  *
1605  * We can't realistically maintain this invariant for zero size keys because of
1606  * the key merging done in bch_btree_insert_key() - for two mergeable keys k, j
1607  * there may be another 0 size key between them in another bset, and it will
1608  * thus overlap with the merged key.
1609  *
1610  * In addition, iter->pos indicates how much of @insert has been processed.
1611  * If iter->pos is not equal to the end of @insert, then key insertion needs
1612  * to continue/be retried.
1613  */
1614 enum btree_insert_ret
1615 bch_insert_fixup_extent(struct btree_insert *trans,
1616                         struct btree_insert_entry *insert)
1617 {
1618         struct cache_set *c = trans->c;
1619         struct btree_iter *iter = insert->iter;
1620         struct btree *b = iter->nodes[0];
1621         struct btree_node_iter *node_iter = &iter->node_iters[0];
1622         struct bkey_packed *_k;
1623         struct bkey unpacked;
1624         enum btree_insert_ret ret = BTREE_INSERT_OK;
1625
1626         struct extent_insert_state s = {
1627                 .trans          = trans,
1628                 .insert         = insert,
1629                 .committed      = insert->iter->pos,
1630                 .deleting       = bkey_whiteout(&insert->k->k),
1631         };
1632
1633         EBUG_ON(iter->level);
1634         EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);
1635
1636         if (s.deleting)
1637                 return bch_delete_fixup_extent(&s);
1638
1639         /*
1640          * As we process overlapping extents, we advance @iter->pos both to
1641          * signal to our caller (btree_insert_key()) how much of @insert->k has
1642          * been inserted, and also to keep @iter->pos consistent with
1643          * @insert->k and the node iterator that we're advancing:
1644          */
1645         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1646
1647         if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1648                 bch_add_sectors(&s, bkey_i_to_s_c(insert->k),
1649                                 bkey_start_offset(&insert->k->k),
1650                                 insert->k->k.size);
1651
1652         while (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
1653                (ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK &&
1654                (_k = bch_btree_node_iter_peek_all(node_iter, b))) {
1655                 struct bset_tree *t = bch_bkey_to_bset(b, _k);
1656                 struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
1657                 enum bch_extent_overlap overlap;
1658
1659                 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1660                 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1661
1662                 if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
1663                         break;
1664
1665                 overlap = bch_extent_overlap(&insert->k->k, k.k);
1666
1667                 ret = extent_insert_check_split_compressed(&s, k.s_c, overlap);
1668                 if (ret != BTREE_INSERT_OK)
1669                         goto stop;
1670
1671                 if (!k.k->size)
1672                         goto squash;
1673
1674                 /*
1675                  * Only call advance pos & call hook for nonzero size extents:
1676                  * If hook returned BTREE_HOOK_NO_INSERT, @insert->k no longer
1677                  * overlaps with @k:
1678                  */
1679                 switch (extent_insert_advance_pos(&s, k.s_c)) {
1680                 case BTREE_HOOK_DO_INSERT:
1681                         break;
1682                 case BTREE_HOOK_NO_INSERT:
1683                         continue;
1684                 case BTREE_HOOK_RESTART_TRANS:
1685                         ret = BTREE_INSERT_NEED_TRAVERSE;
1686                         goto stop;
1687                 }
1688
1689                 if (k.k->size &&
1690                     (k.k->needs_whiteout || bset_written(b, bset(b, t))))
1691                         insert->k->k.needs_whiteout = true;
1692
1693                 if (overlap == BCH_EXTENT_OVERLAP_ALL &&
1694                     bkey_whiteout(k.k) &&
1695                     k.k->needs_whiteout) {
1696                         unreserve_whiteout(b, t, _k);
1697                         _k->needs_whiteout = false;
1698                 }
1699 squash:
1700                 ret = extent_squash(&s, insert->k, t, _k, k, overlap);
1701                 if (ret != BTREE_INSERT_OK)
1702                         goto stop;
1703         }
1704
1705         if (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
1706             ret == BTREE_INSERT_OK &&
1707             extent_insert_advance_pos(&s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
1708                 ret = BTREE_INSERT_NEED_TRAVERSE;
1709 stop:
1710         extent_insert_committed(&s);
1711         /*
1712          * Subtract any remaining sectors from @insert, if we bailed out early
1713          * and didn't fully insert @insert:
1714          */
1715         if (insert->k->k.size &&
1716             !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
1717                 bch_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
1718                                      bkey_start_offset(&insert->k->k),
1719                                      insert->k->k.size);
1720
1721         bch_fs_stats_apply(c, &s.stats, trans->disk_res,
1722                            gc_pos_btree_node(b));
1723
1724         EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1725         EBUG_ON(bkey_cmp(iter->pos, s.committed));
1726         EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) != iter->at_end_of_leaf);
1727
1728         if (insert->k->k.size && iter->at_end_of_leaf)
1729                 ret = BTREE_INSERT_NEED_TRAVERSE;
1730
1731         EBUG_ON(insert->k->k.size && ret == BTREE_INSERT_OK);
1732
1733         return ret;
1734 }
1735
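/*
 * .key_invalid hook for extents: returns a string describing why @k is
 * invalid, or NULL if it's valid:
 */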
1736 static const char *bch_extent_invalid(const struct cache_set *c,
1737                                       struct bkey_s_c k)
1738 {
1739         if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
1740                 return "value too big";
1741
1742         if (!k.k->size)
1743                 return "zero key size";
1744
1745         switch (k.k->type) {
1746         case BCH_EXTENT:
1747         case BCH_EXTENT_CACHED: {
1748                 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1749                 const union bch_extent_entry *entry;
1750                 const union bch_extent_crc *crc;
1751                 const struct bch_extent_ptr *ptr;
1752                 struct cache_member_rcu *mi = cache_member_info_get(c);
1753                 unsigned size_ondisk = e.k->size;
1754                 const char *reason;
1755
1756                 extent_for_each_entry(e, entry) {
1757                         reason = "invalid extent entry type";
1758                         if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
1759                                 goto invalid;
1760
1761                         if (extent_entry_is_crc(entry)) {
1762                                 crc = entry_to_crc(entry);
1763
1764                                 reason = "checksum offset + key size > uncompressed size";
1765                                 if (crc_offset(crc) + e.k->size >
1766                                     crc_uncompressed_size(e.k, crc))
1767                                         goto invalid;
1768
1769                                 size_ondisk = crc_compressed_size(e.k, crc);
1770
1771                                 reason = "invalid checksum type";
1772                                 if (!bch_checksum_type_valid(c, crc_csum_type(crc)))
1773                                         goto invalid;
1774
1775                                 reason = "invalid compression type";
1776                                 if (crc_compression_type(crc) >= BCH_COMPRESSION_NR)
1777                                         goto invalid;
1778                         } else {
1779                                 ptr = entry_to_ptr(entry);
1780
1781                                 reason = extent_ptr_invalid(e, mi,
1782                                                 &entry->ptr, size_ondisk);
1783                                 if (reason)
1784                                         goto invalid;
1785                         }
1786                 }
1787
1788                 cache_member_info_put();
1789                 return NULL;
1790 invalid:
1791                 cache_member_info_put();
1792                 return reason;
1793         }
1794
1795         case BCH_RESERVATION: {
1796                 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
1797
1798                 if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
1799                         return "incorrect value size";
1800
1801                 if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
1802                         return "invalid nr_replicas";
1803
1804                 return NULL;
1805         }
1806
1807         default:
1808                 return "invalid value type";
1809         }
1810 }
1811
1812 static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
1813                                          struct bkey_s_c_extent e)
1814 {
1815         const struct bch_extent_ptr *ptr;
1816         struct cache_member_rcu *mi;
1817         struct cache *ca;
1818         struct bucket *g;
1819         unsigned seq, stale;
1820         char buf[160];
1821         bool bad;
1822         unsigned ptrs_per_tier[BCH_TIER_MAX];
1823         unsigned tier, replicas = 0;
1824
1825         /*
1826          * XXX: we should be doing most/all of these checks at startup time,
1827          * where we check bkey_invalid() in btree_node_read_done()
1828          *
1829          * But note that we can't check for stale pointers or incorrect gc marks
1830          * until after journal replay is done (it might be an extent that's
1831          * going to get overwritten during replay)
1832          */
1833
1834         memset(ptrs_per_tier, 0, sizeof(ptrs_per_tier));
1835
1836         mi = cache_member_info_get(c);
1837
1838         extent_for_each_ptr(e, ptr) {
1839                 replicas++;
1840
1841                 if (ptr->dev >= mi->nr_devices)
1842                         goto bad_device;
1843
1844                 /*
1845                  * If journal replay hasn't finished, we might be seeing keys
1846                  * that will be overwritten by the time journal replay is done:
1847                  */
1848                 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1849                         continue;
1850
1851                 if (!mi->m[ptr->dev].valid)
1852                         goto bad_device;
1853
1854                 tier = mi->m[ptr->dev].tier;
1855                 ptrs_per_tier[tier]++;
1856
1857                 stale = 0;
1858
1859                 if ((ca = PTR_CACHE(c, ptr))) {
1860                         g = PTR_BUCKET(ca, ptr);
1861
1862                         do {
1863                                 struct bucket_mark mark;
1864
1865                                 seq = read_seqcount_begin(&c->gc_pos_lock);
1866                                 mark = READ_ONCE(g->mark);
1867
1868                                 /* between mark and bucket gen */
1869                                 smp_rmb();
1870
1871                                 stale = ptr_stale(ca, ptr);
1872
1873                                 bch_fs_bug_on(stale && !ptr->cached, c,
1874                                                  "stale dirty pointer");
1875
1876                                 bch_fs_bug_on(stale > 96, c,
1877                                                  "key too stale: %i",
1878                                                  stale);
1879
1880                                 if (stale)
1881                                         break;
1882
1883                                 bad = (mark.is_metadata ||
1884                                        (gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
1885                                         !mark.owned_by_allocator &&
1886                                         !(ptr->cached
1887                                           ? mark.cached_sectors
1888                                           : mark.dirty_sectors)));
1889                         } while (read_seqcount_retry(&c->gc_pos_lock, seq));
1890
1891                         if (bad)
1892                                 goto bad_ptr;
1893                 }
1894         }
1895         cache_member_info_put();
1896
1897         if (replicas > BCH_REPLICAS_MAX) {
1898                 bch_bkey_val_to_text(c, btree_node_type(b), buf,
1899                                      sizeof(buf), e.s_c);
1900                 bch_fs_bug(c,
1901                         "extent key bad (too many replicas: %u): %s",
1902                         replicas, buf);
1903                 return;
1904         }
1905
1906         if (!bkey_extent_is_cached(e.k) &&
1907             replicas < c->sb.data_replicas_have) {
1908                 bch_bkey_val_to_text(c, btree_node_type(b), buf,
1909                                      sizeof(buf), e.s_c);
1910                 bch_fs_bug(c,
1911                         "extent key bad (too few replicas, %u < %u): %s",
1912                         replicas, c->sb.data_replicas_have, buf);
1913                 return;
1914         }
1915
1916         return;
1917
1918 bad_device:
1919         bch_bkey_val_to_text(c, btree_node_type(b), buf,
1920                              sizeof(buf), e.s_c);
1921         bch_fs_bug(c, "extent pointer to dev %u missing device: %s",
1922                    ptr->dev, buf);
1923         cache_member_info_put();
1924         return;
1925
1926 bad_ptr:
1927         bch_bkey_val_to_text(c, btree_node_type(b), buf,
1928                              sizeof(buf), e.s_c);
1929         bch_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i "
1930                    "gen %i last_gc %i mark 0x%08x",
1931                    buf, PTR_BUCKET_NR(ca, ptr),
1932                    g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
1933                    ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
1934                    (unsigned) g->mark.counter);
1935         cache_member_info_put();
1936         return;
1937 }
1938
1939 static void bch_extent_debugcheck(struct cache_set *c, struct btree *b,
1940                                   struct bkey_s_c k)
1941 {
1942         switch (k.k->type) {
1943         case BCH_EXTENT:
1944         case BCH_EXTENT_CACHED:
1945                 bch_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
1946                 break;
1947         case BCH_RESERVATION:
1948                 break;
1949         default:
1950                 BUG();
1951         }
1952 }
1953
1954 static void bch_extent_to_text(struct cache_set *c, char *buf,
1955                                size_t size, struct bkey_s_c k)
1956 {
1957         char *out = buf, *end = buf + size;
1958         const char *invalid;
1959
1960 #define p(...)  (out += scnprintf(out, end - out, __VA_ARGS__))
1961
1962         if (bkey_extent_is_data(k.k))
1963                 out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
1964
1965         invalid = bch_extent_invalid(c, k);
1966         if (invalid)
1967                 p(" invalid: %s", invalid);
1968 #undef p
1969 }
1970
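/*
 * Tier of the device @ptr points to, or UINT_MAX if the device index is out
 * of range:
 */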
1971 static unsigned PTR_TIER(struct cache_member_rcu *mi,
1972                          const struct bch_extent_ptr *ptr)
1973 {
1974         return ptr->dev < mi->nr_devices
1975                 ? mi->m[ptr->dev].tier
1976                 : UINT_MAX;
1977 }
1978
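/*
 * Initialize @crc using the smallest crc field variant (crc32, crc64 or
 * crc128) that can hold the given checksum, sizes and nonce; BUGs if none of
 * them can:
 */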
1979 static void bch_extent_crc_init(union bch_extent_crc *crc,
1980                                 unsigned compressed_size,
1981                                 unsigned uncompressed_size,
1982                                 unsigned compression_type,
1983                                 unsigned nonce,
1984                                 struct bch_csum csum, unsigned csum_type)
1985 {
1986         if (bch_crc_bytes[csum_type]    <= 4 &&
1987             uncompressed_size           <= CRC32_SIZE_MAX &&
1988             nonce                       <= CRC32_NONCE_MAX) {
1989                 crc->crc32 = (struct bch_extent_crc32) {
1990                         .type = 1 << BCH_EXTENT_ENTRY_crc32,
1991                         ._compressed_size       = compressed_size - 1,
1992                         ._uncompressed_size     = uncompressed_size - 1,
1993                         .offset                 = 0,
1994                         .compression_type       = compression_type,
1995                         .csum_type              = csum_type,
1996                         .csum                   = *((__le32 *) &csum.lo),
1997                 };
1998                 return;
1999         }
2000
2001         if (bch_crc_bytes[csum_type]    <= 10 &&
2002             uncompressed_size           <= CRC64_SIZE_MAX &&
2003             nonce                       <= CRC64_NONCE_MAX) {
2004                 crc->crc64 = (struct bch_extent_crc64) {
2005                         .type = 1 << BCH_EXTENT_ENTRY_crc64,
2006                         ._compressed_size       = compressed_size - 1,
2007                         ._uncompressed_size     = uncompressed_size - 1,
2008                         .offset                 = 0,
2009                         .nonce                  = nonce,
2010                         .compression_type       = compression_type,
2011                         .csum_type              = csum_type,
2012                         .csum_lo                = csum.lo,
2013                         .csum_hi                = *((__le16 *) &csum.hi),
2014                 };
2015                 return;
2016         }
2017
2018         if (bch_crc_bytes[csum_type]    <= 16 &&
2019             uncompressed_size           <= CRC128_SIZE_MAX &&
2020             nonce                       <= CRC128_NONCE_MAX) {
2021                 crc->crc128 = (struct bch_extent_crc128) {
2022                         .type = 1 << BCH_EXTENT_ENTRY_crc128,
2023                         ._compressed_size       = compressed_size - 1,
2024                         ._uncompressed_size     = uncompressed_size - 1,
2025                         .offset                 = 0,
2026                         .nonce                  = nonce,
2027                         .compression_type       = compression_type,
2028                         .csum_type              = csum_type,
2029                         .csum                   = csum,
2030                 };
2031                 return;
2032         }
2033
2034         BUG();
2035 }
2036
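/*
 * Append a new crc entry to @e describing the given sizes, compression type,
 * nonce and checksum - unless no crc entry is needed at all, or the last
 * existing crc entry already matches exactly:
 */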
2037 void bch_extent_crc_append(struct bkey_i_extent *e,
2038                            unsigned compressed_size,
2039                            unsigned uncompressed_size,
2040                            unsigned compression_type,
2041                            unsigned nonce,
2042                            struct bch_csum csum, unsigned csum_type)
2043 {
2044         union bch_extent_crc *crc;
2045
2046         BUG_ON(compressed_size > uncompressed_size);
2047         BUG_ON(uncompressed_size != e->k.size);
2048         BUG_ON(!compressed_size || !uncompressed_size);
2049
2050         /*
2051          * Look up the last crc entry, so we can check if we need to add
2052          * another:
2053          */
2054         extent_for_each_crc(extent_i_to_s(e), crc)
2055                 ;
2056
2057         if (!crc && !csum_type && !compression_type)
2058                 return;
2059
2060         if (crc &&
2061             crc_compressed_size(&e->k, crc)     == compressed_size &&
2062             crc_uncompressed_size(&e->k, crc)   == uncompressed_size &&
2063             crc_offset(crc)                     == 0 &&
2064             crc_nonce(crc)                      == nonce &&
2065             crc_csum_type(crc)                  == csum_type &&
2066             crc_compression_type(crc)           == compression_type &&
2067             crc_csum(crc).lo                    == csum.lo &&
2068             crc_csum(crc).hi                    == csum.hi)
2069                 return;
2070
2071         bch_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)),
2072                             compressed_size,
2073                             uncompressed_size,
2074                             compression_type,
2075                             nonce, csum, csum_type);
2076         __extent_entry_push(e);
2077 }
2078
2079 /*
2080  * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
2081  *
2082  * Returns true if @k should be dropped entirely
2083  *
2084  * For existing keys, only called when btree nodes are being rewritten, not when
2085  * they're merely being compacted/resorted in memory.
2086  */
2087 bool bch_extent_normalize(struct cache_set *c, struct bkey_s k)
2088 {
2089         struct bkey_s_extent e;
2090
2091         switch (k.k->type) {
2092         case KEY_TYPE_ERROR:
2093                 return false;
2094
2095         case KEY_TYPE_DELETED:
2096         case KEY_TYPE_COOKIE:
2097                 return true;
2098
2099         case KEY_TYPE_DISCARD:
2100                 return bversion_zero(k.k->version);
2101
2102         case BCH_EXTENT:
2103         case BCH_EXTENT_CACHED:
2104                 e = bkey_s_to_extent(k);
2105
2106                 bch_extent_drop_stale(c, e);
2107
2108                 if (!bkey_val_u64s(e.k)) {
2109                         if (bkey_extent_is_cached(e.k)) {
2110                                 k.k->type = KEY_TYPE_DISCARD;
2111                                 if (bversion_zero(k.k->version))
2112                                         return true;
2113                         } else {
2114                                 k.k->type = KEY_TYPE_ERROR;
2115                         }
2116                 }
2117
2118                 return false;
2119         case BCH_RESERVATION:
2120                 return false;
2121         default:
2122                 BUG();
2123         }
2124 }
2125
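/*
 * Demote @nr_cached of the extent's dirty pointers to cached pointers,
 * starting with pointers on the lowest numbered tier and working upwards:
 */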
2126 void bch_extent_mark_replicas_cached(struct cache_set *c,
2127                                      struct bkey_s_extent e,
2128                                      unsigned nr_cached)
2129 {
2130         struct bch_extent_ptr *ptr;
2131         struct cache_member_rcu *mi;
2132         bool have_higher_tier;
2133         unsigned tier = 0;
2134
2135         if (!nr_cached)
2136                 return;
2137
2138         mi = cache_member_info_get(c);
2139
2140         do {
2141                 have_higher_tier = false;
2142
2143                 extent_for_each_ptr(e, ptr) {
2144                         if (!ptr->cached &&
2145                             PTR_TIER(mi, ptr) == tier) {
2146                                 ptr->cached = true;
2147                                 nr_cached--;
2148                                 if (!nr_cached)
2149                                         goto out;
2150                         }
2151
2152                         if (PTR_TIER(mi, ptr) > tier)
2153                                 have_higher_tier = true;
2154                 }
2155
2156                 tier++;
2157         } while (have_higher_tier);
2158 out:
2159         cache_member_info_put();
2160 }
2161
2162 /*
2163  * This picks a non-stale pointer, preferably from a device other than
2164  * avoid.  Avoid can be NULL, meaning pick any.  If there are no non-stale
2165  * pointers to other devices, it will still pick a pointer from avoid.
2166  * Note that it prefers lower-numbered pointers to higher-numbered pointers
2167  * as the pointers are sorted by tier, hence preferring pointers to tier 0
2168  * rather than pointers to tier 1.
2169  */
2170 void bch_extent_pick_ptr_avoiding(struct cache_set *c, struct bkey_s_c k,
2171                                   struct cache *avoid,
2172                                   struct extent_pick_ptr *ret)
2173 {
2174         struct bkey_s_c_extent e;
2175         const union bch_extent_crc *crc;
2176         const struct bch_extent_ptr *ptr;
2177         struct cache *ca;
2178
2179         switch (k.k->type) {
2180         case KEY_TYPE_DELETED:
2181         case KEY_TYPE_DISCARD:
2182         case KEY_TYPE_COOKIE:
2183                 ret->ca = NULL;
2184                 return;
2185
2186         case KEY_TYPE_ERROR:
2187                 ret->ca = ERR_PTR(-EIO);
2188                 return;
2189
2190         case BCH_EXTENT:
2191         case BCH_EXTENT_CACHED:
2192                 e = bkey_s_c_to_extent(k);
2193                 rcu_read_lock();
2194                 ret->ca = NULL;
2195
2196                 extent_for_each_online_device_crc(c, e, crc, ptr, ca)
2197                         if (!ptr_stale(ca, ptr)) {
2198                                 *ret = (struct extent_pick_ptr) {
2199                                         .crc = crc_to_128(e.k, crc),
2200                                         .ptr = *ptr,
2201                                         .ca = ca,
2202                                 };
2203
2204                                 if (ca != avoid)
2205                                         break;
2206                         }
2207
2208                 if (ret->ca)
2209                         percpu_ref_get(&ret->ca->ref);
2210                 else if (!bkey_extent_is_cached(e.k))
2211                         ret->ca = ERR_PTR(-EIO);
2212
2213                 rcu_read_unlock();
2214                 return;
2215
2216         case BCH_RESERVATION:
2217                 ret->ca = NULL;
2218                 return;
2219
2220         default:
2221                 BUG();
2222         }
2223 }
2224
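/*
 * .key_merge hook: try to merge @r into @l (with @l immediately preceding
 * @r); returns BCH_MERGE_MERGE on a full merge, BCH_MERGE_PARTIAL if only
 * part of @r could be merged into @l, or BCH_MERGE_NOMERGE:
 */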
2225 static enum merge_result bch_extent_merge(struct cache_set *c,
2226                                           struct btree *bk,
2227                                           struct bkey_i *l, struct bkey_i *r)
2228 {
2229         struct bkey_s_extent el, er;
2230         union bch_extent_entry *en_l, *en_r;
2231
2232         if (key_merging_disabled(c))
2233                 return BCH_MERGE_NOMERGE;
2234
2235         /*
2236          * Generic header checks
2237          * Assumes left and right are in order
2238          * Left and right must be exactly aligned
2239          */
2240
2241         if (l->k.u64s           != r->k.u64s ||
2242             l->k.type           != r->k.type ||
2243             bversion_cmp(l->k.version, r->k.version) ||
2244             bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
2245                 return BCH_MERGE_NOMERGE;
2246
2247         switch (l->k.type) {
2248         case KEY_TYPE_DELETED:
2249         case KEY_TYPE_DISCARD:
2250         case KEY_TYPE_ERROR:
2251                 /* These types are mergeable, and no val to check */
2252                 break;
2253
2254         case BCH_EXTENT:
2255         case BCH_EXTENT_CACHED:
2256                 el = bkey_i_to_s_extent(l);
2257                 er = bkey_i_to_s_extent(r);
2258
2259                 extent_for_each_entry(el, en_l) {
2260                         struct bch_extent_ptr *lp, *rp;
2261                         struct cache_member_cpu *m;
2262
2263                         en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
2264
2265                         if ((extent_entry_type(en_l) !=
2266                              extent_entry_type(en_r)) ||
2267                             extent_entry_is_crc(en_l))
2268                                 return BCH_MERGE_NOMERGE;
2269
2270                         lp = &en_l->ptr;
2271                         rp = &en_r->ptr;
2272
2273                         if (lp->offset + el.k->size     != rp->offset ||
2274                             lp->dev                     != rp->dev ||
2275                             lp->gen                     != rp->gen)
2276                                 return BCH_MERGE_NOMERGE;
2277
2278                         /* We don't allow extents to straddle buckets: */
2279
2280                         m = cache_member_info_get(c)->m + lp->dev;
2281                         if ((lp->offset & ~((u64) m->bucket_size - 1)) !=
2282                             (rp->offset & ~((u64) m->bucket_size - 1))) {
2283                                 cache_member_info_put();
2284                                 return BCH_MERGE_NOMERGE;
2285
2286                         }
2287                         cache_member_info_put();
2288                 }
2289
2290                 break;
2291         case BCH_RESERVATION: {
2292                 struct bkey_i_reservation *li = bkey_i_to_reservation(l);
2293                 struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
2294
2295                 if (li->v.generation != ri->v.generation ||
2296                     li->v.nr_replicas != ri->v.nr_replicas)
2297                         return BCH_MERGE_NOMERGE;
2298                 break;
2299         }
2300         default:
2301                 return BCH_MERGE_NOMERGE;
2302         }
2303
2304         l->k.needs_whiteout |= r->k.needs_whiteout;
2305
2306         /* Keys with no pointers aren't restricted to one bucket and could
2307          * overflow KEY_SIZE
2308          */
2309         if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
2310                 bch_key_resize(&l->k, KEY_SIZE_MAX);
2311                 bch_cut_front(l->k.p, r);
2312                 return BCH_MERGE_PARTIAL;
2313         }
2314
2315         bch_key_resize(&l->k, l->k.size + r->k.size);
2316
2317         return BCH_MERGE_MERGE;
2318 }
2319
2320 static void extent_i_save(struct btree *b, struct bkey_packed *dst,
2321                           struct bkey_i *src)
2322 {
2323         struct bkey_format *f = &b->format;
2324         struct bkey_i *dst_unpacked;
2325
2326         BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k));
2327
2328         /*
2329          * We don't want the bch_verify_key_order() call in extent_save(),
2330          * because we may be out of order with deleted keys that are about to be
2331          * removed by extent_bset_insert()
2332          */
2333
2334         if ((dst_unpacked = packed_to_bkey(dst)))
2335                 bkey_copy(dst_unpacked, src);
2336         else
2337                 BUG_ON(!bkey_pack(dst, src, f));
2338 }
2339
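/*
 * Helper for extent_merge_do_overlapping(): in check mode, just report
 * whether the 0 size key @k could be moved to @new_pos; otherwise move it
 * there and fix up the auxiliary tree and node iterator:
 */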
2340 static bool extent_merge_one_overlapping(struct btree_iter *iter,
2341                                          struct bpos new_pos,
2342                                          struct bset_tree *t,
2343                                          struct bkey_packed *k, struct bkey uk,
2344                                          bool check, bool could_pack)
2345 {
2346         struct btree *b = iter->nodes[0];
2347         struct btree_node_iter *node_iter = &iter->node_iters[0];
2348
2349         BUG_ON(!bkey_deleted(k));
2350
2351         if (check) {
2352                 return !bkey_packed(k) || could_pack;
2353         } else {
2354                 uk.p = new_pos;
2355                 extent_save(b, node_iter, k, &uk);
2356                 bch_bset_fix_invalidated_key(b, t, k);
2357                 bch_btree_node_iter_fix(iter, b, node_iter, t,
2358                                         k, k->u64s, k->u64s);
2359                 return true;
2360         }
2361 }
2362
2363 static bool extent_merge_do_overlapping(struct btree_iter *iter,
2364                                         struct bkey *m, bool back_merge)
2365 {
2366         struct btree *b = iter->nodes[0];
2367         struct btree_node_iter *node_iter = &iter->node_iters[0];
2368         struct bset_tree *t;
2369         struct bkey_packed *k;
2370         struct bkey uk;
2371         struct bpos new_pos = back_merge ? m->p : bkey_start_pos(m);
2372         bool could_pack = bkey_pack_pos((void *) &uk, new_pos, b);
2373         bool check = true;
2374
2375         /*
2376          * @m is the new merged extent:
2377          *
2378          * The merge took place in the last bset; we know there can't be any 0
2379          * size extents overlapping with m there because if so they would have
2380          * been between the two extents we merged.
2381          *
2382          * But in the other bsets, we have to check for and fix such extents:
2383          */
2384 do_fixup:
2385         for_each_bset(b, t) {
2386                 if (t == bset_tree_last(b))
2387                         break;
2388
2389                 /*
2390                  * If we don't find this bset in the iterator, we already
2391                  * got to the end of it, so start searching from the end.
2392                  */
2393                 k = bch_btree_node_iter_bset_pos(node_iter, b, t);
2394
2395                 if (k == btree_bkey_last(b, t))
2396                         k = bkey_prev_all(b, t, k);
2397                 if (!k)
2398                         continue;
2399
2400                 if (back_merge) {
2401                         /*
2402                          * Back merge: 0 size extents will be before the key
2403                          * that was just inserted (and thus the iterator
2404                          * position) - walk backwards to find them
2405                          */
2406                         for (;
2407                              k &&
2408                              (uk = bkey_unpack_key(b, k),
2409                               bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
2410                              k = bkey_prev_all(b, t, k)) {
2411                                 if (bkey_cmp(uk.p, m->p) >= 0)
2412                                         continue;
2413
2414                                 if (!extent_merge_one_overlapping(iter, new_pos,
2415                                                 t, k, uk, check, could_pack))
2416                                         return false;
2417                         }
2418                 } else {
2419                         /* Front merge - walk forwards */
2420                         for (;
2421                              k != btree_bkey_last(b, t) &&
2422                              (uk = bkey_unpack_key(b, k),
2423                               bkey_cmp(uk.p, m->p) < 0);
2424                              k = bkey_next(k)) {
2425                                 if (bkey_cmp(uk.p,
2426                                              bkey_start_pos(m)) <= 0)
2427                                         continue;
2428
2429                                 if (!extent_merge_one_overlapping(iter, new_pos,
2430                                                 t, k, uk, check, could_pack))
2431                                         return false;
2432                         }
2433                 }
2434         }
2435
2436         if (check) {
2437                 check = false;
2438                 goto do_fixup;
2439         }
2440
2441         return true;
2442 }
2443
2444 /*
2445  * When merging an extent that we're inserting into a btree node, the new merged
2446  * extent could overlap with an existing 0 size extent - if we don't fix that,
2447  * it'll break the btree node iterator so this code finds those 0 size extents
2448  * and shifts them out of the way.
2449  *
2450  * Also unpacks and repacks.
2451  */
2452 static bool bch_extent_merge_inline(struct cache_set *c,
2453                                     struct btree_iter *iter,
2454                                     struct bkey_packed *l,
2455                                     struct bkey_packed *r,
2456                                     bool back_merge)
2457 {
2458         struct btree *b = iter->nodes[0];
2459         struct btree_node_iter *node_iter = &iter->node_iters[0];
2460         const struct bkey_format *f = &b->format;
2461         struct bset_tree *t = bset_tree_last(b);
2462         struct bkey_packed *m;
2463         BKEY_PADDED(k) li;
2464         BKEY_PADDED(k) ri;
2465         struct bkey_i *mi;
2466         struct bkey tmp;
2467
2468         /*
2469          * We need to save copies of both l and r, because we might get a
2470          * partial merge (which modifies both) and then fail to repack
2471          */
2472         bkey_unpack(b, &li.k, l);
2473         bkey_unpack(b, &ri.k, r);
2474
2475         m = back_merge ? l : r;
2476         mi = back_merge ? &li.k : &ri.k;
2477
2478         /* l & r should be in last bset: */
2479         EBUG_ON(bch_bkey_to_bset(b, m) != t);
2480
2481         switch (bch_extent_merge(c, b, &li.k, &ri.k)) {
2482         case BCH_MERGE_NOMERGE:
2483                 return false;
2484         case BCH_MERGE_PARTIAL:
2485                 if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &mi->k, f))
2486                         return false;
2487
2488                 if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
2489                         return false;
2490
2491                 extent_i_save(b, m, mi);
2492                 bch_bset_fix_invalidated_key(b, t, m);
2493
2494                 /*
2495                  * Update iterator to reflect what we just inserted - otherwise,
2496                  * the iter_fix() call is going to put us _before_ the key we
2497                  * just partially merged with:
2498                  */
2499                 if (back_merge)
2500                         bch_btree_iter_set_pos_same_leaf(iter, li.k.k.p);
2501
2502                 bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
2503                                         t, m, m->u64s, m->u64s);
2504
2505                 if (!back_merge)
2506                         bkey_copy(packed_to_bkey(l), &li.k);
2507                 else
2508                         bkey_copy(packed_to_bkey(r), &ri.k);
2509                 return false;
2510         case BCH_MERGE_MERGE:
2511                 if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &li.k.k, f))
2512                         return false;
2513
2514                 if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
2515                         return false;
2516
2517                 extent_i_save(b, m, &li.k);
2518                 bch_bset_fix_invalidated_key(b, t, m);
2519
2520                 bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
2521                                         t, m, m->u64s, m->u64s);
2522                 return true;
2523         default:
2524                 BUG();
2525         }
2526 }
2527
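/* Method table for extent bkeys: */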
2528 const struct bkey_ops bch_bkey_extent_ops = {
2529         .key_invalid    = bch_extent_invalid,
2530         .key_debugcheck = bch_extent_debugcheck,
2531         .val_to_text    = bch_extent_to_text,
2532         .swab           = bch_ptr_swab,
2533         .key_normalize  = bch_ptr_normalize,
2534         .key_merge      = bch_extent_merge,
2535         .is_extents     = true,
2536 };