// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "ec.h"
#include "error.h"
#include "movinggc.h"
#include "reflink.h"
#include "replicas.h"

#include <linux/preempt.h>
#include <trace/events/bcachefs.h>

static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
                                              enum bch_data_type data_type,
                                              s64 sectors)
{
        switch (data_type) {
        case BCH_DATA_btree:
                fs_usage->btree         += sectors;
                break;
        case BCH_DATA_user:
        case BCH_DATA_parity:
                fs_usage->data          += sectors;
                break;
        case BCH_DATA_cached:
                fs_usage->cached        += sectors;
                break;
        default:
                break;
        }
}

/*
 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
 * wraparound:
 */
void bch2_bucket_seq_cleanup(struct bch_fs *c)
{
        u64 journal_seq = atomic64_read(&c->journal.seq);
        u16 last_seq_ondisk = c->journal.last_seq_ondisk;
        struct bch_dev *ca;
        struct bucket_array *buckets;
        struct bucket *g;
        struct bucket_mark m;
        unsigned i;

        if (journal_seq - c->last_bucket_seq_cleanup <
            (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
                return;

        c->last_bucket_seq_cleanup = journal_seq;

        for_each_member_device(ca, c, i) {
                down_read(&ca->bucket_lock);
                buckets = bucket_array(ca);

                for_each_bucket(g, buckets) {
                        bucket_cmpxchg(g, m, ({
                                if (!m.journal_seq_valid ||
                                    bucket_needs_journal_commit(m, last_seq_ondisk))
                                        break;

                                m.journal_seq_valid = 0;
                        }));
                }
                up_read(&ca->bucket_lock);
        }
}

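/*
 * Fold all percpu usage accumulators into usage_base and recompute the
 * derived fields (reserved, per-data-type totals, and the "hidden" space
 * used by superblocks and journals):
 */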
void bch2_fs_usage_initialize(struct bch_fs *c)
{
        struct bch_fs_usage *usage;
        struct bch_dev *ca;
        unsigned i;

        percpu_down_write(&c->mark_lock);
        usage = c->usage_base;

        for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                bch2_fs_usage_acc_to_base(c, i);

        for (i = 0; i < BCH_REPLICAS_MAX; i++)
                usage->reserved += usage->persistent_reserved[i];

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);

                fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
        }

        for_each_member_device(ca, c, i) {
                struct bch_dev_usage dev = bch2_dev_usage_read(ca);

                usage->hidden += (dev.d[BCH_DATA_sb].buckets +
                                  dev.d[BCH_DATA_journal].buckets) *
                        ca->mi.bucket_size;
        }

        percpu_up_write(&c->mark_lock);
}

static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
                                                  unsigned journal_seq,
                                                  bool gc)
{
        return this_cpu_ptr(gc
                            ? ca->usage_gc
                            : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
}

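/*
 * Read a consistent snapshot of a device's usage: base counters plus every
 * percpu accumulator, retried under the usage seqcount:
 */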
struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
        struct bch_fs *c = ca->fs;
        struct bch_dev_usage ret;
        unsigned seq, i, u64s = dev_usage_u64s();

        do {
                seq = read_seqcount_begin(&c->usage_lock);
                memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
                for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
                        acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
        } while (read_seqcount_retry(&c->usage_lock, seq));

        return ret;
}

static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
                                                unsigned journal_seq,
                                                bool gc)
{
        return this_cpu_ptr(gc
                            ? c->usage_gc
                            : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}

u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
        ssize_t offset = v - (u64 *) c->usage_base;
        unsigned i, seq;
        u64 ret;

        BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
        percpu_rwsem_assert_held(&c->mark_lock);

        do {
                seq = read_seqcount_begin(&c->usage_lock);
                ret = *v;

                for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                        ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
        } while (read_seqcount_retry(&c->usage_lock, seq));

        return ret;
}

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
        struct bch_fs_usage_online *ret;
        unsigned seq, i, u64s;

        percpu_down_read(&c->mark_lock);

        ret = kmalloc(sizeof(struct bch_fs_usage_online) +
                      sizeof(u64) * c->replicas.nr, GFP_NOFS);
        if (unlikely(!ret)) {
                percpu_up_read(&c->mark_lock);
                return NULL;
        }

        ret->online_reserved = percpu_u64_get(c->online_reserved);

        u64s = fs_usage_u64s(c);
        do {
                seq = read_seqcount_begin(&c->usage_lock);
                memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
                for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                        acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
        } while (read_seqcount_retry(&c->usage_lock, seq));

        return ret;
}

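/*
 * Fold the percpu usage deltas in accumulation slot @idx into the base
 * counters, filesystem wide and per device, then zero that slot:
 */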
void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
        struct bch_dev *ca;
        unsigned i, u64s = fs_usage_u64s(c);

        BUG_ON(idx >= ARRAY_SIZE(c->usage));

        preempt_disable();
        write_seqcount_begin(&c->usage_lock);

        acc_u64s_percpu((u64 *) c->usage_base,
                        (u64 __percpu *) c->usage[idx], u64s);
        percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i, NULL) {
                u64s = dev_usage_u64s();

                acc_u64s_percpu((u64 *) ca->usage_base,
                                (u64 __percpu *) ca->usage[idx], u64s);
                percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
        }
        rcu_read_unlock();

        write_seqcount_end(&c->usage_lock);
        preempt_enable();
}

void bch2_fs_usage_to_text(struct printbuf *out,
                           struct bch_fs *c,
                           struct bch_fs_usage_online *fs_usage)
{
        unsigned i;

        pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);

        pr_buf(out, "hidden:\t\t\t\t%llu\n",
               fs_usage->u.hidden);
        pr_buf(out, "data:\t\t\t\t%llu\n",
               fs_usage->u.data);
        pr_buf(out, "cached:\t\t\t\t%llu\n",
               fs_usage->u.cached);
        pr_buf(out, "reserved:\t\t\t%llu\n",
               fs_usage->u.reserved);
        pr_buf(out, "nr_inodes:\t\t\t%llu\n",
               fs_usage->u.nr_inodes);
        pr_buf(out, "online reserved:\t\t%llu\n",
               fs_usage->online_reserved);

        for (i = 0;
             i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
             i++) {
                pr_buf(out, "%u replicas:\n", i + 1);
                pr_buf(out, "\treserved:\t\t%llu\n",
                       fs_usage->u.persistent_reserved[i]);
        }

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);

                pr_buf(out, "\t");
                bch2_replicas_entry_to_text(out, e);
                pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
        }
}

static u64 reserve_factor(u64 r)
{
        return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
        return min(fs_usage->u.hidden +
                   fs_usage->u.btree +
                   fs_usage->u.data +
                   reserve_factor(fs_usage->u.reserved +
                                  fs_usage->online_reserved),
                   c->capacity);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
        struct bch_fs_usage_short ret;
        u64 data, reserved;

        ret.capacity = c->capacity -
                bch2_fs_usage_read_one(c, &c->usage_base->hidden);

        data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
                bch2_fs_usage_read_one(c, &c->usage_base->btree);
        reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
                percpu_u64_get(c->online_reserved);

        ret.used        = min(ret.capacity, data + reserve_factor(reserved));
        ret.free        = ret.capacity - ret.used;

        ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

        return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
        struct bch_fs_usage_short ret;

        percpu_down_read(&c->mark_lock);
        ret = __bch2_fs_usage_read_short(c);
        percpu_up_read(&c->mark_lock);

        return ret;
}

static inline int is_unavailable_bucket(struct bucket_mark m)
{
        return !is_available_bucket(m);
}

static inline int bucket_sectors_fragmented(struct bch_dev *ca,
                                            struct bucket_mark m)
{
        return bucket_sectors_used(m)
                ? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
                : 0;
}

static inline int is_stripe_data_bucket(struct bucket_mark m)
{
        return m.stripe && m.data_type != BCH_DATA_parity;
}

static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
        return m.cached_sectors && !m.dirty_sectors
                ? BCH_DATA_cached
                : m.data_type;
}

static bool bucket_became_unavailable(struct bucket_mark old,
                                      struct bucket_mark new)
{
        return is_available_bucket(old) &&
               !is_available_bucket(new);
}

static inline void account_bucket(struct bch_fs_usage *fs_usage,
                                  struct bch_dev_usage *dev_usage,
                                  enum bch_data_type type,
                                  int nr, s64 size)
{
        if (type == BCH_DATA_sb || type == BCH_DATA_journal)
                fs_usage->hidden        += size;

        dev_usage->d[type].buckets      += nr;
}

static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
                                  struct bch_fs_usage *fs_usage,
                                  struct bucket_mark old, struct bucket_mark new,
                                  u64 journal_seq, bool gc)
{
        struct bch_dev_usage *u;

        percpu_rwsem_assert_held(&c->mark_lock);

        preempt_disable();
        if (!fs_usage)
                fs_usage = fs_usage_ptr(c, journal_seq, gc);
        u = dev_usage_ptr(ca, journal_seq, gc);

        if (bucket_type(old))
                account_bucket(fs_usage, u, bucket_type(old),
                               -1, -ca->mi.bucket_size);

        if (bucket_type(new))
                account_bucket(fs_usage, u, bucket_type(new),
                               1, ca->mi.bucket_size);

        u->buckets_ec += (int) new.stripe - (int) old.stripe;
        u->buckets_unavailable +=
                is_unavailable_bucket(new) - is_unavailable_bucket(old);

        u->d[old.data_type].sectors -= old.dirty_sectors;
        u->d[new.data_type].sectors += new.dirty_sectors;
        u->d[BCH_DATA_cached].sectors +=
                (int) new.cached_sectors - (int) old.cached_sectors;

        u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
        u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);

        preempt_enable();

        if (!is_available_bucket(old) && is_available_bucket(new))
                bch2_wake_allocator(ca);
}

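/*
 * Add @sectors to the counter for replicas entry @r; returns -1 if the entry
 * isn't in the replicas table:
 */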
static inline int update_replicas(struct bch_fs *c,
                                   struct bch_fs_usage *fs_usage,
                                   struct bch_replicas_entry *r,
                                   s64 sectors)
{
        int idx = bch2_replicas_entry_idx(c, r);

        if (idx < 0)
                return -1;

        fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
        fs_usage->replicas[idx]         += sectors;
        return 0;
}

static inline int update_cached_sectors(struct bch_fs *c,
                                         struct bch_fs_usage *fs_usage,
                                         unsigned dev, s64 sectors)
{
        struct bch_replicas_padded r;

        bch2_replicas_entry_cached(&r.e, dev);

        return update_replicas(c, fs_usage, &r.e, sectors);
}

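/*
 * Ensure the transaction's replicas delta list has room for @more bytes,
 * growing it with krealloc or falling back to the preallocated mempool:
 */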
static struct replicas_delta_list *
replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
        struct replicas_delta_list *d = trans->fs_usage_deltas;
        unsigned new_size = d ? (d->size + more) * 2 : 128;
        unsigned alloc_size = sizeof(*d) + new_size;

        WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);

        if (!d || d->used + more > d->size) {
                d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);

                BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);

                if (!d) {
                        d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
                        memset(d, 0, REPLICAS_DELTA_LIST_MAX);

                        if (trans->fs_usage_deltas)
                                memcpy(d, trans->fs_usage_deltas,
                                       trans->fs_usage_deltas->size + sizeof(*d));

                        new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
                        kfree(trans->fs_usage_deltas);
                }

                d->size = new_size;
                trans->fs_usage_deltas = d;
        }
        return d;
}

static inline void update_replicas_list(struct btree_trans *trans,
                                        struct bch_replicas_entry *r,
                                        s64 sectors)
{
        struct replicas_delta_list *d;
        struct replicas_delta *n;
        unsigned b;

        if (!sectors)
                return;

        b = replicas_entry_bytes(r) + 8;
        d = replicas_deltas_realloc(trans, b);

        n = (void *) d->d + d->used;
        n->delta = sectors;
        memcpy(&n->r, r, replicas_entry_bytes(r));
        bch2_replicas_entry_sort(&n->r);
        d->used += b;
}

static inline void update_cached_sectors_list(struct btree_trans *trans,
                                              unsigned dev, s64 sectors)
{
        struct bch_replicas_padded r;

        bch2_replicas_entry_cached(&r.e, dev);

        update_replicas_list(trans, &r.e, sectors);
}

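/*
 * Run a mark function against the normal counters, the gc counters, or both,
 * depending on the trigger flags and on whether gc has already visited @pos:
 */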
#define do_mark_fn(fn, c, pos, flags, ...)                              \
({                                                                      \
        int gc, ret = 0;                                                \
                                                                        \
        percpu_rwsem_assert_held(&c->mark_lock);                        \
                                                                        \
        for (gc = 0; gc < 2 && !ret; gc++)                              \
                if (!gc == !(flags & BTREE_TRIGGER_GC) ||               \
                    (gc && gc_visited(c, pos)))                         \
                        ret = fn(c, __VA_ARGS__, gc);                   \
        ret;                                                            \
})

void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                            size_t b, bool owned_by_allocator)
{
        struct bucket *g = bucket(ca, b);
        struct bucket_mark old, new;

        old = bucket_cmpxchg(g, new, ({
                new.owned_by_allocator  = owned_by_allocator;
        }));

        BUG_ON(owned_by_allocator == old.owned_by_allocator);
}

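/*
 * Trigger for alloc keys: copy the unpacked alloc info into the in-memory
 * bucket and update device/filesystem usage to match:
 */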
static int bch2_mark_alloc(struct bch_fs *c,
                           struct bkey_s_c old, struct bkey_s_c new,
                           struct bch_fs_usage *fs_usage,
                           u64 journal_seq, unsigned flags)
{
        bool gc = flags & BTREE_TRIGGER_GC;
        struct bkey_alloc_unpacked u;
        struct bch_dev *ca;
        struct bucket *g;
        struct bucket_mark old_m, m;

        /* We don't do anything for deletions - do we?: */
        if (new.k->type != KEY_TYPE_alloc &&
            new.k->type != KEY_TYPE_alloc_v2)
                return 0;

        /*
         * alloc btree is read in by bch2_alloc_read, not gc:
         */
        if ((flags & BTREE_TRIGGER_GC) &&
            !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
                return 0;

        ca = bch_dev_bkey_exists(c, new.k->p.inode);

        if (new.k->p.offset >= ca->mi.nbuckets)
                return 0;

        g = __bucket(ca, new.k->p.offset, gc);
        u = bch2_alloc_unpack(new);

        old_m = bucket_cmpxchg(g, m, ({
                m.gen                   = u.gen;
                m.data_type             = u.data_type;
                m.dirty_sectors         = u.dirty_sectors;
                m.cached_sectors        = u.cached_sectors;
                m.stripe                = u.stripe != 0;

                if (journal_seq) {
                        m.journal_seq_valid     = 1;
                        m.journal_seq           = journal_seq;
                }
        }));

        bch2_dev_usage_update(c, ca, fs_usage, old_m, m, journal_seq, gc);

        g->io_time[READ]        = u.read_time;
        g->io_time[WRITE]       = u.write_time;
        g->oldest_gen           = u.oldest_gen;
        g->gen_valid            = 1;
        g->stripe               = u.stripe;
        g->stripe_redundancy    = u.stripe_redundancy;

        /*
         * need to know if we're getting called from the invalidate path or
         * not:
         */

        if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
            old_m.cached_sectors) {
                if (update_cached_sectors(c, fs_usage, ca->dev_idx,
                                      -old_m.cached_sectors)) {
                        bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
                        return -1;
                }

                trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
                                 old_m.cached_sectors);
        }

        return 0;
}

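/*
 * Saturating add for 16 bit bucket sector counts, e.g.
 *     overflow = checked_add(new.dirty_sectors, sectors);
 * clamps the result to U16_MAX and evaluates to true if the addition
 * overflowed:
 */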
#define checked_add(a, b)                                       \
({                                                              \
        unsigned _res = (unsigned) (a) + (b);                   \
        bool overflow = _res > U16_MAX;                         \
        if (overflow)                                           \
                _res = U16_MAX;                                 \
        (a) = _res;                                             \
        overflow;                                               \
})

static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
                                       size_t b, enum bch_data_type data_type,
                                       unsigned sectors, bool gc)
{
        struct bucket *g = __bucket(ca, b, gc);
        struct bucket_mark old, new;
        bool overflow;

        BUG_ON(data_type != BCH_DATA_sb &&
               data_type != BCH_DATA_journal);

        old = bucket_cmpxchg(g, new, ({
                new.data_type   = data_type;
                overflow = checked_add(new.dirty_sectors, sectors);
        }));

        bch2_fs_inconsistent_on(old.data_type &&
                                old.data_type != data_type, c,
                "different types of data in same bucket: %s, %s",
                bch2_data_types[old.data_type],
                bch2_data_types[data_type]);

        bch2_fs_inconsistent_on(overflow, c,
                "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
                ca->dev_idx, b, new.gen,
                bch2_data_types[old.data_type ?: data_type],
                old.dirty_sectors, sectors);

        if (c)
                bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
                                      old, new, 0, gc);

        return 0;
}

void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
                               size_t b, enum bch_data_type type,
                               unsigned sectors, struct gc_pos pos,
                               unsigned flags)
{
        BUG_ON(type != BCH_DATA_sb &&
               type != BCH_DATA_journal);

        /*
         * Backup superblock might be past the end of our normal usable space:
         */
        if (b >= ca->mi.nbuckets)
                return;

        preempt_disable();

        if (likely(c)) {
                do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
                           ca, b, type, sectors);
        } else {
                __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
        }

        preempt_enable();
}

static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
{
        return DIV_ROUND_UP(sectors * n, d);
}

static s64 __ptr_disk_sectors_delta(unsigned old_size,
                                    unsigned offset, s64 delta,
                                    unsigned flags,
                                    unsigned n, unsigned d)
{
        BUG_ON(!n || !d);

        if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
                BUG_ON(offset + -delta > old_size);

                return -disk_sectors_scaled(n, d, old_size) +
                        disk_sectors_scaled(n, d, offset) +
                        disk_sectors_scaled(n, d, old_size - offset + delta);
        } else if (flags & BTREE_TRIGGER_OVERWRITE) {
                BUG_ON(offset + -delta > old_size);

                return -disk_sectors_scaled(n, d, old_size) +
                        disk_sectors_scaled(n, d, old_size + delta);
        } else {
                return  disk_sectors_scaled(n, d, delta);
        }
}

static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
                                  unsigned offset, s64 delta,
                                  unsigned flags)
{
        return __ptr_disk_sectors_delta(p.crc.live_size,
                                        offset, delta, flags,
                                        p.crc.compressed_size,
                                        p.crc.uncompressed_size);
}

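/*
 * Sanity checks run before a pointer is (un)marked against a bucket: catches
 * gen mismatches, mixed data types and sector count overflow. Returns -EIO on
 * inconsistency, 1 if a cached pointer turned out to be stale, 0 otherwise:
 */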
static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
                            const struct bch_extent_ptr *ptr,
                            s64 sectors, enum bch_data_type ptr_data_type,
                            u8 bucket_gen, u8 bucket_data_type,
                            u16 dirty_sectors, u16 cached_sectors)
{
        size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
        u16 bucket_sectors = !ptr->cached
                ? dirty_sectors
                : cached_sectors;
        char buf[200];

        if (gen_after(ptr->gen, bucket_gen)) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, bucket_gen,
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        ptr->gen,
                        (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
                return -EIO;
        }

        if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, bucket_gen,
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        ptr->gen,
                        (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
                return -EIO;
        }

        if (bucket_gen != ptr->gen && !ptr->cached) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                        "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, bucket_gen,
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        ptr->gen,
                        (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
                return -EIO;
        }

        if (bucket_gen != ptr->gen)
                return 1;

        if (bucket_data_type && ptr_data_type &&
            bucket_data_type != ptr_data_type) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                        "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, bucket_gen,
                        bch2_data_types[bucket_data_type],
                        bch2_data_types[ptr_data_type],
                        (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
                return -EIO;
        }

        if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                        "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, bucket_gen,
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        bucket_sectors, sectors,
                        (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
                return -EIO;
        }

        return 0;
}

static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
                             unsigned ptr_idx,
                             struct bch_fs_usage *fs_usage,
                             u64 journal_seq, unsigned flags)
{
        const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
        unsigned nr_data = s->nr_blocks - s->nr_redundant;
        bool parity = ptr_idx >= nr_data;
        const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
        bool gc = flags & BTREE_TRIGGER_GC;
        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
        struct bucket *g = PTR_BUCKET(ca, ptr, gc);
        struct bucket_mark new, old;
        char buf[200];
        int ret;

        if (g->stripe && g->stripe != k.k->p.offset) {
                bch2_fs_inconsistent(c,
                              "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
                              ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
                              (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
                return -EINVAL;
        }

        old = bucket_cmpxchg(g, new, ({
                ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
                                       new.dirty_sectors, new.cached_sectors);
                if (ret)
                        return ret;

                if (parity) {
                        new.data_type           = BCH_DATA_parity;
                        new.dirty_sectors       = le16_to_cpu(s->sectors);
                }

                if (journal_seq) {
                        new.journal_seq_valid   = 1;
                        new.journal_seq         = journal_seq;
                }
        }));

        g->stripe               = k.k->p.offset;
        g->stripe_redundancy    = s->nr_redundant;

        bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
        return 0;
}

static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
                          const struct bch_extent_ptr *ptr,
                          s64 sectors, enum bch_data_type ptr_data_type,
                          u8 bucket_gen, u8 *bucket_data_type,
                          u16 *dirty_sectors, u16 *cached_sectors)
{
        u16 *dst_sectors = !ptr->cached
                ? dirty_sectors
                : cached_sectors;
        int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
                                   bucket_gen, *bucket_data_type,
                                   *dirty_sectors, *cached_sectors);

        if (ret)
                return ret;

        *dst_sectors += sectors;
        *bucket_data_type = *dirty_sectors || *cached_sectors
                ? ptr_data_type : 0;
        return 0;
}

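/*
 * Update the bucket referenced by a single extent pointer, via cmpxchg on the
 * bucket mark, then propagate the change to device usage:
 */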
static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
                             struct extent_ptr_decoded p,
                             s64 sectors, enum bch_data_type data_type,
                             struct bch_fs_usage *fs_usage,
                             u64 journal_seq, unsigned flags)
{
        bool gc = flags & BTREE_TRIGGER_GC;
        struct bucket_mark old, new;
        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
        struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
        u8 bucket_data_type;
        u64 v;
        int ret;

        v = atomic64_read(&g->_mark.v);
        do {
                new.v.counter = old.v.counter = v;
                bucket_data_type = new.data_type;

                ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
                                     &bucket_data_type,
                                     &new.dirty_sectors,
                                     &new.cached_sectors);
                if (ret)
                        return ret;

                new.data_type = bucket_data_type;

                if (journal_seq) {
                        new.journal_seq_valid = 1;
                        new.journal_seq = journal_seq;
                }

                if (flags & BTREE_TRIGGER_NOATOMIC) {
                        g->_mark = new;
                        break;
                }
        } while ((v = atomic64_cmpxchg(&g->_mark.v,
                              old.v.counter,
                              new.v.counter)) != old.v.counter);

        bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);

        BUG_ON(!gc && bucket_became_unavailable(old, new));

        return 0;
}

static int bch2_mark_stripe_ptr(struct bch_fs *c,
                                struct bch_extent_stripe_ptr p,
                                enum bch_data_type data_type,
                                struct bch_fs_usage *fs_usage,
                                s64 sectors, unsigned flags)
{
        bool gc = flags & BTREE_TRIGGER_GC;
        struct bch_replicas_padded r;
        struct stripe *m;
        unsigned i, blocks_nonempty = 0;

        m = genradix_ptr(&c->stripes[gc], p.idx);

        spin_lock(&c->ec_stripes_heap_lock);

        if (!m || !m->alive) {
                spin_unlock(&c->ec_stripes_heap_lock);
                bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
                                    (u64) p.idx);
                bch2_inconsistent_error(c);
                return -EIO;
        }

        m->block_sectors[p.block] += sectors;

        r = m->r;

        for (i = 0; i < m->nr_blocks; i++)
                blocks_nonempty += m->block_sectors[i] != 0;

        if (m->blocks_nonempty != blocks_nonempty) {
                m->blocks_nonempty = blocks_nonempty;
                if (!gc)
                        bch2_stripes_heap_update(c, m, p.idx);
        }

        spin_unlock(&c->ec_stripes_heap_lock);

        r.e.data_type = data_type;
        update_replicas(c, fs_usage, &r.e, sectors);

        return 0;
}

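/*
 * Mark every pointer in an extent or btree node key: per pointer bucket
 * accounting, cached sector counts, stripe block counts, and the replicas
 * entry covering the dirty pointers:
 */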
static int bch2_mark_extent(struct bch_fs *c,
                            struct bkey_s_c old, struct bkey_s_c new,
                            unsigned offset, s64 sectors,
                            enum bch_data_type data_type,
                            struct bch_fs_usage *fs_usage,
                            unsigned journal_seq, unsigned flags)
{
        struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bch_replicas_padded r;
        s64 dirty_sectors = 0;
        bool stale;
        int ret;

        r.e.data_type   = data_type;
        r.e.nr_devs     = 0;
        r.e.nr_required = 1;

        BUG_ON(!sectors);

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                s64 disk_sectors = data_type == BCH_DATA_btree
                        ? sectors
                        : ptr_disk_sectors_delta(p, offset, sectors, flags);

                ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
                                        fs_usage, journal_seq, flags);
                if (ret < 0)
                        return ret;

                stale = ret > 0;

                if (p.ptr.cached) {
                        if (!stale)
                                if (update_cached_sectors(c, fs_usage, p.ptr.dev,
                                                          disk_sectors)) {
                                        bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
                                        return -1;

                                }
                } else if (!p.has_ec) {
                        dirty_sectors          += disk_sectors;
                        r.e.devs[r.e.nr_devs++] = p.ptr.dev;
                } else {
                        ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
                                        fs_usage, disk_sectors, flags);
                        if (ret)
                                return ret;

                        /*
                         * There may be other dirty pointers in this extent, but
                         * if so they're not required for mounting if we have an
                         * erasure coded pointer in this extent:
                         */
                        r.e.nr_required = 0;
                }
        }

        if (r.e.nr_devs) {
                if (update_replicas(c, fs_usage, &r.e, dirty_sectors)) {
                        char buf[200];

                        bch2_bkey_val_to_text(&PBUF(buf), c, k);
                        bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
                        return -1;
                }
        }

        return 0;
}

static int bch2_mark_stripe(struct bch_fs *c,
                            struct bkey_s_c old, struct bkey_s_c new,
                            struct bch_fs_usage *fs_usage,
                            u64 journal_seq, unsigned flags)
{
        bool gc = flags & BTREE_TRIGGER_GC;
        size_t idx = new.k->p.offset;
        const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
                ? bkey_s_c_to_stripe(old).v : NULL;
        const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
                ? bkey_s_c_to_stripe(new).v : NULL;
        struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
        unsigned i;
        int ret;

        BUG_ON(gc && old_s);

        if (!m || (old_s && !m->alive)) {
                bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
                                    idx);
                bch2_inconsistent_error(c);
                return -1;
        }

        if (!new_s) {
                spin_lock(&c->ec_stripes_heap_lock);
                bch2_stripes_heap_del(c, m, idx);
                spin_unlock(&c->ec_stripes_heap_lock);

                memset(m, 0, sizeof(*m));
        } else {
                m->alive        = true;
                m->sectors      = le16_to_cpu(new_s->sectors);
                m->algorithm    = new_s->algorithm;
                m->nr_blocks    = new_s->nr_blocks;
                m->nr_redundant = new_s->nr_redundant;
                m->blocks_nonempty = 0;

                for (i = 0; i < new_s->nr_blocks; i++) {
                        m->block_sectors[i] =
                                stripe_blockcount_get(new_s, i);
                        m->blocks_nonempty += !!m->block_sectors[i];

                        m->ptrs[i] = new_s->ptrs[i];
                }

                bch2_bkey_to_replicas(&m->r.e, new);

                if (!gc) {
                        spin_lock(&c->ec_stripes_heap_lock);
                        bch2_stripes_heap_update(c, m, idx);
                        spin_unlock(&c->ec_stripes_heap_lock);
                }
        }

        if (gc) {
                /*
                 * gc recalculates this field from stripe ptr
                 * references:
                 */
                memset(m->block_sectors, 0, sizeof(m->block_sectors));
                m->blocks_nonempty = 0;

                for (i = 0; i < new_s->nr_blocks; i++) {
                        ret = mark_stripe_bucket(c, new, i, fs_usage,
                                                 journal_seq, flags);
                        if (ret)
                                return ret;
                }

                if (update_replicas(c, fs_usage, &m->r.e,
                                ((s64) m->sectors * m->nr_redundant))) {
                        char buf[200];

                        bch2_bkey_val_to_text(&PBUF(buf), c, new);
                        bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
                        return -1;
                }
        }

        return 0;
}

static int __reflink_p_frag_references(struct bkey_s_c_reflink_p p,
                                       u64 p_start, u64 p_end,
                                       u64 v_start, u64 v_end)
{
        if (p_start == p_end)
                return false;

        p_start += le64_to_cpu(p.v->idx);
        p_end   += le64_to_cpu(p.v->idx);

        if (p_end <= v_start)
                return false;
        if (p_start >= v_end)
                return false;
        return true;
}

static int reflink_p_frag_references(struct bkey_s_c_reflink_p p,
                                     u64 start, u64 end,
                                     struct bkey_s_c k)
{
        return __reflink_p_frag_references(p, start, end,
                                           bkey_start_offset(k.k),
                                           k.k->p.offset);
}

static int __bch2_mark_reflink_p(struct bch_fs *c,
                        struct bkey_s_c_reflink_p p,
                        u64 idx, unsigned sectors,
                        unsigned front_frag,
                        unsigned back_frag,
                        unsigned flags,
                        size_t *r_idx)
{
        struct reflink_gc *r;
        int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
        int frags_referenced;

        while (1) {
                if (*r_idx >= c->reflink_gc_nr)
                        goto not_found;
                r = genradix_ptr(&c->reflink_gc_table, *r_idx);
                BUG_ON(!r);

                if (r->offset > idx)
                        break;
                (*r_idx)++;
        }

        frags_referenced =
                __reflink_p_frag_references(p, 0, front_frag,
                                            r->offset - r->size, r->offset) +
                __reflink_p_frag_references(p, back_frag, p.k->size,
                                            r->offset - r->size, r->offset);

        if (frags_referenced == 2) {
                BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE_SPLIT));
                add = -add;
        } else if (frags_referenced == 1) {
                BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE));
                add = 0;
        }

        BUG_ON((s64) r->refcount + add < 0);

        r->refcount += add;
        return min_t(u64, sectors, r->offset - idx);
not_found:
        bch2_fs_inconsistent(c,
                "%llu:%llu len %u points to nonexistent indirect extent %llu",
                p.k->p.inode, p.k->p.offset, p.k->size, idx);
        bch2_inconsistent_error(c);
        return -EIO;
}

static int bch2_mark_reflink_p(struct bch_fs *c,
                               struct bkey_s_c_reflink_p p, unsigned offset,
                               s64 sectors, unsigned flags)
{
        u64 idx = le64_to_cpu(p.v->idx) + offset;
        struct reflink_gc *ref;
        size_t l, r, m;
        unsigned front_frag, back_frag;
        s64 ret = 0;

        if (sectors < 0)
                sectors = -sectors;

        BUG_ON(offset + sectors > p.k->size);

        front_frag = offset;
        back_frag = offset + sectors;

        l = 0;
        r = c->reflink_gc_nr;
        while (l < r) {
                m = l + (r - l) / 2;

                ref = genradix_ptr(&c->reflink_gc_table, m);
                if (ref->offset <= idx)
                        l = m + 1;
                else
                        r = m;
        }

        while (sectors) {
                ret = __bch2_mark_reflink_p(c, p, idx, sectors,
                                front_frag, back_frag, flags, &l);
                if (ret < 0)
                        return ret;

                idx     += ret;
                sectors -= ret;
        }

        return 0;
}

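/*
 * Main dispatch for marking a key, by key type; caller must hold mark_lock:
 */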
static int bch2_mark_key_locked(struct bch_fs *c,
                   struct bkey_s_c old,
                   struct bkey_s_c new,
                   unsigned offset, s64 sectors,
                   struct bch_fs_usage *fs_usage,
                   u64 journal_seq, unsigned flags)
{
        struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
        int ret = 0;

        BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));

        preempt_disable();

        if (!fs_usage || (flags & BTREE_TRIGGER_GC))
                fs_usage = fs_usage_ptr(c, journal_seq,
                                        flags & BTREE_TRIGGER_GC);

        switch (k.k->type) {
        case KEY_TYPE_alloc:
        case KEY_TYPE_alloc_v2:
                ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
                break;
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
                sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
                        ?  c->opts.btree_node_size
                        : -c->opts.btree_node_size;

                ret = bch2_mark_extent(c, old, new, offset, sectors,
                                BCH_DATA_btree, fs_usage, journal_seq, flags);
                break;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                ret = bch2_mark_extent(c, old, new, offset, sectors,
                                BCH_DATA_user, fs_usage, journal_seq, flags);
                break;
        case KEY_TYPE_stripe:
                ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
                break;
        case KEY_TYPE_inode:
                fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
                fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
                break;
        case KEY_TYPE_reservation: {
                unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;

                sectors *= replicas;
                replicas = clamp_t(unsigned, replicas, 1,
                                   ARRAY_SIZE(fs_usage->persistent_reserved));

                fs_usage->reserved                              += sectors;
                fs_usage->persistent_reserved[replicas - 1]     += sectors;
                break;
        }
        case KEY_TYPE_reflink_p:
                ret = bch2_mark_reflink_p(c, bkey_s_c_to_reflink_p(k),
                                          offset, sectors, flags);
                break;
        }

        preempt_enable();

        return ret;
}

int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
                  unsigned offset, s64 sectors,
                  struct bch_fs_usage *fs_usage,
                  u64 journal_seq, unsigned flags)
{
        struct bkey deleted;
        struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
        int ret;

        bkey_init(&deleted);

        percpu_down_read(&c->mark_lock);
        ret = bch2_mark_key_locked(c, old, new, offset, sectors,
                                   fs_usage, journal_seq,
                                   BTREE_TRIGGER_INSERT|flags);
        percpu_up_read(&c->mark_lock);

        return ret;
}

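/*
 * Mark a key being inserted by a transaction: on non-extent btrees the old
 * and new keys are marked directly; on extent btrees we also walk the
 * existing extents the insert overlaps and mark the overwritten portions:
 */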
1286 int bch2_mark_update(struct btree_trans *trans,
1287                      struct btree_iter *iter,
1288                      struct bkey_i *new,
1289                      struct bch_fs_usage *fs_usage,
1290                      unsigned flags)
1291 {
1292         struct bch_fs           *c = trans->c;
1293         struct bkey_s_c         old;
1294         struct bkey             unpacked;
1295         int ret = 0;
1296
1297         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1298                 return 0;
1299
1300         if (!btree_node_type_needs_gc(iter->btree_id))
1301                 return 0;
1302
1303         bkey_init(&unpacked);
1304         old = (struct bkey_s_c) { &unpacked, NULL };
1305
1306         if (!btree_node_type_is_extents(iter->btree_id)) {
1307                 /* iterators should be uptodate, shouldn't get errors here: */
1308                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1309                         old = bch2_btree_iter_peek_slot(iter);
1310                         BUG_ON(bkey_err(old));
1311                 } else {
1312                         struct bkey_cached *ck = (void *) iter->l[0].b;
1313
1314                         if (ck->valid)
1315                                 old = bkey_i_to_s_c(ck->k);
1316                 }
1317
1318                 if (old.k->type == new->k.type) {
1319                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1320                                 fs_usage, trans->journal_res.seq,
1321                                 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1322
1323                 } else {
1324                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1325                                 fs_usage, trans->journal_res.seq,
1326                                 BTREE_TRIGGER_INSERT|flags);
1327                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1328                                 fs_usage, trans->journal_res.seq,
1329                                 BTREE_TRIGGER_OVERWRITE|flags);
1330                 }
1331         } else {
1332                 struct btree_iter *copy;
1333
1334                 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1335                 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1336                         0, new->k.size,
1337                         fs_usage, trans->journal_res.seq,
1338                         BTREE_TRIGGER_INSERT|flags);
1339
1340                 copy = bch2_trans_copy_iter(trans, iter);
1341
1342                 for_each_btree_key_continue(copy, 0, old, ret) {
1343                         unsigned offset = 0;
1344                         s64 sectors = -((s64) old.k->size);
1345
1346                         flags |= BTREE_TRIGGER_OVERWRITE;
1347
1348                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1349                                 break;
1350
1351                         switch (bch2_extent_overlap(&new->k, old.k)) {
1352                         case BCH_EXTENT_OVERLAP_ALL:
1353                                 offset = 0;
1354                                 sectors = -((s64) old.k->size);
1355                                 break;
1356                         case BCH_EXTENT_OVERLAP_BACK:
1357                                 offset = bkey_start_offset(&new->k) -
1358                                         bkey_start_offset(old.k);
1359                                 sectors = bkey_start_offset(&new->k) -
1360                                         old.k->p.offset;
1361                                 break;
1362                         case BCH_EXTENT_OVERLAP_FRONT:
1363                                 offset = 0;
1364                                 sectors = bkey_start_offset(old.k) -
1365                                         new->k.p.offset;
1366                                 break;
1367                         case BCH_EXTENT_OVERLAP_MIDDLE:
1368                                 offset = bkey_start_offset(&new->k) -
1369                                         bkey_start_offset(old.k);
1370                                 sectors = -((s64) new->k.size);
1371                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1372                                 break;
1373                         }
1374
1375                         BUG_ON(sectors >= 0);
1376
1377                         ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1378                                         offset, sectors, fs_usage,
1379                                         trans->journal_res.seq, flags) ?: 1;
1380                         if (ret <= 0)
1381                                 break;
1382                 }
1383                 bch2_trans_iter_put(trans, copy);
1384         }
1385
1386         return ret;
1387 }
1388
1389 static noinline __cold
1390 void fs_usage_apply_warn(struct btree_trans *trans,
1391                          unsigned disk_res_sectors,
1392                          s64 should_not_have_added)
1393 {
1394         struct bch_fs *c = trans->c;
1395         struct btree_insert_entry *i;
1396         char buf[200];
1397
1398         bch_err(c, "disk usage increased %lli more than %u sectors reserved",
1399                 should_not_have_added, disk_res_sectors);
1400
1401         trans_for_each_update(trans, i) {
1402                 pr_err("while inserting");
1403                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1404                 pr_err("%s", buf);
1405                 pr_err("overlapping with");
1406
1407                 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1408                         struct btree_iter *copy = bch2_trans_copy_iter(trans, i->iter);
1409                         struct bkey_s_c k;
1410                         int ret;
1411
1412                         for_each_btree_key_continue(copy, 0, k, ret) {
1413                                 if (btree_node_type_is_extents(i->iter->btree_id)
1414                                     ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1415                                     : bkey_cmp(i->k->k.p, k.k->p))
1416                                         break;
1417
1418                                 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1419                                 pr_err("%s", buf);
1420                         }
1421                         bch2_trans_iter_put(trans, copy);
1422                 } else {
1423                         struct bkey_cached *ck = (void *) i->iter->l[0].b;
1424
1425                         if (ck->valid) {
1426                                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
1427                                 pr_err("%s", buf);
1428                         }
1429                 }
1430         }
1431         __WARN();
1432 }
1433
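/*
 * Apply the replicas, reserved and inode deltas accumulated during a
 * transaction commit to the filesystem usage for the journal sequence number
 * being committed, and release the sectors consumed from the transaction's
 * disk reservation. If usage went up by more than was reserved, pull the
 * difference back out of sectors_available and warn (once).
 */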
1434 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1435                                struct replicas_delta_list *deltas)
1436 {
1437         struct bch_fs *c = trans->c;
1438         static int warned_disk_usage = 0;
1439         bool warn = false;
1440         unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1441         struct replicas_delta *d = deltas->d;
1442         struct replicas_delta *top = (void *) deltas->d + deltas->used;
1443         struct bch_fs_usage *dst;
1444         s64 added = 0, should_not_have_added;
1445         unsigned i;
1446
1447         percpu_rwsem_assert_held(&c->mark_lock);
1448
1449         preempt_disable();
1450         dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1451
1452         for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1453                 switch (d->r.data_type) {
1454                 case BCH_DATA_btree:
1455                 case BCH_DATA_user:
1456                 case BCH_DATA_parity:
1457                         added += d->delta;
1458                 }
1459
1460                 BUG_ON(update_replicas(c, dst, &d->r, d->delta));
1461         }
1462
1463         dst->nr_inodes += deltas->nr_inodes;
1464
1465         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1466                 added                           += deltas->persistent_reserved[i];
1467                 dst->reserved                   += deltas->persistent_reserved[i];
1468                 dst->persistent_reserved[i]     += deltas->persistent_reserved[i];
1469         }
1470
1471         /*
1472          * Not allowed to reduce sectors_available except by getting a
1473          * reservation:
1474          */
1475         should_not_have_added = added - (s64) disk_res_sectors;
1476         if (unlikely(should_not_have_added > 0)) {
1477                 atomic64_sub(should_not_have_added, &c->sectors_available);
1478                 added -= should_not_have_added;
1479                 warn = true;
1480         }
1481
1482         if (added > 0) {
1483                 trans->disk_res->sectors -= added;
1484                 this_cpu_sub(*c->online_reserved, added);
1485         }
1486
1487         preempt_enable();
1488
1489         if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1490                 fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
1491 }
1492
1493 /* trans_mark: */
1494
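/*
 * If the transaction already has an update at @pos in @btree_id, return its
 * iterator and the key being inserted, so that triggers see the transaction's
 * own updates:
 */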
1495 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1496                             enum btree_id btree_id, struct bpos pos,
1497                             struct bkey_s_c *k)
1498 {
1499         struct btree_insert_entry *i;
1500
1501         trans_for_each_update(trans, i)
1502                 if (i->iter->btree_id == btree_id &&
1503                     (btree_node_type_is_extents(btree_id)
1504                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1505                        bkey_cmp(pos, i->k->k.p) < 0
1506                      : !bkey_cmp(pos, i->iter->pos))) {
1507                         *k = bkey_i_to_s_c(i->k);
1508
1509                         /* ugly hack.. */
1510                         BUG_ON(btree_iter_live(trans, i->iter));
1511                         trans->iters_live |= 1ULL << i->iter->idx;
1512                         return i->iter;
1513                 }
1514
1515         return NULL;
1516 }
1517
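/*
 * Look up the key at @pos, preferring a key this transaction is already
 * inserting: returns 1 if the key came from the transaction's updates, 0 if
 * it came from the btree, or a negative error code.
 */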
1518 static int trans_get_key(struct btree_trans *trans,
1519                          enum btree_id btree_id, struct bpos pos,
1520                          struct btree_iter **iter,
1521                          struct bkey_s_c *k)
1522 {
1523         unsigned flags = btree_id != BTREE_ID_alloc
1524                 ? BTREE_ITER_SLOTS
1525                 : BTREE_ITER_CACHED;
1526         int ret;
1527
1528         *iter = trans_get_update(trans, btree_id, pos, k);
1529         if (*iter)
1530                 return 1;
1531
1532         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1533                                     flags|BTREE_ITER_INTENT);
1534         *k = __bch2_btree_iter_peek(*iter, flags);
1535         ret = bkey_err(*k);
1536         if (ret)
1537                 bch2_trans_iter_put(trans, *iter);
1538         return ret;
1539 }
1540
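/*
 * Start an update to the alloc key for the bucket @ptr points into: returns a
 * bkey_alloc_buf allocated on the transaction for the caller to repack into,
 * and fills *u with the current unpacked alloc key - taken from the
 * transaction's own updates if present, otherwise built from the in-memory
 * bucket mark via a cached btree iterator.
 */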
1541 static struct bkey_alloc_buf *
1542 bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
1543                               const struct bch_extent_ptr *ptr,
1544                               struct bkey_alloc_unpacked *u)
1545 {
1546         struct bch_fs *c = trans->c;
1547         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1548         struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1549         struct bucket *g;
1550         struct btree_iter *iter;
1551         struct bkey_s_c k;
1552         struct bkey_alloc_buf *a;
1553         int ret;
1554
1555         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
1556         if (IS_ERR(a))
1557                 return a;
1558
1559         iter = trans_get_update(trans, BTREE_ID_alloc, pos, &k);
1560         if (iter) {
1561                 *u = bch2_alloc_unpack(k);
1562         } else {
1563                 iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, pos,
1564                                            BTREE_ITER_CACHED|
1565                                            BTREE_ITER_CACHED_NOFILL|
1566                                            BTREE_ITER_INTENT);
1567                 ret = bch2_btree_iter_traverse(iter);
1568                 if (ret) {
1569                         bch2_trans_iter_put(trans, iter);
1570                         return ERR_PTR(ret);
1571                 }
1572
1573                 percpu_down_read(&c->mark_lock);
1574                 g = bucket(ca, pos.offset);
1575                 *u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
1576                 percpu_up_read(&c->mark_lock);
1577         }
1578
1579         *_iter = iter;
1580         return a;
1581 }
1582
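/*
 * Transactional version of marking a single extent pointer: apply the sector
 * delta to the alloc key of the bucket the pointer points into.
 */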
1583 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1584                         struct bkey_s_c k, struct extent_ptr_decoded p,
1585                         s64 sectors, enum bch_data_type data_type)
1586 {
1587         struct bch_fs *c = trans->c;
1588         struct btree_iter *iter;
1589         struct bkey_alloc_unpacked u;
1590         struct bkey_alloc_buf *a;
1591         int ret;
1592
1593         a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1594         if (IS_ERR(a))
1595                 return PTR_ERR(a);
1596
1597         ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
1598                              &u.dirty_sectors, &u.cached_sectors);
1599         if (ret)
1600                 goto out;
1601
1602         bch2_alloc_pack(c, a, u);
1603         bch2_trans_update(trans, iter, &a->k, 0);
1604 out:
1605         bch2_trans_iter_put(trans, iter);
1606         return ret;
1607 }
1608
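/*
 * For a pointer belonging to an erasure coded stripe: bump the stripe's
 * per-block sector count and account the sectors against the stripe's
 * replicas entry.
 */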
1609 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1610                         struct extent_ptr_decoded p,
1611                         s64 sectors, enum bch_data_type data_type)
1612 {
1613         struct bch_fs *c = trans->c;
1614         struct btree_iter *iter;
1615         struct bkey_s_c k;
1616         struct bkey_i_stripe *s;
1617         struct bch_replicas_padded r;
1618         int ret = 0;
1619
1620         ret = trans_get_key(trans, BTREE_ID_stripes, POS(0, p.ec.idx), &iter, &k);
1621         if (ret < 0)
1622                 return ret;
1623
1624         if (k.k->type != KEY_TYPE_stripe) {
1625                 bch2_fs_inconsistent(c,
1626                         "pointer to nonexistent stripe %llu",
1627                         (u64) p.ec.idx);
1628                 bch2_inconsistent_error(c);
1629                 ret = -EIO;
1630                 goto out;
1631         }
1632
1633         if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1634                 bch2_fs_inconsistent(c,
1635                         "stripe pointer doesn't match stripe %llu",
1636                         (u64) p.ec.idx);
1637                 ret = -EIO;
1638                 goto out;
1639         }
1640
1641         s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1642         ret = PTR_ERR_OR_ZERO(s);
1643         if (ret)
1644                 goto out;
1645
1646         bkey_reassemble(&s->k_i, k);
1647         stripe_blockcount_set(&s->v, p.ec.block,
1648                 stripe_blockcount_get(&s->v, p.ec.block) +
1649                 sectors);
1650         bch2_trans_update(trans, iter, &s->k_i, 0);
1651
1652         bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1653         r.e.data_type = data_type;
1654         update_replicas_list(trans, &r.e, sectors);
1655 out:
1656         bch2_trans_iter_put(trans, iter);
1657         return ret;
1658 }
1659
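/*
 * Transactional triggers for extents and btree pointers: mark each pointer's
 * bucket, account cached sectors per device, dirty sectors against the
 * extent's replicas entry, and erasure coded pointers against their stripe.
 */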
1660 static int bch2_trans_mark_extent(struct btree_trans *trans,
1661                         struct bkey_s_c k, unsigned offset,
1662                         s64 sectors, unsigned flags,
1663                         enum bch_data_type data_type)
1664 {
1665         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1666         const union bch_extent_entry *entry;
1667         struct extent_ptr_decoded p;
1668         struct bch_replicas_padded r;
1669         s64 dirty_sectors = 0;
1670         bool stale;
1671         int ret;
1672
1673         r.e.data_type   = data_type;
1674         r.e.nr_devs     = 0;
1675         r.e.nr_required = 1;
1676
1677         BUG_ON(!sectors);
1678
1679         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1680                 s64 disk_sectors = data_type == BCH_DATA_btree
1681                         ? sectors
1682                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1683
1684                 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1685                                               data_type);
1686                 if (ret < 0)
1687                         return ret;
1688
1689                 stale = ret > 0;
1690
1691                 if (p.ptr.cached) {
1692                         if (!stale)
1693                                 update_cached_sectors_list(trans, p.ptr.dev,
1694                                                            disk_sectors);
1695                 } else if (!p.has_ec) {
1696                         dirty_sectors          += disk_sectors;
1697                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1698                 } else {
1699                         ret = bch2_trans_mark_stripe_ptr(trans, p,
1700                                         disk_sectors, data_type);
1701                         if (ret)
1702                                 return ret;
1703
1704                         r.e.nr_required = 0;
1705                 }
1706         }
1707
1708         if (r.e.nr_devs)
1709                 update_replicas_list(trans, &r.e, dirty_sectors);
1710
1711         return 0;
1712 }
1713
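/*
 * Update the alloc key for block @idx of a stripe: parity blocks have their
 * sectors accounted as BCH_DATA_parity, and the bucket's back reference to
 * the stripe is set when creating and cleared when deleting.
 */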
1714 static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
1715                                             struct bkey_s_c_stripe s,
1716                                             unsigned idx, bool deleting)
1717 {
1718         struct bch_fs *c = trans->c;
1719         const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1720         struct bkey_alloc_buf *a;
1721         struct btree_iter *iter;
1722         struct bkey_alloc_unpacked u;
1723         bool parity = idx >= s.v->nr_blocks - s.v->nr_redundant;
1724         int ret = 0;
1725
1726         a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1727         if (IS_ERR(a))
1728                 return PTR_ERR(a);
1729
1730         if (parity) {
1731                 s64 sectors = le16_to_cpu(s.v->sectors);
1732
1733                 if (deleting)
1734                         sectors = -sectors;
1735
1736                 u.dirty_sectors += sectors;
1737                 u.data_type = u.dirty_sectors
1738                         ? BCH_DATA_parity
1739                         : 0;
1740         }
1741
1742         if (!deleting) {
1743                 if (bch2_fs_inconsistent_on(u.stripe && u.stripe != s.k->p.offset, c,
1744                                 "bucket %llu:%llu gen %u: multiple stripes using same bucket (%u, %llu)",
1745                                 iter->pos.inode, iter->pos.offset, u.gen,
1746                                 u.stripe, s.k->p.offset)) {
1747                         ret = -EIO;
1748                         goto err;
1749                 }
1750
1751                 u.stripe                = s.k->p.offset;
1752                 u.stripe_redundancy     = s.v->nr_redundant;
1753         } else {
1754                 u.stripe                = 0;
1755                 u.stripe_redundancy     = 0;
1756         }
1757
1758         bch2_alloc_pack(c, a, u);
1759         bch2_trans_update(trans, iter, &a->k, 0);
1760 err:
1761         bch2_trans_iter_put(trans, iter);
1762         return ret;
1763 }
1764
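/*
 * Transactional trigger for stripe keys: nothing to do if the blocks are
 * unchanged; otherwise account the new stripe's sectors and mark its buckets,
 * then undo the accounting and bucket references for the old stripe.
 */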
1765 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1766                                   struct bkey_s_c old, struct bkey_s_c new,
1767                                   unsigned flags)
1768 {
1769         struct bkey_s_c_stripe old_s = { .k = NULL };
1770         struct bkey_s_c_stripe new_s = { .k = NULL };
1771         struct bch_replicas_padded r;
1772         unsigned i;
1773         int ret = 0;
1774
1775         if (old.k->type == KEY_TYPE_stripe)
1776                 old_s = bkey_s_c_to_stripe(old);
1777         if (new.k->type == KEY_TYPE_stripe)
1778                 new_s = bkey_s_c_to_stripe(new);
1779
1780         /*
1781          * If the pointers aren't changing, we don't need to do anything:
1782          */
1783         if (new_s.k && old_s.k &&
1784             new_s.v->nr_blocks          == old_s.v->nr_blocks &&
1785             new_s.v->nr_redundant       == old_s.v->nr_redundant &&
1786             !memcmp(old_s.v->ptrs, new_s.v->ptrs,
1787                     new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
1788                 return 0;
1789
1790         if (new_s.k) {
1791                 s64 sectors = le16_to_cpu(new_s.v->sectors);
1792
1793                 bch2_bkey_to_replicas(&r.e, new);
1794                 update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
1795
1796                 for (i = 0; i < new_s.v->nr_blocks; i++) {
1797                         ret = bch2_trans_mark_stripe_alloc_ref(trans, new_s,
1798                                                                i, false);
1799                         if (ret)
1800                                 return ret;
1801                 }
1802         }
1803
1804         if (old_s.k) {
1805                 s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
1806
1807                 bch2_bkey_to_replicas(&r.e, old);
1808                 update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
1809
1810                 for (i = 0; i < old_s.v->nr_blocks; i++) {
1811                         ret = bch2_trans_mark_stripe_alloc_ref(trans, old_s,
1812                                                                i, true);
1813                         if (ret)
1814                                 return ret;
1815                 }
1816         }
1817
1818         return ret;
1819 }
1820
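/*
 * Adjust the refcount on the indirect extent at @idx referenced by @p,
 * deleting it when the refcount reaches zero; returns how many sectors were
 * covered so the caller can advance to the next indirect extent.
 */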
1821 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1822                         struct bkey_s_c_reflink_p p,
1823                         u64 idx, unsigned sectors,
1824                         unsigned front_frag,
1825                         unsigned back_frag,
1826                         unsigned flags)
1827 {
1828         struct bch_fs *c = trans->c;
1829         struct btree_iter *iter;
1830         struct bkey_s_c k;
1831         struct bkey_i *n;
1832         __le64 *refcount;
1833         int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1834         int frags_referenced;
1835         s64 ret;
1836
1837         ret = trans_get_key(trans, BTREE_ID_reflink,
1838                             POS(0, idx), &iter, &k);
1839         if (ret < 0)
1840                 return ret;
1841
1842         sectors = min_t(u64, sectors, k.k->p.offset - idx);
1843
1844         frags_referenced =
1845                 reflink_p_frag_references(p, 0, front_frag, k) +
1846                 reflink_p_frag_references(p, back_frag, p.k->size, k);
1847
1848         if (frags_referenced == 2) {
1849                 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE_SPLIT));
1850                 add = -add;
1851         } else if (frags_referenced == 1) {
1852                 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE));
1853                 goto out;
1854         }
1855
1856         n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1857         ret = PTR_ERR_OR_ZERO(n);
1858         if (ret)
1859                 goto err;
1860
1861         bkey_reassemble(n, k);
1862
1863         refcount = bkey_refcount(n);
1864         if (!refcount) {
1865                 bch2_fs_inconsistent(c,
1866                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1867                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1868                 bch2_inconsistent_error(c);
1869                 ret = -EIO;
1870                 goto err;
1871         }
1872
1873         BUG_ON(!*refcount && (flags & BTREE_TRIGGER_OVERWRITE));
1874         le64_add_cpu(refcount, add);
1875
1876         if (!*refcount) {
1877                 n->k.type = KEY_TYPE_deleted;
1878                 set_bkey_val_u64s(&n->k, 0);
1879         }
1880
1881         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1882         ret = bch2_trans_update(trans, iter, n, 0);
1883         if (ret)
1884                 goto err;
1885 out:
1886         ret = sectors;
1887 err:
1888         bch2_trans_iter_put(trans, iter);
1889         return ret;
1890 }
1891
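/* Walk the range @p references, one indirect extent at a time: */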
1892 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1893                         struct bkey_s_c_reflink_p p, unsigned offset,
1894                         s64 sectors, unsigned flags)
1895 {
1896         u64 idx = le64_to_cpu(p.v->idx) + offset;
1897         unsigned front_frag, back_frag;
1898         s64 ret = 0;
1899
1900         if (sectors < 0)
1901                 sectors = -sectors;
1902
1903         BUG_ON(offset + sectors > p.k->size);
1904
1905         front_frag = offset;
1906         back_frag = offset + sectors;
1907
1908         while (sectors) {
1909                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors,
1910                                         front_frag, back_frag, flags);
1911                 if (ret < 0)
1912                         return ret;
1913
1914                 idx     += ret;
1915                 sectors -= ret;
1916         }
1917
1918         return 0;
1919 }
1920
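/*
 * Transactional triggers: generate the alloc, stripe and reflink updates and
 * the replicas accounting implied by inserting or overwriting @k, within the
 * same btree transaction.
 */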
1921 int bch2_trans_mark_key(struct btree_trans *trans,
1922                         struct bkey_s_c old,
1923                         struct bkey_s_c new,
1924                         unsigned offset, s64 sectors, unsigned flags)
1925 {
1926         struct bch_fs *c = trans->c;
1927         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1928         struct replicas_delta_list *d;
1929
1930         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1931
1932         switch (k.k->type) {
1933         case KEY_TYPE_btree_ptr:
1934         case KEY_TYPE_btree_ptr_v2:
1935                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1936                         ?  c->opts.btree_node_size
1937                         : -c->opts.btree_node_size;
1938
1939                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1940                                               flags, BCH_DATA_btree);
1941         case KEY_TYPE_extent:
1942         case KEY_TYPE_reflink_v:
1943                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1944                                               flags, BCH_DATA_user);
1945         case KEY_TYPE_stripe:
1946                 return bch2_trans_mark_stripe(trans, old, new, flags);
1947         case KEY_TYPE_inode: {
1948                 int nr = (new.k->type == KEY_TYPE_inode) -
1949                          (old.k->type == KEY_TYPE_inode);
1950
1951                 if (nr) {
1952                         d = replicas_deltas_realloc(trans, 0);
1953                         d->nr_inodes += nr;
1954                 }
1955
1956                 return 0;
1957         }
1958         case KEY_TYPE_reservation: {
1959                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1960
1961                 d = replicas_deltas_realloc(trans, 0);
1962
1963                 sectors *= replicas;
1964                 replicas = clamp_t(unsigned, replicas, 1,
1965                                    ARRAY_SIZE(d->persistent_reserved));
1966
1967                 d->persistent_reserved[replicas - 1] += sectors;
1968                 return 0;
1969         }
1970         case KEY_TYPE_reflink_p:
1971                 return bch2_trans_mark_reflink_p(trans,
1972                                         bkey_s_c_to_reflink_p(k),
1973                                         offset, sectors, flags);
1974         default:
1975                 return 0;
1976         }
1977 }
1978
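/*
 * Run transactional triggers for a single update: for non-extent btrees mark
 * the old and new keys; for extent btrees mark the insert, then each existing
 * extent it overwrites, with offset/sectors describing the overlapped part.
 */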
1979 int bch2_trans_mark_update(struct btree_trans *trans,
1980                            struct btree_iter *iter,
1981                            struct bkey_i *new,
1982                            unsigned flags)
1983 {
1984         struct bkey_s_c old;
1985         int ret;
1986
1987         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1988                 return 0;
1989
1990         if (!btree_node_type_needs_gc(iter->btree_id))
1991                 return 0;
1992
1993         if (!btree_node_type_is_extents(iter->btree_id)) {
1994                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1995                         old = bch2_btree_iter_peek_slot(iter);
1996                         ret = bkey_err(old);
1997                         if (ret)
1998                                 return ret;
1999                 } else {
2000                         struct bkey_cached *ck = (void *) iter->l[0].b;
2001
2002                         BUG_ON(!ck->valid);
2003                         old = bkey_i_to_s_c(ck->k);
2004                 }
2005
2006                 if (old.k->type == new->k.type) {
2007                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2008                                         BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
2009                 } else {
2010                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2011                                         BTREE_TRIGGER_INSERT|flags) ?:
2012                                 bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2013                                         BTREE_TRIGGER_OVERWRITE|flags);
2014                 }
2015         } else {
2016                 struct btree_iter *copy;
2017                 struct bkey _old;
2018
2019                 EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
2020
2021                 bkey_init(&_old);
2022                 old = (struct bkey_s_c) { &_old, NULL };
2023
2024                 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2025                                           0, new->k.size,
2026                                           BTREE_TRIGGER_INSERT);
2027                 if (ret)
2028                         return ret;
2029
2030                 copy = bch2_trans_copy_iter(trans, iter);
2031
2032                 for_each_btree_key_continue(copy, 0, old, ret) {
2033                         unsigned offset = 0;
2034                         s64 sectors = -((s64) old.k->size);
2035
2036                         flags |= BTREE_TRIGGER_OVERWRITE;
2037
2038                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
2039                                 break;
2040
2041                         switch (bch2_extent_overlap(&new->k, old.k)) {
2042                         case BCH_EXTENT_OVERLAP_ALL:
2043                                 offset = 0;
2044                                 sectors = -((s64) old.k->size);
2045                                 break;
2046                         case BCH_EXTENT_OVERLAP_BACK:
2047                                 offset = bkey_start_offset(&new->k) -
2048                                         bkey_start_offset(old.k);
2049                                 sectors = bkey_start_offset(&new->k) -
2050                                         old.k->p.offset;
2051                                 break;
2052                         case BCH_EXTENT_OVERLAP_FRONT:
2053                                 offset = 0;
2054                                 sectors = bkey_start_offset(old.k) -
2055                                         new->k.p.offset;
2056                                 break;
2057                         case BCH_EXTENT_OVERLAP_MIDDLE:
2058                                 offset = bkey_start_offset(&new->k) -
2059                                         bkey_start_offset(old.k);
2060                                 sectors = -((s64) new->k.size);
2061                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
2062                                 break;
2063                         }
2064
2065                         BUG_ON(sectors >= 0);
2066
2067                         ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2068                                         offset, sectors, flags);
2069                         if (ret)
2070                                 break;
2071                 }
2072                 bch2_trans_iter_put(trans, copy);
2073         }
2074
2075         return ret;
2076 }
2077
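/*
 * Set a bucket's alloc key to the given metadata type and sector count,
 * erroring if the bucket already contains a different type of data:
 */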
2078 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2079                                     struct bch_dev *ca, size_t b,
2080                                     enum bch_data_type type,
2081                                     unsigned sectors)
2082 {
2083         struct bch_fs *c = trans->c;
2084         struct btree_iter *iter;
2085         struct bkey_alloc_unpacked u;
2086         struct bkey_alloc_buf *a;
2087         struct bch_extent_ptr ptr = {
2088                 .dev = ca->dev_idx,
2089                 .offset = bucket_to_sector(ca, b),
2090         };
2091         int ret = 0;
2092
2093         /*
2094          * Backup superblock might be past the end of our normal usable space:
2095          */
2096         if (b >= ca->mi.nbuckets)
2097                 return 0;
2098
2099         a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
2100         if (IS_ERR(a))
2101                 return PTR_ERR(a);
2102
2103         if (u.data_type && u.data_type != type) {
2104                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
2105                         "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
2106                         "while marking %s",
2107                         iter->pos.inode, iter->pos.offset, u.gen,
2108                         bch2_data_types[u.data_type],
2109                         bch2_data_types[type],
2110                         bch2_data_types[type]);
2111                 ret = -EIO;
2112                 goto out;
2113         }
2114
2115         u.data_type     = type;
2116         u.dirty_sectors = sectors;
2117
2118         bch2_alloc_pack(c, a, u);
2119         bch2_trans_update(trans, iter, &a->k, 0);
2120 out:
2121         bch2_trans_iter_put(trans, iter);
2122         return ret;
2123 }
2124
2125 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2126                                     struct bch_dev *ca, size_t b,
2127                                     enum bch_data_type type,
2128                                     unsigned sectors)
2129 {
2130         return __bch2_trans_do(trans, NULL, NULL, 0,
2131                         __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
2132 }
2133
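/*
 * Accumulate per-bucket sector counts over [start, end), flushing a bucket's
 * total whenever the range crosses into the next bucket; the caller flushes
 * the final bucket.
 */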
2134 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
2135                                             struct bch_dev *ca,
2136                                             u64 start, u64 end,
2137                                             enum bch_data_type type,
2138                                             u64 *bucket, unsigned *bucket_sectors)
2139 {
2140         do {
2141                 u64 b = sector_to_bucket(ca, start);
2142                 unsigned sectors =
2143                         min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
2144
2145                 if (b != *bucket && *bucket_sectors) {
2146                         int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
2147                                                                   type, *bucket_sectors);
2148                         if (ret)
2149                                 return ret;
2150
2151                         *bucket_sectors = 0;
2152                 }
2153
2154                 *bucket         = b;
2155                 *bucket_sectors += sectors;
2156                 start += sectors;
2157         } while (start < end);
2158
2159         return 0;
2160 }
2161
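/*
 * Mark the buckets containing @ca's superblocks (including the sectors before
 * the primary superblock) and journal:
 */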
2162 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
2163                                     struct bch_dev *ca)
2164 {
2165         struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
2166         u64 bucket = 0;
2167         unsigned i, bucket_sectors = 0;
2168         int ret;
2169
2170         for (i = 0; i < layout->nr_superblocks; i++) {
2171                 u64 offset = le64_to_cpu(layout->sb_offset[i]);
2172
2173                 if (offset == BCH_SB_SECTOR) {
2174                         ret = bch2_trans_mark_metadata_sectors(trans, ca,
2175                                                 0, BCH_SB_SECTOR,
2176                                                 BCH_DATA_sb, &bucket, &bucket_sectors);
2177                         if (ret)
2178                                 return ret;
2179                 }
2180
2181                 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
2182                                       offset + (1 << layout->sb_max_size_bits),
2183                                       BCH_DATA_sb, &bucket, &bucket_sectors);
2184                 if (ret)
2185                         return ret;
2186         }
2187
2188         if (bucket_sectors) {
2189                 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2190                                 bucket, BCH_DATA_sb, bucket_sectors);
2191                 if (ret)
2192                         return ret;
2193         }
2194
2195         for (i = 0; i < ca->journal.nr; i++) {
2196                 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2197                                 ca->journal.buckets[i],
2198                                 BCH_DATA_journal, ca->mi.bucket_size);
2199                 if (ret)
2200                         return ret;
2201         }
2202
2203         return 0;
2204 }
2205
2206 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
2207 {
2208         return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
2209                         __bch2_trans_mark_dev_sb(&trans, ca));
2210 }
2211
2212 /* Disk reservations: */
2213
2214 #define SECTORS_CACHE   1024
2215
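/*
 * Take a disk reservation: satisfy it from this cpu's sectors_available cache
 * if possible, refilling the cache from the global counter in batches of
 * SECTORS_CACHE; if the global counter runs dry, recompute it from filesystem
 * usage under sectors_available_lock.
 */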
2216 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2217                               u64 sectors, int flags)
2218 {
2219         struct bch_fs_pcpu *pcpu;
2220         u64 old, v, get;
2221         s64 sectors_available;
2222         int ret;
2223
2224         percpu_down_read(&c->mark_lock);
2225         preempt_disable();
2226         pcpu = this_cpu_ptr(c->pcpu);
2227
2228         if (sectors <= pcpu->sectors_available)
2229                 goto out;
2230
2231         v = atomic64_read(&c->sectors_available);
2232         do {
2233                 old = v;
2234                 get = min((u64) sectors + SECTORS_CACHE, old);
2235
2236                 if (get < sectors) {
2237                         preempt_enable();
2238                         goto recalculate;
2239                 }
2240         } while ((v = atomic64_cmpxchg(&c->sectors_available,
2241                                        old, old - get)) != old);
2242
2243         pcpu->sectors_available         += get;
2244
2245 out:
2246         pcpu->sectors_available         -= sectors;
2247         this_cpu_add(*c->online_reserved, sectors);
2248         res->sectors                    += sectors;
2249
2250         preempt_enable();
2251         percpu_up_read(&c->mark_lock);
2252         return 0;
2253
2254 recalculate:
2255         mutex_lock(&c->sectors_available_lock);
2256
2257         percpu_u64_set(&c->pcpu->sectors_available, 0);
2258         sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2259
2260         if (sectors <= sectors_available ||
2261             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2262                 atomic64_set(&c->sectors_available,
2263                              max_t(s64, 0, sectors_available - sectors));
2264                 this_cpu_add(*c->online_reserved, sectors);
2265                 res->sectors                    += sectors;
2266                 ret = 0;
2267         } else {
2268                 atomic64_set(&c->sectors_available, sectors_available);
2269                 ret = -ENOSPC;
2270         }
2271
2272         mutex_unlock(&c->sectors_available_lock);
2273         percpu_up_read(&c->mark_lock);
2274
2275         return ret;
2276 }
2277
2278 /* Startup/shutdown: */
2279
2280 static void buckets_free_rcu(struct rcu_head *rcu)
2281 {
2282         struct bucket_array *buckets =
2283                 container_of(rcu, struct bucket_array, rcu);
2284
2285         kvpfree(buckets,
2286                 sizeof(struct bucket_array) +
2287                 buckets->nbuckets * sizeof(struct bucket));
2288 }
2289
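/*
 * Allocate bucket arrays and freelists sized for @nbuckets, copy the existing
 * contents across if this is a resize, and swap the new ones in under the gc,
 * bucket and mark locks; the old bucket array is freed via RCU.
 */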
2290 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2291 {
2292         struct bucket_array *buckets = NULL, *old_buckets = NULL;
2293         unsigned long *buckets_nouse = NULL;
2294         alloc_fifo      free[RESERVE_NR];
2295         alloc_fifo      free_inc;
2296         alloc_heap      alloc_heap;
2297
2298         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2299                              ca->mi.bucket_size / c->opts.btree_node_size);
2300         /* XXX: these should be tunable */
2301         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
2302         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 6);
2303         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
2304                                       btree_reserve * 2);
2305         bool resize = ca->buckets[0] != NULL;
2306         int ret = -ENOMEM;
2307         unsigned i;
2308
2309         memset(&free,           0, sizeof(free));
2310         memset(&free_inc,       0, sizeof(free_inc));
2311         memset(&alloc_heap,     0, sizeof(alloc_heap));
2312
2313         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
2314                                             nbuckets * sizeof(struct bucket),
2315                                             GFP_KERNEL|__GFP_ZERO)) ||
2316             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2317                                             sizeof(unsigned long),
2318                                             GFP_KERNEL|__GFP_ZERO)) ||
2319             !init_fifo(&free[RESERVE_MOVINGGC],
2320                        copygc_reserve, GFP_KERNEL) ||
2321             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2322             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
2323             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2324                 goto err;
2325
2326         buckets->first_bucket   = ca->mi.first_bucket;
2327         buckets->nbuckets       = nbuckets;
2328
2329         bch2_copygc_stop(c);
2330
2331         if (resize) {
2332                 down_write(&c->gc_lock);
2333                 down_write(&ca->bucket_lock);
2334                 percpu_down_write(&c->mark_lock);
2335         }
2336
2337         old_buckets = bucket_array(ca);
2338
2339         if (resize) {
2340                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2341
2342                 memcpy(buckets->b,
2343                        old_buckets->b,
2344                        n * sizeof(struct bucket));
2345                 memcpy(buckets_nouse,
2346                        ca->buckets_nouse,
2347                        BITS_TO_LONGS(n) * sizeof(unsigned long));
2348         }
2349
2350         rcu_assign_pointer(ca->buckets[0], buckets);
2351         buckets = old_buckets;
2352
2353         swap(ca->buckets_nouse, buckets_nouse);
2354
2355         if (resize) {
2356                 percpu_up_write(&c->mark_lock);
2357                 up_write(&c->gc_lock);
2358         }
2359
2360         spin_lock(&c->freelist_lock);
2361         for (i = 0; i < RESERVE_NR; i++) {
2362                 fifo_move(&free[i], &ca->free[i]);
2363                 swap(ca->free[i], free[i]);
2364         }
2365         fifo_move(&free_inc, &ca->free_inc);
2366         swap(ca->free_inc, free_inc);
2367         spin_unlock(&c->freelist_lock);
2368
2369         /* with gc lock held, alloc_heap can't be in use: */
2370         swap(ca->alloc_heap, alloc_heap);
2371
2372         nbuckets = ca->mi.nbuckets;
2373
2374         if (resize)
2375                 up_write(&ca->bucket_lock);
2376
2377         ret = 0;
2378 err:
2379         free_heap(&alloc_heap);
2380         free_fifo(&free_inc);
2381         for (i = 0; i < RESERVE_NR; i++)
2382                 free_fifo(&free[i]);
2383         kvpfree(buckets_nouse,
2384                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2385         if (buckets)
2386                 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2387
2388         return ret;
2389 }
2390
2391 void bch2_dev_buckets_free(struct bch_dev *ca)
2392 {
2393         unsigned i;
2394
2395         free_heap(&ca->alloc_heap);
2396         free_fifo(&ca->free_inc);
2397         for (i = 0; i < RESERVE_NR; i++)
2398                 free_fifo(&ca->free[i]);
2399         kvpfree(ca->buckets_nouse,
2400                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2401         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2402                 sizeof(struct bucket_array) +
2403                 ca->mi.nbuckets * sizeof(struct bucket));
2404
2405         for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2406                 free_percpu(ca->usage[i]);
2407         kfree(ca->usage_base);
2408 }
2409
2410 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2411 {
2412         unsigned i;
2413
2414         ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2415         if (!ca->usage_base)
2416                 return -ENOMEM;
2417
2418         for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2419                 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2420                 if (!ca->usage[i])
2421                         return -ENOMEM;
2422         }
2423
2424         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2425 }