1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  *   The bucket contains data that we must not discard (either the only
24  *   copy, or one of the 'main copies' for data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
65
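/*
 * In-memory bucket state lives in struct bucket_mark and is updated with the
 * bucket_cmpxchg() loops used throughout this file. A sketch of the pattern
 * (not a real call site):
 *
 *         old = bucket_cmpxchg(g, new, ({
 *                 new.dirty_sectors += sectors;
 *                 new.data_type      = type;
 *         }));
 *         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
 *
 * i.e. the expression is retried until the mark is swapped atomically, and the
 * old/new marks are then handed to bch2_dev_usage_update() to keep the device
 * and filesystem usage counters consistent.
 */
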
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
79
80 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
81                                               enum bch_data_type data_type,
82                                               s64 sectors)
83 {
84         switch (data_type) {
85         case BCH_DATA_btree:
86                 fs_usage->btree         += sectors;
87                 break;
88         case BCH_DATA_user:
89         case BCH_DATA_parity:
90                 fs_usage->data          += sectors;
91                 break;
92         case BCH_DATA_cached:
93                 fs_usage->cached        += sectors;
94                 break;
95         default:
96                 break;
97         }
98 }
99
100 /*
101  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
102  * wraparound:
103  */
104 void bch2_bucket_seq_cleanup(struct bch_fs *c)
105 {
106         u64 journal_seq = atomic64_read(&c->journal.seq);
107         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
108         struct bch_dev *ca;
109         struct bucket_array *buckets;
110         struct bucket *g;
111         struct bucket_mark m;
112         unsigned i;
113
114         if (journal_seq - c->last_bucket_seq_cleanup <
115             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
116                 return;
117
118         c->last_bucket_seq_cleanup = journal_seq;
119
120         for_each_member_device(ca, c, i) {
121                 down_read(&ca->bucket_lock);
122                 buckets = bucket_array(ca);
123
124                 for_each_bucket(g, buckets) {
125                         bucket_cmpxchg(g, m, ({
126                                 if (!m.journal_seq_valid ||
127                                     bucket_needs_journal_commit(m, last_seq_ondisk))
128                                         break;
129
130                                 m.journal_seq_valid = 0;
131                         }));
132                 }
133                 up_read(&ca->bucket_lock);
134         }
135 }
136
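/*
 * Rebuild the derived summary counters: fold both journalled percpu
 * accumulation sets into usage_base, then recompute ->reserved from
 * persistent_reserved[] and the btree/data/cached totals from the
 * per-replicas-entry counters.
 */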
137 void bch2_fs_usage_initialize(struct bch_fs *c)
138 {
139         struct bch_fs_usage *usage;
140         unsigned i;
141
142         percpu_down_write(&c->mark_lock);
143         usage = c->usage_base;
144
145         bch2_fs_usage_acc_to_base(c, 0);
146         bch2_fs_usage_acc_to_base(c, 1);
147
148         for (i = 0; i < BCH_REPLICAS_MAX; i++)
149                 usage->reserved += usage->persistent_reserved[i];
150
151         for (i = 0; i < c->replicas.nr; i++) {
152                 struct bch_replicas_entry *e =
153                         cpu_replicas_entry(&c->replicas, i);
154
155                 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
156         }
157
158         percpu_up_write(&c->mark_lock);
159 }
160
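/*
 * Scratch buffers used when we need a temporary struct bch_fs_usage:
 * bch2_fs_usage_scratch_get() tries a nonblocking allocation first, then the
 * preallocated c->usage_scratch buffer (if usage_scratch_lock is uncontended),
 * then a GFP_NOFS allocation, and finally blocks on the scratch lock;
 * bch2_fs_usage_scratch_put() frees or unlocks whichever one was handed out.
 */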
161 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
162 {
163         if (fs_usage == c->usage_scratch)
164                 mutex_unlock(&c->usage_scratch_lock);
165         else
166                 kfree(fs_usage);
167 }
168
169 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
170 {
171         struct bch_fs_usage *ret;
172         unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
173
174         ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
175         if (ret)
176                 return ret;
177
178         if (mutex_trylock(&c->usage_scratch_lock))
179                 goto out_pool;
180
181         ret = kzalloc(bytes, GFP_NOFS);
182         if (ret)
183                 return ret;
184
185         mutex_lock(&c->usage_scratch_lock);
186 out_pool:
187         ret = c->usage_scratch;
188         memset(ret, 0, bytes);
189         return ret;
190 }
191
192 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
193 {
194         struct bch_dev_usage ret;
195
196         memset(&ret, 0, sizeof(ret));
197         acc_u64s_percpu((u64 *) &ret,
198                         (u64 __percpu *) ca->usage[0],
199                         sizeof(ret) / sizeof(u64));
200
201         return ret;
202 }
203
204 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
205                                                 unsigned journal_seq,
206                                                 bool gc)
207 {
208         return this_cpu_ptr(gc
209                             ? c->usage_gc
210                             : c->usage[journal_seq & 1]);
211 }
212
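/*
 * Read a single usage counter: the value in usage_base plus whatever is still
 * sitting in the two percpu accumulation sets, sampled consistently under the
 * usage_lock seqcount.
 */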
213 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
214 {
215         ssize_t offset = v - (u64 *) c->usage_base;
216         unsigned seq;
217         u64 ret;
218
219         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
220         percpu_rwsem_assert_held(&c->mark_lock);
221
222         do {
223                 seq = read_seqcount_begin(&c->usage_lock);
224                 ret = *v +
225                         percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
226                         percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
227         } while (read_seqcount_retry(&c->usage_lock, seq));
228
229         return ret;
230 }
231
232 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
233 {
234         struct bch_fs_usage *ret;
235         unsigned seq, v, u64s = fs_usage_u64s(c);
236 retry:
237         ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
238         if (unlikely(!ret))
239                 return NULL;
240
241         percpu_down_read(&c->mark_lock);
242
243         v = fs_usage_u64s(c);
244         if (unlikely(u64s != v)) {
245                 u64s = v;
246                 percpu_up_read(&c->mark_lock);
247                 kfree(ret);
248                 goto retry;
249         }
250
251         do {
252                 seq = read_seqcount_begin(&c->usage_lock);
253                 memcpy(ret, c->usage_base, u64s * sizeof(u64));
254                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
255                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
256         } while (read_seqcount_retry(&c->usage_lock, seq));
257
258         return ret;
259 }
260
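/*
 * Fold one of the two percpu accumulation sets (the same sets indexed by
 * journal_seq & 1 in fs_usage_ptr()) into usage_base and zero it out.
 */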
261 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
262 {
263         unsigned u64s = fs_usage_u64s(c);
264
265         BUG_ON(idx >= 2);
266
267         preempt_disable();
268         write_seqcount_begin(&c->usage_lock);
269
270         acc_u64s_percpu((u64 *) c->usage_base,
271                         (u64 __percpu *) c->usage[idx], u64s);
272         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
273
274         write_seqcount_end(&c->usage_lock);
275         preempt_enable();
276 }
277
278 void bch2_fs_usage_to_text(struct printbuf *out,
279                            struct bch_fs *c,
280                            struct bch_fs_usage *fs_usage)
281 {
282         unsigned i;
283
284         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
285
286         pr_buf(out, "hidden:\t\t\t\t%llu\n",
287                fs_usage->hidden);
288         pr_buf(out, "data:\t\t\t\t%llu\n",
289                fs_usage->data);
290         pr_buf(out, "cached:\t\t\t\t%llu\n",
291                fs_usage->cached);
292         pr_buf(out, "reserved:\t\t\t%llu\n",
293                fs_usage->reserved);
294         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
295                fs_usage->nr_inodes);
296         pr_buf(out, "online reserved:\t\t%llu\n",
297                fs_usage->online_reserved);
298
299         for (i = 0;
300              i < ARRAY_SIZE(fs_usage->persistent_reserved);
301              i++) {
302                 pr_buf(out, "%u replicas:\n", i + 1);
303                 pr_buf(out, "\treserved:\t\t%llu\n",
304                        fs_usage->persistent_reserved[i]);
305         }
306
307         for (i = 0; i < c->replicas.nr; i++) {
308                 struct bch_replicas_entry *e =
309                         cpu_replicas_entry(&c->replicas, i);
310
311                 pr_buf(out, "\t");
312                 bch2_replicas_entry_to_text(out, e);
313                 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
314         }
315 }
316
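/*
 * reserve_factor() pads a sector count by ~1/2^RESERVE_FACTOR (i.e. scales it
 * by roughly 65/64); avail_factor() is the approximate inverse, scaling by
 * 64/65.
 */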
317 #define RESERVE_FACTOR  6
318
319 static u64 reserve_factor(u64 r)
320 {
321         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
322 }
323
324 static u64 avail_factor(u64 r)
325 {
326         return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
327 }
328
329 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
330 {
331         return min(fs_usage->hidden +
332                    fs_usage->btree +
333                    fs_usage->data +
334                    reserve_factor(fs_usage->reserved +
335                                   fs_usage->online_reserved),
336                    c->capacity);
337 }
338
339 static struct bch_fs_usage_short
340 __bch2_fs_usage_read_short(struct bch_fs *c)
341 {
342         struct bch_fs_usage_short ret;
343         u64 data, reserved;
344
345         ret.capacity = c->capacity -
346                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
347
348         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
349                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
350         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
351                 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
352
353         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
354         ret.free        = ret.capacity - ret.used;
355
356         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
357
358         return ret;
359 }
360
361 struct bch_fs_usage_short
362 bch2_fs_usage_read_short(struct bch_fs *c)
363 {
364         struct bch_fs_usage_short ret;
365
366         percpu_down_read(&c->mark_lock);
367         ret = __bch2_fs_usage_read_short(c);
368         percpu_up_read(&c->mark_lock);
369
370         return ret;
371 }
372
373 static inline int is_unavailable_bucket(struct bucket_mark m)
374 {
375         return !is_available_bucket(m);
376 }
377
378 static inline int is_fragmented_bucket(struct bucket_mark m,
379                                        struct bch_dev *ca)
380 {
381         if (!m.owned_by_allocator &&
382             m.data_type == BCH_DATA_user &&
383             bucket_sectors_used(m))
384                 return max_t(int, 0, (int) ca->mi.bucket_size -
385                              bucket_sectors_used(m));
386         return 0;
387 }
388
389 static inline int is_stripe_data_bucket(struct bucket_mark m)
390 {
391         return m.stripe && m.data_type != BCH_DATA_parity;
392 }
393
394 static inline int bucket_stripe_sectors(struct bucket_mark m)
395 {
396         return is_stripe_data_bucket(m) ? m.dirty_sectors : 0;
397 }
398
399 static inline enum bch_data_type bucket_type(struct bucket_mark m)
400 {
401         return m.cached_sectors && !m.dirty_sectors
402                 ? BCH_DATA_cached
403                 : m.data_type;
404 }
405
406 static bool bucket_became_unavailable(struct bucket_mark old,
407                                       struct bucket_mark new)
408 {
409         return is_available_bucket(old) &&
410                !is_available_bucket(new);
411 }
412
413 int bch2_fs_usage_apply(struct bch_fs *c,
414                         struct bch_fs_usage *fs_usage,
415                         struct disk_reservation *disk_res,
416                         unsigned journal_seq)
417 {
418         s64 added = fs_usage->data + fs_usage->reserved;
419         s64 should_not_have_added;
420         int ret = 0;
421
422         percpu_rwsem_assert_held(&c->mark_lock);
423
424         /*
425          * Not allowed to reduce sectors_available except by getting a
426          * reservation:
427          */
428         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
429         if (WARN_ONCE(should_not_have_added > 0,
430                       "disk usage increased %lli more than %llu sectors reserved",
431                       should_not_have_added, disk_res ? disk_res->sectors : 0)) {
432                 atomic64_sub(should_not_have_added, &c->sectors_available);
433                 added -= should_not_have_added;
434                 ret = -1;
435         }
436
437         if (added > 0) {
438                 disk_res->sectors               -= added;
439                 fs_usage->online_reserved       -= added;
440         }
441
442         preempt_disable();
443         acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
444                  (u64 *) fs_usage, fs_usage_u64s(c));
445         preempt_enable();
446
447         return ret;
448 }
449
450 static inline void account_bucket(struct bch_fs_usage *fs_usage,
451                                   struct bch_dev_usage *dev_usage,
452                                   enum bch_data_type type,
453                                   int nr, s64 size)
454 {
455         if (type == BCH_DATA_sb || type == BCH_DATA_journal)
456                 fs_usage->hidden        += size;
457
458         dev_usage->buckets[type]        += nr;
459 }
460
461 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
462                                   struct bch_fs_usage *fs_usage,
463                                   struct bucket_mark old, struct bucket_mark new,
464                                   bool gc)
465 {
466         struct bch_dev_usage *u;
467
468         percpu_rwsem_assert_held(&c->mark_lock);
469
470         preempt_disable();
471         u = this_cpu_ptr(ca->usage[gc]);
472
473         if (bucket_type(old))
474                 account_bucket(fs_usage, u, bucket_type(old),
475                                -1, -ca->mi.bucket_size);
476
477         if (bucket_type(new))
478                 account_bucket(fs_usage, u, bucket_type(new),
479                                1, ca->mi.bucket_size);
480
481         u->buckets_alloc +=
482                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
483         u->buckets_unavailable +=
484                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
485
486         u->buckets_ec += (int) new.stripe - (int) old.stripe;
487         u->sectors_ec += bucket_stripe_sectors(new) -
488                          bucket_stripe_sectors(old);
489
490         u->sectors[old.data_type] -= old.dirty_sectors;
491         u->sectors[new.data_type] += new.dirty_sectors;
492         u->sectors[BCH_DATA_cached] +=
493                 (int) new.cached_sectors - (int) old.cached_sectors;
494         u->sectors_fragmented +=
495                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
496         preempt_enable();
497
498         if (!is_available_bucket(old) && is_available_bucket(new))
499                 bch2_wake_allocator(ca);
500 }
501
502 __flatten
503 void bch2_dev_usage_from_buckets(struct bch_fs *c)
504 {
505         struct bch_dev *ca;
506         struct bucket_mark old = { .v.counter = 0 };
507         struct bucket_array *buckets;
508         struct bucket *g;
509         unsigned i;
510         int cpu;
511
512         c->usage_base->hidden = 0;
513
514         for_each_member_device(ca, c, i) {
515                 for_each_possible_cpu(cpu)
516                         memset(per_cpu_ptr(ca->usage[0], cpu), 0,
517                                sizeof(*ca->usage[0]));
518
519                 buckets = bucket_array(ca);
520
521                 for_each_bucket(g, buckets)
522                         bch2_dev_usage_update(c, ca, c->usage_base,
523                                               old, g->mark, false);
524         }
525 }
526
527 static inline int update_replicas(struct bch_fs *c,
528                                   struct bch_fs_usage *fs_usage,
529                                   struct bch_replicas_entry *r,
530                                   s64 sectors)
531 {
532         int idx = bch2_replicas_entry_idx(c, r);
533
534         if (idx < 0)
535                 return -1;
536
537         if (!fs_usage)
538                 return 0;
539
540         fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
541         fs_usage->replicas[idx]         += sectors;
542         return 0;
543 }
544
545 static inline void update_cached_sectors(struct bch_fs *c,
546                                          struct bch_fs_usage *fs_usage,
547                                          unsigned dev, s64 sectors)
548 {
549         struct bch_replicas_padded r;
550
551         bch2_replicas_entry_cached(&r.e, dev);
552
553         update_replicas(c, fs_usage, &r.e, sectors);
554 }
555
556 static struct replicas_delta_list *
557 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
558 {
559         struct replicas_delta_list *d = trans->fs_usage_deltas;
560         unsigned new_size = d ? (d->size + more) * 2 : 128;
561
562         if (!d || d->used + more > d->size) {
563                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
564                 BUG_ON(!d);
565
566                 d->size = new_size;
567                 trans->fs_usage_deltas = d;
568         }
569         return d;
570 }
571
572 static inline void update_replicas_list(struct btree_trans *trans,
573                                         struct bch_replicas_entry *r,
574                                         s64 sectors)
575 {
576         struct replicas_delta_list *d;
577         struct replicas_delta *n;
578         unsigned b;
579
580         if (!sectors)
581                 return;
582
583         b = replicas_entry_bytes(r) + 8;
584         d = replicas_deltas_realloc(trans, b);
585
586         n = (void *) d->d + d->used;
587         n->delta = sectors;
588         memcpy(&n->r, r, replicas_entry_bytes(r));
589         d->used += b;
590 }
591
592 static inline void update_cached_sectors_list(struct btree_trans *trans,
593                                               unsigned dev, s64 sectors)
594 {
595         struct bch_replicas_padded r;
596
597         bch2_replicas_entry_cached(&r.e, dev);
598
599         update_replicas_list(trans, &r.e, sectors);
600 }
601
602 static inline struct replicas_delta *
603 replicas_delta_next(struct replicas_delta *d)
604 {
605         return (void *) d + replicas_entry_bytes(&d->r) + 8;
606 }
607
608 int bch2_replicas_delta_list_apply(struct bch_fs *c,
609                                    struct bch_fs_usage *fs_usage,
610                                    struct replicas_delta_list *r)
611 {
612         struct replicas_delta *d = r->d;
613         struct replicas_delta *top = (void *) r->d + r->used;
614         unsigned i;
615
616         for (d = r->d; d != top; d = replicas_delta_next(d))
617                 if (update_replicas(c, fs_usage, &d->r, d->delta)) {
618                         top = d;
619                         goto unwind;
620                 }
621
622         if (!fs_usage)
623                 return 0;
624
625         fs_usage->nr_inodes += r->nr_inodes;
626
627         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
628                 fs_usage->reserved += r->persistent_reserved[i];
629                 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
630         }
631
632         return 0;
633 unwind:
634         for (d = r->d; d != top; d = replicas_delta_next(d))
635                 update_replicas(c, fs_usage, &d->r, -d->delta);
636         return -1;
637 }
638
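/*
 * Run a mark function against the live bucket state, the gc copy, or both:
 * normal (non BTREE_TRIGGER_GC) callers update the live state and, if gc has
 * already visited @pos, the gc copy as well; BTREE_TRIGGER_GC callers update
 * only the gc copy.
 */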
639 #define do_mark_fn(fn, c, pos, flags, ...)                              \
640 ({                                                                      \
641         int gc, ret = 0;                                                \
642                                                                         \
643         percpu_rwsem_assert_held(&c->mark_lock);                        \
644                                                                         \
645         for (gc = 0; gc < 2 && !ret; gc++)                              \
646                 if (!gc == !(flags & BTREE_TRIGGER_GC) ||               \
647                     (gc && gc_visited(c, pos)))                         \
648                         ret = fn(c, __VA_ARGS__, gc);                   \
649         ret;                                                            \
650 })
651
652 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
653                                     size_t b, struct bucket_mark *ret,
654                                     bool gc)
655 {
656         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
657         struct bucket *g = __bucket(ca, b, gc);
658         struct bucket_mark old, new;
659
660         old = bucket_cmpxchg(g, new, ({
661                 BUG_ON(!is_available_bucket(new));
662
663                 new.owned_by_allocator  = true;
664                 new.data_type           = 0;
665                 new.cached_sectors      = 0;
666                 new.dirty_sectors       = 0;
667                 new.gen++;
668         }));
669
670         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
671
672         if (old.cached_sectors)
673                 update_cached_sectors(c, fs_usage, ca->dev_idx,
674                                       -((s64) old.cached_sectors));
675
676         if (!gc)
677                 *ret = old;
678         return 0;
679 }
680
681 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
682                             size_t b, struct bucket_mark *old)
683 {
684         do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
685                    ca, b, old);
686
687         if (!old->owned_by_allocator && old->cached_sectors)
688                 trace_invalidate(ca, bucket_to_sector(ca, b),
689                                  old->cached_sectors);
690 }
691
692 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
693                                     size_t b, bool owned_by_allocator,
694                                     bool gc)
695 {
696         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
697         struct bucket *g = __bucket(ca, b, gc);
698         struct bucket_mark old, new;
699
700         old = bucket_cmpxchg(g, new, ({
701                 new.owned_by_allocator  = owned_by_allocator;
702         }));
703
704         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
705
706         BUG_ON(!gc &&
707                !owned_by_allocator && !old.owned_by_allocator);
708
709         return 0;
710 }
711
712 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
713                             size_t b, bool owned_by_allocator,
714                             struct gc_pos pos, unsigned flags)
715 {
716         preempt_disable();
717
718         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
719                    ca, b, owned_by_allocator);
720
721         preempt_enable();
722 }
723
724 static int bch2_mark_alloc(struct bch_fs *c,
725                            struct bkey_s_c old, struct bkey_s_c new,
726                            struct bch_fs_usage *fs_usage,
727                            u64 journal_seq, unsigned flags)
728 {
729         bool gc = flags & BTREE_TRIGGER_GC;
730         struct bkey_alloc_unpacked u;
731         struct bch_dev *ca;
732         struct bucket *g;
733         struct bucket_mark old_m, m;
734
735         /* We don't do anything for deletions - should we? */
736         if (new.k->type != KEY_TYPE_alloc)
737                 return 0;
738
739         /*
740          * alloc btree is read in by bch2_alloc_read, not gc:
741          */
742         if ((flags & BTREE_TRIGGER_GC) &&
743             !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
744                 return 0;
745
746         ca = bch_dev_bkey_exists(c, new.k->p.inode);
747
748         if (new.k->p.offset >= ca->mi.nbuckets)
749                 return 0;
750
751         g = __bucket(ca, new.k->p.offset, gc);
752         u = bch2_alloc_unpack(new);
753
754         old_m = bucket_cmpxchg(g, m, ({
755                 m.gen                   = u.gen;
756                 m.data_type             = u.data_type;
757                 m.dirty_sectors         = u.dirty_sectors;
758                 m.cached_sectors        = u.cached_sectors;
759
760                 if (journal_seq) {
761                         m.journal_seq_valid     = 1;
762                         m.journal_seq           = journal_seq;
763                 }
764         }));
765
766         bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);
767
768         g->io_time[READ]        = u.read_time;
769         g->io_time[WRITE]       = u.write_time;
770         g->oldest_gen           = u.oldest_gen;
771         g->gen_valid            = 1;
772
773         /*
774          * need to know if we're getting called from the invalidate path or
775          * not:
776          */
777
778         if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
779             old_m.cached_sectors) {
780                 update_cached_sectors(c, fs_usage, ca->dev_idx,
781                                       -old_m.cached_sectors);
782                 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
783                                  old_m.cached_sectors);
784         }
785
786         return 0;
787 }
788
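/*
 * Saturating add for 16 bit bucket sector counts: clamps the result to
 * U16_MAX and evaluates to true if the addition overflowed.
 */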
789 #define checked_add(a, b)                                       \
790 ({                                                              \
791         unsigned _res = (unsigned) (a) + (b);                   \
792         bool overflow = _res > U16_MAX;                         \
793         if (overflow)                                           \
794                 _res = U16_MAX;                                 \
795         (a) = _res;                                             \
796         overflow;                                               \
797 })
798
799 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
800                                        size_t b, enum bch_data_type data_type,
801                                        unsigned sectors, bool gc)
802 {
803         struct bucket *g = __bucket(ca, b, gc);
804         struct bucket_mark old, new;
805         bool overflow;
806
807         BUG_ON(data_type != BCH_DATA_sb &&
808                data_type != BCH_DATA_journal);
809
810         old = bucket_cmpxchg(g, new, ({
811                 new.data_type   = data_type;
812                 overflow = checked_add(new.dirty_sectors, sectors);
813         }));
814
815         bch2_fs_inconsistent_on(old.data_type &&
816                                 old.data_type != data_type, c,
817                 "different types of data in same bucket: %s, %s",
818                 bch2_data_types[old.data_type],
819                 bch2_data_types[data_type]);
820
821         bch2_fs_inconsistent_on(overflow, c,
822                 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
823                 ca->dev_idx, b, new.gen,
824                 bch2_data_types[old.data_type ?: data_type],
825                 old.dirty_sectors, sectors);
826
827         if (c)
828                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
829                                       old, new, gc);
830
831         return 0;
832 }
833
834 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
835                                size_t b, enum bch_data_type type,
836                                unsigned sectors, struct gc_pos pos,
837                                unsigned flags)
838 {
839         BUG_ON(type != BCH_DATA_sb &&
840                type != BCH_DATA_journal);
841
842         preempt_disable();
843
844         if (likely(c)) {
845                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
846                            ca, b, type, sectors);
847         } else {
848                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
849         }
850
851         preempt_enable();
852 }
853
854 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
855 {
856         return DIV_ROUND_UP(sectors * n, d);
857 }
858
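/*
 * Change in on disk (possibly compressed) sectors for a pointer when the live
 * portion of its extent changes by @delta sectors at @offset: sector counts
 * are scaled by the pointer's compression ratio n/d, and the overwrite/split
 * cases account for the extent being trimmed or having its middle punched out.
 */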
859 static s64 __ptr_disk_sectors_delta(unsigned old_size,
860                                     unsigned offset, s64 delta,
861                                     unsigned flags,
862                                     unsigned n, unsigned d)
863 {
864         BUG_ON(!n || !d);
865
866         if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
867                 BUG_ON(offset + -delta > old_size);
868
869                 return -disk_sectors_scaled(n, d, old_size) +
870                         disk_sectors_scaled(n, d, offset) +
871                         disk_sectors_scaled(n, d, old_size - offset + delta);
872         } else if (flags & BTREE_TRIGGER_OVERWRITE) {
873                 BUG_ON(offset + -delta > old_size);
874
875                 return -disk_sectors_scaled(n, d, old_size) +
876                         disk_sectors_scaled(n, d, old_size + delta);
877         } else {
878                 return  disk_sectors_scaled(n, d, delta);
879         }
880 }
881
882 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
883                                   unsigned offset, s64 delta,
884                                   unsigned flags)
885 {
886         return __ptr_disk_sectors_delta(p.crc.live_size,
887                                         offset, delta, flags,
888                                         p.crc.compressed_size,
889                                         p.crc.uncompressed_size);
890 }
891
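/*
 * Sanity check a pointer against the bucket it points into: catch pointer
 * gens newer than the bucket gen, stale dirty pointers, mismatched data types
 * within a bucket, and sector count overflow. Returns -EIO on inconsistency,
 * 1 for a stale cached pointer (which the caller should simply skip), 0
 * otherwise.
 */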
892 static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
893                             const struct bch_extent_ptr *ptr,
894                             s64 sectors, enum bch_data_type ptr_data_type,
895                             u8 bucket_gen, u8 bucket_data_type,
896                             u16 dirty_sectors, u16 cached_sectors)
897 {
898         size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
899         u16 bucket_sectors = !ptr->cached
900                 ? dirty_sectors
901                 : cached_sectors;
902         char buf[200];
903
904         if (gen_after(ptr->gen, bucket_gen)) {
905                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
906                         "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
907                         "while marking %s",
908                         ptr->dev, bucket_nr, bucket_gen,
909                         bch2_data_types[bucket_data_type ?: ptr_data_type],
910                         ptr->gen,
911                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
912                 return -EIO;
913         }
914
915         if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
916                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
917                         "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
918                         "while marking %s",
919                         ptr->dev, bucket_nr, bucket_gen,
920                         bch2_data_types[bucket_data_type ?: ptr_data_type],
921                         ptr->gen,
922                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
923                 return -EIO;
924         }
925
926         if (bucket_gen != ptr->gen && !ptr->cached) {
927                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
928                         "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
929                         "while marking %s",
930                         ptr->dev, bucket_nr, bucket_gen,
931                         bch2_data_types[bucket_data_type ?: ptr_data_type],
932                         ptr->gen,
933                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
934                 return -EIO;
935         }
936
937         if (bucket_gen != ptr->gen)
938                 return 1;
939
940         if (bucket_data_type && ptr_data_type &&
941             bucket_data_type != ptr_data_type) {
942                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
943                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
944                         "while marking %s",
945                         ptr->dev, bucket_nr, bucket_gen,
946                         bch2_data_types[bucket_data_type],
947                         bch2_data_types[ptr_data_type],
948                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
949                 return -EIO;
950         }
951
952         if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
953                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
954                         "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
955                         "while marking %s",
956                         ptr->dev, bucket_nr, bucket_gen,
957                         bch2_data_types[bucket_data_type ?: ptr_data_type],
958                         bucket_sectors, sectors,
959                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
960                 return -EIO;
961         }
962
963         return 0;
964 }
965
966 static int bucket_set_stripe(struct bch_fs *c, struct bkey_s_c k,
967                              unsigned ptr_idx,
968                              struct bch_fs_usage *fs_usage,
969                              u64 journal_seq, unsigned flags,
970                              bool enabled)
971 {
972         const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
973         unsigned nr_data = s->nr_blocks - s->nr_redundant;
974         bool parity = ptr_idx >= nr_data;
975         const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
976         bool gc = flags & BTREE_TRIGGER_GC;
977         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
978         struct bucket *g = PTR_BUCKET(ca, ptr, gc);
979         struct bucket_mark new, old;
980         char buf[200];
981         int ret;
982
983         if (enabled)
984                 g->ec_redundancy = s->nr_redundant;
985
986         old = bucket_cmpxchg(g, new, ({
987                 ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
988                                        new.dirty_sectors, new.cached_sectors);
989                 if (ret)
990                         return ret;
991
992                 if (new.stripe && enabled)
993                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
994                                       "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
995                                       ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
996                                       (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
997
998                 if (!new.stripe && !enabled)
999                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1000                                       "bucket %u:%zu gen %u: deleting stripe but not marked\n%s",
1001                                       ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
1002                                       (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
1003
1004                 new.stripe                      = enabled;
1005
1006                 if ((flags & BTREE_TRIGGER_GC) && parity) {
1007                         new.data_type = enabled ? BCH_DATA_parity : 0;
1008                         new.dirty_sectors = enabled ? le16_to_cpu(s->sectors) : 0;
1009                 }
1010
1011                 if (journal_seq) {
1012                         new.journal_seq_valid   = 1;
1013                         new.journal_seq         = journal_seq;
1014                 }
1015         }));
1016
1017         if (!enabled)
1018                 g->ec_redundancy = 0;
1019
1020         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
1021         return 0;
1022 }
1023
1024 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1025                           const struct bch_extent_ptr *ptr,
1026                           s64 sectors, enum bch_data_type ptr_data_type,
1027                           u8 bucket_gen, u8 *bucket_data_type,
1028                           u16 *dirty_sectors, u16 *cached_sectors)
1029 {
1030         u16 *dst_sectors = !ptr->cached
1031                 ? dirty_sectors
1032                 : cached_sectors;
1033         int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
1034                                    bucket_gen, *bucket_data_type,
1035                                    *dirty_sectors, *cached_sectors);
1036
1037         if (ret)
1038                 return ret;
1039
1040         *dst_sectors += sectors;
1041         *bucket_data_type = *dirty_sectors || *cached_sectors
1042                 ? ptr_data_type : 0;
1043         return 0;
1044 }
1045
1046 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1047                              struct extent_ptr_decoded p,
1048                              s64 sectors, enum bch_data_type data_type,
1049                              struct bch_fs_usage *fs_usage,
1050                              u64 journal_seq, unsigned flags)
1051 {
1052         bool gc = flags & BTREE_TRIGGER_GC;
1053         struct bucket_mark old, new;
1054         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1055         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
1056         u8 bucket_data_type;
1057         u64 v;
1058         int ret;
1059
1060         v = atomic64_read(&g->_mark.v);
1061         do {
1062                 new.v.counter = old.v.counter = v;
1063                 bucket_data_type = new.data_type;
1064
1065                 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
1066                                      &bucket_data_type,
1067                                      &new.dirty_sectors,
1068                                      &new.cached_sectors);
1069                 if (ret)
1070                         return ret;
1071
1072                 new.data_type = bucket_data_type;
1073
1074                 if (journal_seq) {
1075                         new.journal_seq_valid = 1;
1076                         new.journal_seq = journal_seq;
1077                 }
1078
1079                 if (flags & BTREE_TRIGGER_NOATOMIC) {
1080                         g->_mark = new;
1081                         break;
1082                 }
1083         } while ((v = atomic64_cmpxchg(&g->_mark.v,
1084                               old.v.counter,
1085                               new.v.counter)) != old.v.counter);
1086
1087         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
1088
1089         BUG_ON(!gc && bucket_became_unavailable(old, new));
1090
1091         return 0;
1092 }
1093
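/*
 * Account sectors referenced via an erasure coded stripe pointer: update the
 * in-memory stripe's per-block sector counts and blocks_nonempty (re-keying
 * it in the stripes heap outside of gc), then charge the sectors to the
 * stripe's replicas entry.
 */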
1094 static int bch2_mark_stripe_ptr(struct bch_fs *c,
1095                                 struct bch_extent_stripe_ptr p,
1096                                 enum bch_data_type data_type,
1097                                 struct bch_fs_usage *fs_usage,
1098                                 s64 sectors, unsigned flags)
1099 {
1100         bool gc = flags & BTREE_TRIGGER_GC;
1101         struct bch_replicas_padded r;
1102         struct stripe *m;
1103         unsigned i, blocks_nonempty = 0;
1104
1105         m = genradix_ptr(&c->stripes[gc], p.idx);
1106
1107         spin_lock(&c->ec_stripes_heap_lock);
1108
1109         if (!m || !m->alive) {
1110                 spin_unlock(&c->ec_stripes_heap_lock);
1111                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1112                                     (u64) p.idx);
1113                 return -EIO;
1114         }
1115
1116         m->block_sectors[p.block] += sectors;
1117
1118         r = m->r;
1119
1120         for (i = 0; i < m->nr_blocks; i++)
1121                 blocks_nonempty += m->block_sectors[i] != 0;
1122
1123         if (m->blocks_nonempty != blocks_nonempty) {
1124                 m->blocks_nonempty = blocks_nonempty;
1125                 if (!gc)
1126                         bch2_stripes_heap_update(c, m, p.idx);
1127         }
1128
1129         spin_unlock(&c->ec_stripes_heap_lock);
1130
1131         r.e.data_type = data_type;
1132         update_replicas(c, fs_usage, &r.e, sectors);
1133
1134         return 0;
1135 }
1136
1137 static int bch2_mark_extent(struct bch_fs *c,
1138                             struct bkey_s_c old, struct bkey_s_c new,
1139                             unsigned offset, s64 sectors,
1140                             enum bch_data_type data_type,
1141                             struct bch_fs_usage *fs_usage,
1142                             unsigned journal_seq, unsigned flags)
1143 {
1144         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1145         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1146         const union bch_extent_entry *entry;
1147         struct extent_ptr_decoded p;
1148         struct bch_replicas_padded r;
1149         s64 dirty_sectors = 0;
1150         bool stale;
1151         int ret;
1152
1153         r.e.data_type   = data_type;
1154         r.e.nr_devs     = 0;
1155         r.e.nr_required = 1;
1156
1157         BUG_ON(!sectors);
1158
1159         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1160                 s64 disk_sectors = data_type == BCH_DATA_btree
1161                         ? sectors
1162                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1163
1164                 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
1165                                         fs_usage, journal_seq, flags);
1166                 if (ret < 0)
1167                         return ret;
1168
1169                 stale = ret > 0;
1170
1171                 if (p.ptr.cached) {
1172                         if (!stale)
1173                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1174                                                       disk_sectors);
1175                 } else if (!p.has_ec) {
1176                         dirty_sectors          += disk_sectors;
1177                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1178                 } else {
1179                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1180                                         fs_usage, disk_sectors, flags);
1181                         if (ret)
1182                                 return ret;
1183
1184                         /*
1185                          * There may be other dirty pointers in this extent, but
1186                          * if so they're not required for mounting if we have an
1187                          * erasure coded pointer in this extent:
1188                          */
1189                         r.e.nr_required = 0;
1190                 }
1191         }
1192
1193         if (r.e.nr_devs)
1194                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1195
1196         return 0;
1197 }
1198
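/*
 * Keep the in-memory stripe (c->stripes[]) in sync with a stripe key being
 * created, updated or deleted: set or clear the stripe bit on the buckets of
 * any pointers that changed, refresh the cached block sector counts, update
 * the stripe's position in the stripes heap outside of gc, and (in gc only)
 * move the stripe's replicas accounting from the old key to the new one.
 */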
1199 static int bch2_mark_stripe(struct bch_fs *c,
1200                             struct bkey_s_c old, struct bkey_s_c new,
1201                             struct bch_fs_usage *fs_usage,
1202                             u64 journal_seq, unsigned flags)
1203 {
1204         bool gc = flags & BTREE_TRIGGER_GC;
1205         size_t idx = new.k->p.offset;
1206         const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1207                 ? bkey_s_c_to_stripe(old).v : NULL;
1208         const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1209                 ? bkey_s_c_to_stripe(new).v : NULL;
1210         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1211         unsigned i;
1212         int ret;
1213
1214         if (!m || (old_s && !m->alive)) {
1215                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1216                                     idx);
1217                 return -1;
1218         }
1219
1220         if (!new_s) {
1221                 /* Deleting: */
1222                 for (i = 0; i < old_s->nr_blocks; i++) {
1223                         ret = bucket_set_stripe(c, old, i, fs_usage,
1224                                                 journal_seq, flags, false);
1225                         if (ret)
1226                                 return ret;
1227                 }
1228
1229                 if (!gc && m->on_heap) {
1230                         spin_lock(&c->ec_stripes_heap_lock);
1231                         bch2_stripes_heap_del(c, m, idx);
1232                         spin_unlock(&c->ec_stripes_heap_lock);
1233                 }
1234
1235                 if (gc)
1236                         update_replicas(c, fs_usage, &m->r.e,
1237                                         -((s64) m->sectors * m->nr_redundant));
1238
1239                 memset(m, 0, sizeof(*m));
1240         } else {
1241                 BUG_ON(old_s && new_s->nr_blocks != old_s->nr_blocks);
1242                 BUG_ON(old_s && new_s->nr_redundant != old_s->nr_redundant);
1243
1244                 for (i = 0; i < new_s->nr_blocks; i++) {
1245                         if (!old_s ||
1246                             memcmp(new_s->ptrs + i,
1247                                    old_s->ptrs + i,
1248                                    sizeof(struct bch_extent_ptr))) {
1249
1250                                 if (old_s) {
1251                                         ret = bucket_set_stripe(c, old, i, fs_usage,
1252                                                                 journal_seq, flags, false);
1253                                         if (ret)
1254                                                 return ret;
1255                                 }
1256                                 ret = bucket_set_stripe(c, new, i, fs_usage,
1257                                                         journal_seq, flags, true);
1258                                 if (ret)
1259                                         return ret;
1260                         }
1261                 }
1262
1263                 m->alive        = true;
1264                 m->sectors      = le16_to_cpu(new_s->sectors);
1265                 m->algorithm    = new_s->algorithm;
1266                 m->nr_blocks    = new_s->nr_blocks;
1267                 m->nr_redundant = new_s->nr_redundant;
1268                 m->blocks_nonempty = 0;
1269
1270                 for (i = 0; i < new_s->nr_blocks; i++) {
1271                         m->block_sectors[i] =
1272                                 stripe_blockcount_get(new_s, i);
1273                         m->blocks_nonempty += !!m->block_sectors[i];
1274                 }
1275
1276                 if (gc && old_s)
1277                         update_replicas(c, fs_usage, &m->r.e,
1278                                         -((s64) m->sectors * m->nr_redundant));
1279
1280                 bch2_bkey_to_replicas(&m->r.e, new);
1281
1282                 if (gc)
1283                         update_replicas(c, fs_usage, &m->r.e,
1284                                         ((s64) m->sectors * m->nr_redundant));
1285
1286                 if (!gc) {
1287                         spin_lock(&c->ec_stripes_heap_lock);
1288                         bch2_stripes_heap_update(c, m, idx);
1289                         spin_unlock(&c->ec_stripes_heap_lock);
1290                 }
1291         }
1292
1293         return 0;
1294 }
1295
1296 static int bch2_mark_key_locked(struct bch_fs *c,
1297                    struct bkey_s_c old,
1298                    struct bkey_s_c new,
1299                    unsigned offset, s64 sectors,
1300                    struct bch_fs_usage *fs_usage,
1301                    u64 journal_seq, unsigned flags)
1302 {
1303         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1304         int ret = 0;
1305
1306         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1307
1308         preempt_disable();
1309
1310         if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1311                 fs_usage = fs_usage_ptr(c, journal_seq,
1312                                         flags & BTREE_TRIGGER_GC);
1313
1314         switch (k.k->type) {
1315         case KEY_TYPE_alloc:
1316                 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1317                 break;
1318         case KEY_TYPE_btree_ptr:
1319         case KEY_TYPE_btree_ptr_v2:
1320                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1321                         ?  c->opts.btree_node_size
1322                         : -c->opts.btree_node_size;
1323
1324                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1325                                 BCH_DATA_btree, fs_usage, journal_seq, flags);
1326                 break;
1327         case KEY_TYPE_extent:
1328         case KEY_TYPE_reflink_v:
1329                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1330                                 BCH_DATA_user, fs_usage, journal_seq, flags);
1331                 break;
1332         case KEY_TYPE_stripe:
1333                 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1334                 break;
1335         case KEY_TYPE_inode:
1336                 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1337                         fs_usage->nr_inodes++;
1338                 else
1339                         fs_usage->nr_inodes--;
1340                 break;
1341         case KEY_TYPE_reservation: {
1342                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1343
1344                 sectors *= replicas;
1345                 replicas = clamp_t(unsigned, replicas, 1,
1346                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1347
1348                 fs_usage->reserved                              += sectors;
1349                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1350                 break;
1351         }
1352         }
1353
1354         preempt_enable();
1355
1356         return ret;
1357 }
1358
1359 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1360                   unsigned offset, s64 sectors,
1361                   struct bch_fs_usage *fs_usage,
1362                   u64 journal_seq, unsigned flags)
1363 {
1364         struct bkey deleted;
1365         struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1366         int ret;
1367
1368         bkey_init(&deleted);
1369
1370         percpu_down_read(&c->mark_lock);
1371         ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1372                                    fs_usage, journal_seq,
1373                                    BTREE_TRIGGER_INSERT|flags);
1374         percpu_up_read(&c->mark_lock);
1375
1376         return ret;
1377 }
1378
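/*
 * Run marking for a key being inserted by a btree transaction: mark the new
 * key, and for extent btrees walk the existing keys it overlaps, marking the
 * overwritten portions with negative sector counts. For non extent btrees the
 * existing key at the same position (from the node or the key cache), if any,
 * is marked as the overwrite.
 */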
1379 int bch2_mark_update(struct btree_trans *trans,
1380                      struct btree_iter *iter,
1381                      struct bkey_i *new,
1382                      struct bch_fs_usage *fs_usage,
1383                      unsigned flags)
1384 {
1385         struct bch_fs           *c = trans->c;
1386         struct btree            *b = iter_l(iter)->b;
1387         struct btree_node_iter  node_iter = iter_l(iter)->iter;
1388         struct bkey_packed      *_old;
1389         struct bkey_s_c         old;
1390         struct bkey             unpacked;
1391         int ret = 0;
1392
1393         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1394                 return 0;
1395
1396         if (!btree_node_type_needs_gc(iter->btree_id))
1397                 return 0;
1398
1399         bkey_init(&unpacked);
1400         old = (struct bkey_s_c) { &unpacked, NULL };
1401
1402         if (!btree_node_type_is_extents(iter->btree_id)) {
1403                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1404                         _old = bch2_btree_node_iter_peek(&node_iter, b);
1405                         if (_old)
1406                                 old = bkey_disassemble(b, _old, &unpacked);
1407                 } else {
1408                         struct bkey_cached *ck = (void *) iter->l[0].b;
1409
1410                         if (ck->valid)
1411                                 old = bkey_i_to_s_c(ck->k);
1412                 }
1413
1414                 if (old.k->type == new->k.type) {
1415                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1416                                 fs_usage, trans->journal_res.seq,
1417                                 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1418
1419                 } else {
1420                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1421                                 fs_usage, trans->journal_res.seq,
1422                                 BTREE_TRIGGER_INSERT|flags);
1423                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1424                                 fs_usage, trans->journal_res.seq,
1425                                 BTREE_TRIGGER_OVERWRITE|flags);
1426                 }
1427         } else {
1428                 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1429                 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1430                         0, new->k.size,
1431                         fs_usage, trans->journal_res.seq,
1432                         BTREE_TRIGGER_INSERT|flags);
1433
1434                 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
1435                         unsigned offset = 0;
1436                         s64 sectors;
1437
1438                         old = bkey_disassemble(b, _old, &unpacked);
1439                         sectors = -((s64) old.k->size);
1440
1441                         flags |= BTREE_TRIGGER_OVERWRITE;
1442
1443                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1444                                 return 0;
1445
1446                         switch (bch2_extent_overlap(&new->k, old.k)) {
1447                         case BCH_EXTENT_OVERLAP_ALL:
1448                                 offset = 0;
1449                                 sectors = -((s64) old.k->size);
1450                                 break;
1451                         case BCH_EXTENT_OVERLAP_BACK:
1452                                 offset = bkey_start_offset(&new->k) -
1453                                         bkey_start_offset(old.k);
1454                                 sectors = bkey_start_offset(&new->k) -
1455                                         old.k->p.offset;
1456                                 break;
1457                         case BCH_EXTENT_OVERLAP_FRONT:
1458                                 offset = 0;
1459                                 sectors = bkey_start_offset(old.k) -
1460                                         new->k.p.offset;
1461                                 break;
1462                         case BCH_EXTENT_OVERLAP_MIDDLE:
1463                                 offset = bkey_start_offset(&new->k) -
1464                                         bkey_start_offset(old.k);
1465                                 sectors = -((s64) new->k.size);
1466                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1467                                 break;
1468                         }
1469
1470                         BUG_ON(sectors >= 0);
1471
1472                         ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1473                                         offset, sectors, fs_usage,
1474                                         trans->journal_res.seq, flags) ?: 1;
1475                         if (ret <= 0)
1476                                 break;
1477
1478                         bch2_btree_node_iter_advance(&node_iter, b);
1479                 }
1480         }
1481
1482         return ret;
1483 }
1484
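/*
 * Apply the usage delta accumulated for a transaction; if disk usage grew by
 * more than the transaction's reservation, print (at most once) the keys
 * being inserted and the keys they overlap, to aid debugging.
 */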
1485 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1486                                struct bch_fs_usage *fs_usage)
1487 {
1488         struct bch_fs *c = trans->c;
1489         struct btree_insert_entry *i;
1490         static int warned_disk_usage = 0;
1491         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1492         char buf[200];
1493
1494         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1495                                  trans->journal_res.seq) ||
1496             warned_disk_usage ||
1497             xchg(&warned_disk_usage, 1))
1498                 return;
1499
1500         bch_err(c, "disk usage increased more than %llu sectors reserved",
1501                 disk_res_sectors);
1502
1503         trans_for_each_update(trans, i) {
1504                 pr_err("while inserting");
1505                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1506                 pr_err("%s", buf);
1507                 pr_err("overlapping with");
1508
1509                 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1510                         struct btree            *b = iter_l(i->iter)->b;
1511                         struct btree_node_iter  node_iter = iter_l(i->iter)->iter;
1512                         struct bkey_packed      *_k;
1513
1514                         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1515                                 struct bkey             unpacked;
1516                                 struct bkey_s_c         k;
1517
1518                                 pr_info("_k %px format %u", _k, _k->format);
1519                                 k = bkey_disassemble(b, _k, &unpacked);
1520
1521                                 if (btree_node_is_extents(b)
1522                                     ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1523                                     : bkey_cmp(i->k->k.p, k.k->p))
1524                                         break;
1525
1526                                 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1527                                 pr_err("%s", buf);
1528
1529                                 bch2_btree_node_iter_advance(&node_iter, b);
1530                         }
1531                 } else {
1532                         struct bkey_cached *ck = (void *) i->iter->l[0].b;
1533
1534                         if (ck->valid) {
1535                                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
1536                                 pr_err("%s", buf);
1537                         }
1538                 }
1539         }
1540 }
1541
1542 /* trans_mark: */
1543
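/*
 * Check whether this transaction already has a pending update covering @pos
 * in @btree_id; if so, return its iterator and updated key so the trigger
 * code sees the value as it will be after the transaction commits.
 */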
1544 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1545                             enum btree_id btree_id, struct bpos pos,
1546                             struct bkey_s_c *k)
1547 {
1548         struct btree_insert_entry *i;
1549
1550         trans_for_each_update(trans, i)
1551                 if (i->iter->btree_id == btree_id &&
1552                     (btree_node_type_is_extents(btree_id)
1553                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1554                        bkey_cmp(pos, i->k->k.p) < 0
1555                      : !bkey_cmp(pos, i->iter->pos))) {
1556                         *k = bkey_i_to_s_c(i->k);
1557                         return i->iter;
1558                 }
1559
1560         return NULL;
1561 }
1562
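/*
 * Get an iterator and key at @pos: a pending update from this transaction if
 * there is one (returns 1), otherwise read from the btree (returns 0).  The
 * alloc btree goes through the key cache; everything else uses
 * BTREE_ITER_SLOTS.
 */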
1563 static int trans_get_key(struct btree_trans *trans,
1564                          enum btree_id btree_id, struct bpos pos,
1565                          struct btree_iter **iter,
1566                          struct bkey_s_c *k)
1567 {
1568         unsigned flags = btree_id != BTREE_ID_ALLOC
1569                 ? BTREE_ITER_SLOTS
1570                 : BTREE_ITER_CACHED;
1571         int ret;
1572
1573         *iter = trans_get_update(trans, btree_id, pos, k);
1574         if (*iter)
1575                 return 1;
1576
1577         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1578                                     flags|BTREE_ITER_INTENT);
1579         *k = __bch2_btree_iter_peek(*iter, flags);
1580         ret = bkey_err(*k);
1581         if (ret)
1582                 bch2_trans_iter_put(trans, *iter);
1583         return ret;
1584 }
1585
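/*
 * Get the alloc key for the bucket @ptr points into, unpacked into @u, plus
 * an iterator for updating it.  If the cached btree key hasn't been filled
 * in (BTREE_ITER_CACHED_NOFILL), the current state is taken from the
 * in-memory bucket mark instead.
 */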
1586 static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
1587                                          const struct bch_extent_ptr *ptr,
1588                                          struct bkey_alloc_unpacked *u)
1589 {
1590         struct bch_fs *c = trans->c;
1591         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1592         struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1593         struct bucket *g;
1594         struct btree_iter *iter;
1595         struct bkey_s_c k;
1596         int ret;
1597
1598         iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k);
1599         if (iter) {
1600                 *u = bch2_alloc_unpack(k);
1601         } else {
1602                 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
1603                                            BTREE_ITER_CACHED|
1604                                            BTREE_ITER_CACHED_NOFILL|
1605                                            BTREE_ITER_INTENT);
1606                 ret = bch2_btree_iter_traverse(iter);
1607                 if (ret) {
1608                         bch2_trans_iter_put(trans, iter);
1609                         return ret;
1610                 }
1611
1612                 percpu_down_read(&c->mark_lock);
1613                 g = bucket(ca, pos.offset);
1614                 *u = alloc_mem_to_key(g, READ_ONCE(g->mark));
1615                 percpu_up_read(&c->mark_lock);
1616         }
1617
1618         *_iter = iter;
1619         return 0;
1620 }
1621
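/*
 * Transactional version of marking a single data pointer: fold the sector
 * delta into the pointed-to bucket's alloc key and queue the repacked key as
 * an update in @trans.
 */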
1622 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1623                         struct bkey_s_c k, struct extent_ptr_decoded p,
1624                         s64 sectors, enum bch_data_type data_type)
1625 {
1626         struct bch_fs *c = trans->c;
1627         struct btree_iter *iter;
1628         struct bkey_alloc_unpacked u;
1629         struct bkey_i_alloc *a;
1630         int ret;
1631
1632         ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1633         if (ret)
1634                 return ret;
1635
1636         ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
1637                              &u.dirty_sectors, &u.cached_sectors);
1638         if (ret)
1639                 goto out;
1640
1641         a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
1642         ret = PTR_ERR_OR_ZERO(a);
1643         if (ret)
1644                 goto out;
1645
1646         bkey_alloc_init(&a->k_i);
1647         a->k.p = iter->pos;
1648         bch2_alloc_pack(a, u);
1649         bch2_trans_update(trans, iter, &a->k_i, 0);
1650 out:
1651         bch2_trans_iter_put(trans, iter);
1652         return ret;
1653 }
1654
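/*
 * Account sectors written through an erasure coded pointer: bump the
 * per-block sector count in the stripe key and credit @sectors to the
 * stripe's replicas entry.
 */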
1655 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1656                         struct bch_extent_stripe_ptr p,
1657                         s64 sectors, enum bch_data_type data_type)
1658 {
1659         struct bch_fs *c = trans->c;
1660         struct btree_iter *iter;
1661         struct bkey_s_c k;
1662         struct bkey_i_stripe *s;
1663         struct bch_replicas_padded r;
1664         int ret = 0;
1665
1666         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1667         if (ret < 0)
1668                 return ret;
1669
1670         if (k.k->type != KEY_TYPE_stripe) {
1671                 bch2_fs_inconsistent(c,
1672                         "pointer to nonexistent stripe %llu",
1673                         (u64) p.idx);
1674                 ret = -EIO;
1675                 goto out;
1676         }
1677
1678         s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1679         ret = PTR_ERR_OR_ZERO(s);
1680         if (ret)
1681                 goto out;
1682
1683         bkey_reassemble(&s->k_i, k);
1684         stripe_blockcount_set(&s->v, p.block,
1685                 stripe_blockcount_get(&s->v, p.block) +
1686                 sectors);
1687         bch2_trans_update(trans, iter, &s->k_i, 0);
1688
1689         bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1690         r.e.data_type = data_type;
1691         update_replicas_list(trans, &r.e, sectors);
1692 out:
1693         bch2_trans_iter_put(trans, iter);
1694         return ret;
1695 }
1696
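/*
 * Transactionally mark an extent or btree pointer: for each pointer, update
 * the bucket's alloc key (and the stripe key, for EC pointers), account
 * cached pointers per device, and accumulate a replicas entry for the dirty
 * pointers.
 */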
1697 static int bch2_trans_mark_extent(struct btree_trans *trans,
1698                         struct bkey_s_c k, unsigned offset,
1699                         s64 sectors, unsigned flags,
1700                         enum bch_data_type data_type)
1701 {
1702         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1703         const union bch_extent_entry *entry;
1704         struct extent_ptr_decoded p;
1705         struct bch_replicas_padded r;
1706         s64 dirty_sectors = 0;
1707         bool stale;
1708         int ret;
1709
1710         r.e.data_type   = data_type;
1711         r.e.nr_devs     = 0;
1712         r.e.nr_required = 1;
1713
1714         BUG_ON(!sectors);
1715
1716         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1717                 s64 disk_sectors = data_type == BCH_DATA_btree
1718                         ? sectors
1719                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1720
1721                 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1722                                               data_type);
1723                 if (ret < 0)
1724                         return ret;
1725
1726                 stale = ret > 0;
1727
1728                 if (p.ptr.cached) {
1729                         if (!stale)
1730                                 update_cached_sectors_list(trans, p.ptr.dev,
1731                                                            disk_sectors);
1732                 } else if (!p.has_ec) {
1733                         dirty_sectors          += disk_sectors;
1734                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1735                 } else {
1736                         ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
1737                                         disk_sectors, data_type);
1738                         if (ret)
1739                                 return ret;
1740
1741                         r.e.nr_required = 0;
1742                 }
1743         }
1744
1745         if (r.e.nr_devs)
1746                 update_replicas_list(trans, &r.e, dirty_sectors);
1747
1748         return 0;
1749 }
1750
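/*
 * Transactionally mark a stripe key: account the parity sectors against the
 * stripe's replicas entry, then update the alloc key of every bucket the
 * stripe points into (adjusting dirty_sectors and data_type for parity
 * buckets, and persisting bucket gens for the data buckets).
 */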
1751 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1752                                   struct bkey_s_c k,
1753                                   unsigned flags)
1754 {
1755         const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
1756         unsigned nr_data = s->nr_blocks - s->nr_redundant;
1757         struct bch_replicas_padded r;
1758         struct bkey_alloc_unpacked u;
1759         struct bkey_i_alloc *a;
1760         struct btree_iter *iter;
1761         bool deleting = flags & BTREE_TRIGGER_OVERWRITE;
1762         s64 sectors = le16_to_cpu(s->sectors);
1763         unsigned i;
1764         int ret = 0;
1765
1766         if (deleting)
1767                 sectors = -sectors;
1768
1769         bch2_bkey_to_replicas(&r.e, k);
1770         update_replicas_list(trans, &r.e, sectors * s->nr_redundant);
1771
1772         /*
1773          * The allocator code doesn't necessarily update bucket gens in the
1774          * btree when it increments them just before handing out new buckets,
1775          * so we need to persist those updates here along with the new stripe:
1776          */
1777
1778         for (i = 0; i < s->nr_blocks && !ret; i++) {
1779                 bool parity = i >= nr_data;
1780
1781                 ret = bch2_trans_start_alloc_update(trans, &iter,
1782                                                     &s->ptrs[i], &u);
1783                 if (ret)
1784                         break;
1785
1786                 if (parity) {
1787                         u.dirty_sectors += sectors;
1788                         u.data_type = u.dirty_sectors
1789                                 ? BCH_DATA_parity
1790                                 : 0;
1791                 }
1792
1793                 a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
1794                 ret = PTR_ERR_OR_ZERO(a);
1795                 if (ret)
1796                         goto put_iter;
1797
1798                 bkey_alloc_init(&a->k_i);
1799                 a->k.p = iter->pos;
1800                 bch2_alloc_pack(a, u);
1801                 bch2_trans_update(trans, iter, &a->k_i, 0);
1802 put_iter:
1803                 bch2_trans_iter_put(trans, iter);
1804         }
1805
1806         return ret;
1807 }
1808
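/* Return the refcount field for key types that have one, or NULL. */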
1809 static __le64 *bkey_refcount(struct bkey_i *k)
1810 {
1811         switch (k->k.type) {
1812         case KEY_TYPE_reflink_v:
1813                 return &bkey_i_to_reflink_v(k)->v.refcount;
1814         case KEY_TYPE_indirect_inline_data:
1815                 return &bkey_i_to_indirect_inline_data(k)->v.refcount;
1816         default:
1817                 return NULL;
1818         }
1819 }
1820
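/*
 * Adjust the refcount of the indirect extent at @idx that @p points to,
 * deleting it when the refcount reaches zero.  Returns the number of sectors
 * handled, since a single indirect extent may cover only part of the
 * requested range.
 */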
1821 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1822                         struct bkey_s_c_reflink_p p,
1823                         u64 idx, unsigned sectors,
1824                         unsigned flags)
1825 {
1826         struct bch_fs *c = trans->c;
1827         struct btree_iter *iter;
1828         struct bkey_s_c k;
1829         struct bkey_i *n;
1830         __le64 *refcount;
1831         s64 ret;
1832
1833         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1834                             POS(0, idx), &iter, &k);
1835         if (ret < 0)
1836                 return ret;
1837
1838         if ((flags & BTREE_TRIGGER_OVERWRITE) &&
1839             (bkey_start_offset(k.k) < idx ||
1840              k.k->p.offset > idx + sectors))
1841                 goto out;
1842
1843         sectors = k.k->p.offset - idx;
1844
1845         n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1846         ret = PTR_ERR_OR_ZERO(n);
1847         if (ret)
1848                 goto err;
1849
1850         bkey_reassemble(n, k);
1851
1852         refcount = bkey_refcount(n);
1853         if (!refcount) {
1854                 bch2_fs_inconsistent(c,
1855                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1856                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1857                 ret = -EIO;
1858                 goto err;
1859         }
1860
1861         le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
1862
1863         if (!*refcount) {
1864                 n->k.type = KEY_TYPE_deleted;
1865                 set_bkey_val_u64s(&n->k, 0);
1866         }
1867
1868         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1869         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1870
1871         bch2_trans_update(trans, iter, n, 0);
1872 out:
1873         ret = sectors;
1874 err:
1875         bch2_trans_iter_put(trans, iter);
1876         return ret;
1877 }
1878
1879 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1880                         struct bkey_s_c_reflink_p p, unsigned offset,
1881                         s64 sectors, unsigned flags)
1882 {
1883         u64 idx = le64_to_cpu(p.v->idx) + offset;
1884         s64 ret = 0;
1885
1886         sectors = abs(sectors);
1887         BUG_ON(offset + sectors > p.k->size);
1888
1889         while (sectors) {
1890                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1891                 if (ret < 0)
1892                         break;
1893
1894                 idx += ret;
1895                 sectors = max_t(s64, 0LL, sectors - ret);
1896                 ret = 0;
1897         }
1898
1899         return ret;
1900 }
1901
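/*
 * Transactional trigger dispatch: translate the effect of inserting or
 * overwriting @k into btree updates (alloc, stripe, reflink keys) and queued
 * replica/inode/reservation deltas, instead of touching the in-memory
 * counters directly.
 */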
1902 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
1903                         unsigned offset, s64 sectors, unsigned flags)
1904 {
1905         struct replicas_delta_list *d;
1906         struct bch_fs *c = trans->c;
1907
1908         switch (k.k->type) {
1909         case KEY_TYPE_btree_ptr:
1910         case KEY_TYPE_btree_ptr_v2:
1911                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1912                         ?  c->opts.btree_node_size
1913                         : -c->opts.btree_node_size;
1914
1915                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1916                                               flags, BCH_DATA_btree);
1917         case KEY_TYPE_extent:
1918         case KEY_TYPE_reflink_v:
1919                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1920                                               flags, BCH_DATA_user);
1921         case KEY_TYPE_stripe:
1922                 return bch2_trans_mark_stripe(trans, k, flags);
1923         case KEY_TYPE_inode:
1924                 d = replicas_deltas_realloc(trans, 0);
1925
1926                 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1927                         d->nr_inodes++;
1928                 else
1929                         d->nr_inodes--;
1930                 return 0;
1931         case KEY_TYPE_reservation: {
1932                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1933
1934                 d = replicas_deltas_realloc(trans, 0);
1935
1936                 sectors *= replicas;
1937                 replicas = clamp_t(unsigned, replicas, 1,
1938                                    ARRAY_SIZE(d->persistent_reserved));
1939
1940                 d->persistent_reserved[replicas - 1] += sectors;
1941                 return 0;
1942         }
1943         case KEY_TYPE_reflink_p:
1944                 return bch2_trans_mark_reflink_p(trans,
1945                                         bkey_s_c_to_reflink_p(k),
1946                                         offset, sectors, flags);
1947         default:
1948                 return 0;
1949         }
1950 }
1951
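/*
 * Run the transactional triggers for an update: mark the key being inserted
 * with BTREE_TRIGGER_INSERT, then the existing key(s) it overwrites (the
 * cached key for key cache iterators, or each overlapping key in the node)
 * with BTREE_TRIGGER_OVERWRITE.
 */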
1952 int bch2_trans_mark_update(struct btree_trans *trans,
1953                            struct btree_iter *iter,
1954                            struct bkey_i *insert,
1955                            unsigned flags)
1956 {
1957         struct btree            *b = iter_l(iter)->b;
1958         struct btree_node_iter  node_iter = iter_l(iter)->iter;
1959         struct bkey_packed      *_k;
1960         int ret;
1961
1962         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1963                 return 0;
1964
1965         if (!btree_node_type_needs_gc(iter->btree_id))
1966                 return 0;
1967
1968         ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
1969                         0, insert->k.size, BTREE_TRIGGER_INSERT);
1970         if (ret)
1971                 return ret;
1972
1973         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
1974                 struct bkey_cached *ck = (void *) iter->l[0].b;
1975
1976                 return bch2_trans_mark_key(trans, bkey_i_to_s_c(ck->k),
1977                                            0, 0, BTREE_TRIGGER_OVERWRITE);
1978         }
1979
1980         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1981                 struct bkey             unpacked;
1982                 struct bkey_s_c         k;
1983                 unsigned                offset = 0;
1984                 s64                     sectors = 0;
1985                 unsigned                flags = BTREE_TRIGGER_OVERWRITE;
1986
1987                 k = bkey_disassemble(b, _k, &unpacked);
1988
1989                 if (btree_node_is_extents(b)
1990                     ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
1991                     : bkey_cmp(insert->k.p, k.k->p))
1992                         break;
1993
1994                 if (btree_node_is_extents(b)) {
1995                         switch (bch2_extent_overlap(&insert->k, k.k)) {
1996                         case BCH_EXTENT_OVERLAP_ALL:
1997                                 offset = 0;
1998                                 sectors = -((s64) k.k->size);
1999                                 break;
2000                         case BCH_EXTENT_OVERLAP_BACK:
2001                                 offset = bkey_start_offset(&insert->k) -
2002                                         bkey_start_offset(k.k);
2003                                 sectors = bkey_start_offset(&insert->k) -
2004                                         k.k->p.offset;
2005                                 break;
2006                         case BCH_EXTENT_OVERLAP_FRONT:
2007                                 offset = 0;
2008                                 sectors = bkey_start_offset(k.k) -
2009                                         insert->k.p.offset;
2010                                 break;
2011                         case BCH_EXTENT_OVERLAP_MIDDLE:
2012                                 offset = bkey_start_offset(&insert->k) -
2013                                         bkey_start_offset(k.k);
2014                                 sectors = -((s64) insert->k.size);
2015                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
2016                                 break;
2017                         }
2018
2019                         BUG_ON(sectors >= 0);
2020                 }
2021
2022                 ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
2023                 if (ret)
2024                         return ret;
2025
2026                 bch2_btree_node_iter_advance(&node_iter, b);
2027         }
2028
2029         return 0;
2030 }
2031
2032 /* Disk reservations: */
2033
2034 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
2035 {
2036         percpu_u64_set(&c->pcpu->sectors_available, 0);
2037
2038         return avail_factor(__bch2_fs_usage_read_short(c).free);
2039 }
2040
2041 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
2042 {
2043         percpu_down_read(&c->mark_lock);
2044         this_cpu_sub(c->usage[0]->online_reserved,
2045                      res->sectors);
2046         percpu_up_read(&c->mark_lock);
2047
2048         res->sectors = 0;
2049 }
2050
2051 #define SECTORS_CACHE   1024
2052
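/*
 * Reserve @sectors of disk space against @res.  Reservations are normally
 * satisfied from a per-cpu cache of sectors_available, refilled from the
 * global counter in roughly SECTORS_CACHE sized chunks; if that runs dry,
 * free space is recalculated under the mark_lock write lock, and the call
 * fails with -ENOSPC unless BCH_DISK_RESERVATION_NOFAIL is set.
 *
 * A rough caller sketch (illustrative only, not taken from this file):
 *
 *	struct disk_reservation res = { 0 };
 *
 *	if (!bch2_disk_reservation_add(c, &res, sectors, 0)) {
 *		// ... do the write that needed the space ...
 *		__bch2_disk_reservation_put(c, &res);
 *	}
 */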
2053 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2054                               unsigned sectors, int flags)
2055 {
2056         struct bch_fs_pcpu *pcpu;
2057         u64 old, v, get;
2058         s64 sectors_available;
2059         int ret;
2060
2061         percpu_down_read(&c->mark_lock);
2062         preempt_disable();
2063         pcpu = this_cpu_ptr(c->pcpu);
2064
2065         if (sectors <= pcpu->sectors_available)
2066                 goto out;
2067
2068         v = atomic64_read(&c->sectors_available);
2069         do {
2070                 old = v;
2071                 get = min((u64) sectors + SECTORS_CACHE, old);
2072
2073                 if (get < sectors) {
2074                         preempt_enable();
2075                         percpu_up_read(&c->mark_lock);
2076                         goto recalculate;
2077                 }
2078         } while ((v = atomic64_cmpxchg(&c->sectors_available,
2079                                        old, old - get)) != old);
2080
2081         pcpu->sectors_available         += get;
2082
2083 out:
2084         pcpu->sectors_available         -= sectors;
2085         this_cpu_add(c->usage[0]->online_reserved, sectors);
2086         res->sectors                    += sectors;
2087
2088         preempt_enable();
2089         percpu_up_read(&c->mark_lock);
2090         return 0;
2091
2092 recalculate:
2093         percpu_down_write(&c->mark_lock);
2094
2095         sectors_available = bch2_recalc_sectors_available(c);
2096
2097         if (sectors <= sectors_available ||
2098             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2099                 atomic64_set(&c->sectors_available,
2100                              max_t(s64, 0, sectors_available - sectors));
2101                 this_cpu_add(c->usage[0]->online_reserved, sectors);
2102                 res->sectors                    += sectors;
2103                 ret = 0;
2104         } else {
2105                 atomic64_set(&c->sectors_available, sectors_available);
2106                 ret = -ENOSPC;
2107         }
2108
2109         percpu_up_write(&c->mark_lock);
2110
2111         return ret;
2112 }
2113
2114 /* Startup/shutdown: */
2115
2116 static void buckets_free_rcu(struct rcu_head *rcu)
2117 {
2118         struct bucket_array *buckets =
2119                 container_of(rcu, struct bucket_array, rcu);
2120
2121         kvpfree(buckets,
2122                 sizeof(struct bucket_array) +
2123                 buckets->nbuckets * sizeof(struct bucket));
2124 }
2125
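/*
 * Allocate (or reallocate, on device resize) the in-memory bucket array, the
 * nouse bitmap and the allocator freelists/heap for @ca; on resize, existing
 * bucket state and freelist entries are carried over, and the old bucket
 * array is freed via RCU.
 */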
2126 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2127 {
2128         struct bucket_array *buckets = NULL, *old_buckets = NULL;
2129         unsigned long *buckets_nouse = NULL;
2130         alloc_fifo      free[RESERVE_NR];
2131         alloc_fifo      free_inc;
2132         alloc_heap      alloc_heap;
2133
2134         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2135                              ca->mi.bucket_size / c->opts.btree_node_size);
2136         /* XXX: these should be tunable */
2137         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
2138         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
2139         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
2140                                       btree_reserve * 2);
2141         bool resize = ca->buckets[0] != NULL;
2142         int ret = -ENOMEM;
2143         unsigned i;
2144
2145         memset(&free,           0, sizeof(free));
2146         memset(&free_inc,       0, sizeof(free_inc));
2147         memset(&alloc_heap,     0, sizeof(alloc_heap));
2148
2149         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
2150                                             nbuckets * sizeof(struct bucket),
2151                                             GFP_KERNEL|__GFP_ZERO)) ||
2152             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2153                                             sizeof(unsigned long),
2154                                             GFP_KERNEL|__GFP_ZERO)) ||
2155             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
2156             !init_fifo(&free[RESERVE_MOVINGGC],
2157                        copygc_reserve, GFP_KERNEL) ||
2158             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2159             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
2160             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2161                 goto err;
2162
2163         buckets->first_bucket   = ca->mi.first_bucket;
2164         buckets->nbuckets       = nbuckets;
2165
2166         bch2_copygc_stop(c);
2167
2168         if (resize) {
2169                 down_write(&c->gc_lock);
2170                 down_write(&ca->bucket_lock);
2171                 percpu_down_write(&c->mark_lock);
2172         }
2173
2174         old_buckets = bucket_array(ca);
2175
2176         if (resize) {
2177                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2178
2179                 memcpy(buckets->b,
2180                        old_buckets->b,
2181                        n * sizeof(struct bucket));
2182                 memcpy(buckets_nouse,
2183                        ca->buckets_nouse,
2184                        BITS_TO_LONGS(n) * sizeof(unsigned long));
2185         }
2186
2187         rcu_assign_pointer(ca->buckets[0], buckets);
2188         buckets = old_buckets;
2189
2190         swap(ca->buckets_nouse, buckets_nouse);
2191
2192         if (resize) {
2193                 percpu_up_write(&c->mark_lock);
2194                 up_write(&c->gc_lock);
2195         }
2196
2197         spin_lock(&c->freelist_lock);
2198         for (i = 0; i < RESERVE_NR; i++) {
2199                 fifo_move(&free[i], &ca->free[i]);
2200                 swap(ca->free[i], free[i]);
2201         }
2202         fifo_move(&free_inc, &ca->free_inc);
2203         swap(ca->free_inc, free_inc);
2204         spin_unlock(&c->freelist_lock);
2205
2206         /* with gc lock held, alloc_heap can't be in use: */
2207         swap(ca->alloc_heap, alloc_heap);
2208
2209         nbuckets = ca->mi.nbuckets;
2210
2211         if (resize)
2212                 up_write(&ca->bucket_lock);
2213
2214         ret = 0;
2215 err:
2216         free_heap(&alloc_heap);
2217         free_fifo(&free_inc);
2218         for (i = 0; i < RESERVE_NR; i++)
2219                 free_fifo(&free[i]);
2220         kvpfree(buckets_nouse,
2221                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2222         if (buckets)
2223                 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2224
2225         return ret;
2226 }
2227
2228 void bch2_dev_buckets_free(struct bch_dev *ca)
2229 {
2230         unsigned i;
2231
2232         free_heap(&ca->alloc_heap);
2233         free_fifo(&ca->free_inc);
2234         for (i = 0; i < RESERVE_NR; i++)
2235                 free_fifo(&ca->free[i]);
2236         kvpfree(ca->buckets_nouse,
2237                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2238         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2239                 sizeof(struct bucket_array) +
2240                 ca->mi.nbuckets * sizeof(struct bucket));
2241
2242         free_percpu(ca->usage[0]);
2243 }
2244
2245 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2246 {
2247         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2248                 return -ENOMEM;
2249
2250         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2251 }