libbcachefs/buckets.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  *   The bucket contains data that we must not discard (either the only copy,
24  *   or one of the 'main copies' for data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
65
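/*
 * In code, each of the transitions above is an atomic update of a bucket's
 * bucket_mark, normally performed through the bucket_cmpxchg() helper (the
 * expanded form of that compare-and-exchange loop is open coded in
 * bch2_mark_pointer() below).  For example, invalidating a bucket
 * (__bch2_invalidate_bucket()) bumps the generation number and zeroes the
 * sector counts in a single atomic step.
 */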
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
79
80 /*
81  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
82  * wraparound:
83  */
84 void bch2_bucket_seq_cleanup(struct bch_fs *c)
85 {
86         u64 journal_seq = atomic64_read(&c->journal.seq);
87         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
88         struct bch_dev *ca;
89         struct bucket_array *buckets;
90         struct bucket *g;
91         struct bucket_mark m;
92         unsigned i;
93
94         if (journal_seq - c->last_bucket_seq_cleanup <
95             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
96                 return;
97
98         c->last_bucket_seq_cleanup = journal_seq;
99
100         for_each_member_device(ca, c, i) {
101                 down_read(&ca->bucket_lock);
102                 buckets = bucket_array(ca);
103
104                 for_each_bucket(g, buckets) {
105                         bucket_cmpxchg(g, m, ({
106                                 if (!m.journal_seq_valid ||
107                                     bucket_needs_journal_commit(m, last_seq_ondisk))
108                                         break;
109
110                                 m.journal_seq_valid = 0;
111                         }));
112                 }
113                 up_read(&ca->bucket_lock);
114         }
115 }
116
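/*
 * Recompute the summary fields of c->usage_base (reserved, btree, data,
 * cached) from the persistent_reserved[] and per-replicas-entry counters,
 * after folding both percpu accumulation buffers into the base.  Takes
 * mark_lock for write.
 */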
117 void bch2_fs_usage_initialize(struct bch_fs *c)
118 {
119         struct bch_fs_usage *usage;
120         unsigned i;
121
122         percpu_down_write(&c->mark_lock);
123         usage = c->usage_base;
124
125         bch2_fs_usage_acc_to_base(c, 0);
126         bch2_fs_usage_acc_to_base(c, 1);
127
128         for (i = 0; i < BCH_REPLICAS_MAX; i++)
129                 usage->reserved += usage->persistent_reserved[i];
130
131         for (i = 0; i < c->replicas.nr; i++) {
132                 struct bch_replicas_entry *e =
133                         cpu_replicas_entry(&c->replicas, i);
134
135                 switch (e->data_type) {
136                 case BCH_DATA_btree:
137                         usage->btree    += usage->replicas[i];
138                         break;
139                 case BCH_DATA_user:
140                         usage->data     += usage->replicas[i];
141                         break;
142                 case BCH_DATA_cached:
143                         usage->cached   += usage->replicas[i];
144                         break;
145                 }
146         }
147
148         percpu_up_write(&c->mark_lock);
149 }
150
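/*
 * Scratch bch_fs_usage buffers: bch2_fs_usage_scratch_get() tries, in order,
 * a non-blocking allocation, the preallocated c->usage_scratch buffer
 * (serialized by usage_scratch_lock), a blocking GFP_NOFS allocation, and
 * finally a blocking acquisition of the scratch buffer.
 * bch2_fs_usage_scratch_put() releases whichever of the two was handed out.
 */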
151 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
152 {
153         if (fs_usage == c->usage_scratch)
154                 mutex_unlock(&c->usage_scratch_lock);
155         else
156                 kfree(fs_usage);
157 }
158
159 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
160 {
161         struct bch_fs_usage *ret;
162         unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
163
164         ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
165         if (ret)
166                 return ret;
167
168         if (mutex_trylock(&c->usage_scratch_lock))
169                 goto out_pool;
170
171         ret = kzalloc(bytes, GFP_NOFS);
172         if (ret)
173                 return ret;
174
175         mutex_lock(&c->usage_scratch_lock);
176 out_pool:
177         ret = c->usage_scratch;
178         memset(ret, 0, bytes);
179         return ret;
180 }
181
182 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
183 {
184         struct bch_dev_usage ret;
185
186         memset(&ret, 0, sizeof(ret));
187         acc_u64s_percpu((u64 *) &ret,
188                         (u64 __percpu *) ca->usage[0],
189                         sizeof(ret) / sizeof(u64));
190
191         return ret;
192 }
193
194 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
195                                                 unsigned journal_seq,
196                                                 bool gc)
197 {
198         return this_cpu_ptr(gc
199                             ? c->usage_gc
200                             : c->usage[journal_seq & 1]);
201 }
202
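/*
 * Read a single usage counter consistently: the current value is the base
 * counter plus the corresponding entry in both percpu accumulation buffers,
 * sampled under the usage_lock seqcount so a concurrent
 * bch2_fs_usage_acc_to_base() can't be observed halfway through.
 */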
203 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
204 {
205         ssize_t offset = v - (u64 *) c->usage_base;
206         unsigned seq;
207         u64 ret;
208
209         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
210         percpu_rwsem_assert_held(&c->mark_lock);
211
212         do {
213                 seq = read_seqcount_begin(&c->usage_lock);
214                 ret = *v +
215                         percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
216                         percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
217         } while (read_seqcount_retry(&c->usage_lock, seq));
218
219         return ret;
220 }
221
222 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
223 {
224         struct bch_fs_usage *ret;
225         unsigned seq, v, u64s = fs_usage_u64s(c);
226 retry:
227         ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
228         if (unlikely(!ret))
229                 return NULL;
230
231         percpu_down_read(&c->mark_lock);
232
233         v = fs_usage_u64s(c);
234         if (unlikely(u64s != v)) {
235                 u64s = v;
236                 percpu_up_read(&c->mark_lock);
237                 kfree(ret);
238                 goto retry;
239         }
240
241         do {
242                 seq = read_seqcount_begin(&c->usage_lock);
243                 memcpy(ret, c->usage_base, u64s * sizeof(u64));
244                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
245                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
246         } while (read_seqcount_retry(&c->usage_lock, seq));
247
248         return ret;
249 }
250
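/*
 * Fold one of the two percpu accumulation buffers (selected by the low bit
 * of the journal sequence number it was written under) into c->usage_base
 * and zero it, under the usage_lock write seqcount.
 */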
251 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
252 {
253         unsigned u64s = fs_usage_u64s(c);
254
255         BUG_ON(idx >= 2);
256
257         preempt_disable();
258         write_seqcount_begin(&c->usage_lock);
259
260         acc_u64s_percpu((u64 *) c->usage_base,
261                         (u64 __percpu *) c->usage[idx], u64s);
262         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
263
264         write_seqcount_end(&c->usage_lock);
265         preempt_enable();
266 }
267
268 void bch2_fs_usage_to_text(struct printbuf *out,
269                            struct bch_fs *c,
270                            struct bch_fs_usage *fs_usage)
271 {
272         unsigned i;
273
274         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
275
276         pr_buf(out, "hidden:\t\t\t\t%llu\n",
277                fs_usage->hidden);
278         pr_buf(out, "data:\t\t\t\t%llu\n",
279                fs_usage->data);
280         pr_buf(out, "cached:\t\t\t\t%llu\n",
281                fs_usage->cached);
282         pr_buf(out, "reserved:\t\t\t%llu\n",
283                fs_usage->reserved);
284         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
285                fs_usage->nr_inodes);
286         pr_buf(out, "online reserved:\t\t%llu\n",
287                fs_usage->online_reserved);
288
289         for (i = 0;
290              i < ARRAY_SIZE(fs_usage->persistent_reserved);
291              i++) {
292                 pr_buf(out, "%u replicas:\n", i + 1);
293                 pr_buf(out, "\treserved:\t\t%llu\n",
294                        fs_usage->persistent_reserved[i]);
295         }
296
297         for (i = 0; i < c->replicas.nr; i++) {
298                 struct bch_replicas_entry *e =
299                         cpu_replicas_entry(&c->replicas, i);
300
301                 pr_buf(out, "\t");
302                 bch2_replicas_entry_to_text(out, e);
303                 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
304         }
305 }
306
307 #define RESERVE_FACTOR  6
308
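/*
 * With RESERVE_FACTOR 6, reserve_factor() inflates a reservation by roughly
 * 1/64th (rounded up to the next 1/64th), i.e. r -> ~r * 65/64, and
 * avail_factor() is approximately its inverse, r -> r * 64/65:
 * e.g. reserve_factor(128) = 130, avail_factor(130) = 128.
 */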
309 static u64 reserve_factor(u64 r)
310 {
311         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
312 }
313
314 static u64 avail_factor(u64 r)
315 {
316         return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
317 }
318
319 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
320 {
321         return min(fs_usage->hidden +
322                    fs_usage->btree +
323                    fs_usage->data +
324                    reserve_factor(fs_usage->reserved +
325                                   fs_usage->online_reserved),
326                    c->capacity);
327 }
328
329 static struct bch_fs_usage_short
330 __bch2_fs_usage_read_short(struct bch_fs *c)
331 {
332         struct bch_fs_usage_short ret;
333         u64 data, reserved;
334
335         ret.capacity = c->capacity -
336                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
337
338         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
339                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
340         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
341                 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
342
343         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
344         ret.free        = ret.capacity - ret.used;
345
346         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
347
348         return ret;
349 }
350
351 struct bch_fs_usage_short
352 bch2_fs_usage_read_short(struct bch_fs *c)
353 {
354         struct bch_fs_usage_short ret;
355
356         percpu_down_read(&c->mark_lock);
357         ret = __bch2_fs_usage_read_short(c);
358         percpu_up_read(&c->mark_lock);
359
360         return ret;
361 }
362
363 static inline int is_unavailable_bucket(struct bucket_mark m)
364 {
365         return !is_available_bucket(m);
366 }
367
368 static inline int is_fragmented_bucket(struct bucket_mark m,
369                                        struct bch_dev *ca)
370 {
371         if (!m.owned_by_allocator &&
372             m.data_type == BCH_DATA_user &&
373             bucket_sectors_used(m))
374                 return max_t(int, 0, (int) ca->mi.bucket_size -
375                              bucket_sectors_used(m));
376         return 0;
377 }
378
379 static inline int bucket_stripe_sectors(struct bucket_mark m)
380 {
381         return m.stripe ? m.dirty_sectors : 0;
382 }
383
384 static inline enum bch_data_type bucket_type(struct bucket_mark m)
385 {
386         return m.cached_sectors && !m.dirty_sectors
387                 ? BCH_DATA_cached
388                 : m.data_type;
389 }
390
391 static bool bucket_became_unavailable(struct bucket_mark old,
392                                       struct bucket_mark new)
393 {
394         return is_available_bucket(old) &&
395                !is_available_bucket(new);
396 }
397
398 int bch2_fs_usage_apply(struct bch_fs *c,
399                         struct bch_fs_usage *fs_usage,
400                         struct disk_reservation *disk_res,
401                         unsigned journal_seq)
402 {
403         s64 added = fs_usage->data + fs_usage->reserved;
404         s64 should_not_have_added;
405         int ret = 0;
406
407         percpu_rwsem_assert_held(&c->mark_lock);
408
409         /*
410          * Not allowed to reduce sectors_available except by getting a
411          * reservation:
412          */
413         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
414         if (WARN_ONCE(should_not_have_added > 0,
415                       "disk usage increased by %lli without a reservation",
416                       should_not_have_added)) {
417                 atomic64_sub(should_not_have_added, &c->sectors_available);
418                 added -= should_not_have_added;
419                 ret = -1;
420         }
421
422         if (added > 0) {
423                 disk_res->sectors               -= added;
424                 fs_usage->online_reserved       -= added;
425         }
426
427         preempt_disable();
428         acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
429                  (u64 *) fs_usage, fs_usage_u64s(c));
430         preempt_enable();
431
432         return ret;
433 }
434
435 static inline void account_bucket(struct bch_fs_usage *fs_usage,
436                                   struct bch_dev_usage *dev_usage,
437                                   enum bch_data_type type,
438                                   int nr, s64 size)
439 {
440         if (type == BCH_DATA_sb || type == BCH_DATA_journal)
441                 fs_usage->hidden        += size;
442
443         dev_usage->buckets[type]        += nr;
444 }
445
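/*
 * Propagate a single bucket's old -> new mark transition into the
 * per-device usage counters (bucket counts per data type, sector counts,
 * erasure coding and fragmentation stats; superblock/journal buckets also
 * count toward fs_usage->hidden), and wake the allocator if the bucket has
 * just become available.
 */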
446 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
447                                   struct bch_fs_usage *fs_usage,
448                                   struct bucket_mark old, struct bucket_mark new,
449                                   bool gc)
450 {
451         struct bch_dev_usage *u;
452
453         percpu_rwsem_assert_held(&c->mark_lock);
454
455         preempt_disable();
456         u = this_cpu_ptr(ca->usage[gc]);
457
458         if (bucket_type(old))
459                 account_bucket(fs_usage, u, bucket_type(old),
460                                -1, -ca->mi.bucket_size);
461
462         if (bucket_type(new))
463                 account_bucket(fs_usage, u, bucket_type(new),
464                                1, ca->mi.bucket_size);
465
466         u->buckets_alloc +=
467                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
468         u->buckets_unavailable +=
469                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
470
471         u->buckets_ec += (int) new.stripe - (int) old.stripe;
472         u->sectors_ec += bucket_stripe_sectors(new) -
473                          bucket_stripe_sectors(old);
474
475         u->sectors[old.data_type] -= old.dirty_sectors;
476         u->sectors[new.data_type] += new.dirty_sectors;
477         u->sectors[BCH_DATA_cached] +=
478                 (int) new.cached_sectors - (int) old.cached_sectors;
479         u->sectors_fragmented +=
480                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
481         preempt_enable();
482
483         if (!is_available_bucket(old) && is_available_bucket(new))
484                 bch2_wake_allocator(ca);
485 }
486
487 __flatten
488 void bch2_dev_usage_from_buckets(struct bch_fs *c)
489 {
490         struct bch_dev *ca;
491         struct bucket_mark old = { .v.counter = 0 };
492         struct bucket_array *buckets;
493         struct bucket *g;
494         unsigned i;
495         int cpu;
496
497         c->usage_base->hidden = 0;
498
499         for_each_member_device(ca, c, i) {
500                 for_each_possible_cpu(cpu)
501                         memset(per_cpu_ptr(ca->usage[0], cpu), 0,
502                                sizeof(*ca->usage[0]));
503
504                 buckets = bucket_array(ca);
505
506                 for_each_bucket(g, buckets)
507                         bch2_dev_usage_update(c, ca, c->usage_base,
508                                               old, g->mark, false);
509         }
510 }
511
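/*
 * update_replicas(): add @sectors to the counter for a specific replicas
 * entry (and to the matching btree/data/cached summary field); returns -1
 * if the entry isn't in the replicas table yet.
 */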
512 static inline int update_replicas(struct bch_fs *c,
513                                   struct bch_fs_usage *fs_usage,
514                                   struct bch_replicas_entry *r,
515                                   s64 sectors)
516 {
517         int idx = bch2_replicas_entry_idx(c, r);
518
519         if (idx < 0)
520                 return -1;
521
522         if (!fs_usage)
523                 return 0;
524
525         switch (r->data_type) {
526         case BCH_DATA_btree:
527                 fs_usage->btree         += sectors;
528                 break;
529         case BCH_DATA_user:
530                 fs_usage->data          += sectors;
531                 break;
532         case BCH_DATA_cached:
533                 fs_usage->cached        += sectors;
534                 break;
535         }
536         fs_usage->replicas[idx]         += sectors;
537         return 0;
538 }
539
540 static inline void update_cached_sectors(struct bch_fs *c,
541                                          struct bch_fs_usage *fs_usage,
542                                          unsigned dev, s64 sectors)
543 {
544         struct bch_replicas_padded r;
545
546         bch2_replicas_entry_cached(&r.e, dev);
547
548         update_replicas(c, fs_usage, &r.e, sectors);
549 }
550
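/*
 * The _list variants below don't touch counters directly: they append
 * deltas to the transaction's replicas_delta_list (grown here as needed),
 * which is later applied - or unwound on error - by
 * bch2_replicas_delta_list_apply().
 */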
551 static struct replicas_delta_list *
552 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
553 {
554         struct replicas_delta_list *d = trans->fs_usage_deltas;
555         unsigned new_size = d ? (d->size + more) * 2 : 128;
556
557         if (!d || d->used + more > d->size) {
558                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
559                 BUG_ON(!d);
560
561                 d->size = new_size;
562                 trans->fs_usage_deltas = d;
563         }
564         return d;
565 }
566
567 static inline void update_replicas_list(struct btree_trans *trans,
568                                         struct bch_replicas_entry *r,
569                                         s64 sectors)
570 {
571         struct replicas_delta_list *d;
572         struct replicas_delta *n;
573         unsigned b;
574
575         if (!sectors)
576                 return;
577
578         b = replicas_entry_bytes(r) + 8;
579         d = replicas_deltas_realloc(trans, b);
580
581         n = (void *) d->d + d->used;
582         n->delta = sectors;
583         memcpy(&n->r, r, replicas_entry_bytes(r));
584         d->used += b;
585 }
586
587 static inline void update_cached_sectors_list(struct btree_trans *trans,
588                                               unsigned dev, s64 sectors)
589 {
590         struct bch_replicas_padded r;
591
592         bch2_replicas_entry_cached(&r.e, dev);
593
594         update_replicas_list(trans, &r.e, sectors);
595 }
596
597 static inline struct replicas_delta *
598 replicas_delta_next(struct replicas_delta *d)
599 {
600         return (void *) d + replicas_entry_bytes(&d->r) + 8;
601 }
602
603 int bch2_replicas_delta_list_apply(struct bch_fs *c,
604                                    struct bch_fs_usage *fs_usage,
605                                    struct replicas_delta_list *r)
606 {
607         struct replicas_delta *d = r->d;
608         struct replicas_delta *top = (void *) r->d + r->used;
609         unsigned i;
610
611         for (d = r->d; d != top; d = replicas_delta_next(d))
612                 if (update_replicas(c, fs_usage, &d->r, d->delta)) {
613                         top = d;
614                         goto unwind;
615                 }
616
617         if (!fs_usage)
618                 return 0;
619
620         fs_usage->nr_inodes += r->nr_inodes;
621
622         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
623                 fs_usage->reserved += r->persistent_reserved[i];
624                 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
625         }
626
627         return 0;
628 unwind:
629         for (d = r->d; d != top; d = replicas_delta_next(d))
630                 update_replicas(c, fs_usage, &d->r, -d->delta);
631         return -1;
632 }
633
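/*
 * do_mark_fn() invokes a mark function on the copy of the bucket state
 * selected by BTREE_TRIGGER_GC; when called on the non-GC path it
 * additionally updates the GC copy if GC has already visited @pos, so that
 * marks performed while GC is in progress aren't missing from the state GC
 * is rebuilding.
 */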
634 #define do_mark_fn(fn, c, pos, flags, ...)                              \
635 ({                                                                      \
636         int gc, ret = 0;                                                \
637                                                                         \
638         percpu_rwsem_assert_held(&c->mark_lock);                        \
639                                                                         \
640         for (gc = 0; gc < 2 && !ret; gc++)                              \
641                 if (!gc == !(flags & BTREE_TRIGGER_GC) ||               \
642                     (gc && gc_visited(c, pos)))                         \
643                         ret = fn(c, __VA_ARGS__, gc);                   \
644         ret;                                                            \
645 })
646
647 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
648                                     size_t b, struct bucket_mark *ret,
649                                     bool gc)
650 {
651         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
652         struct bucket *g = __bucket(ca, b, gc);
653         struct bucket_mark old, new;
654
655         old = bucket_cmpxchg(g, new, ({
656                 BUG_ON(!is_available_bucket(new));
657
658                 new.owned_by_allocator  = true;
659                 new.data_type           = 0;
660                 new.cached_sectors      = 0;
661                 new.dirty_sectors       = 0;
662                 new.gen++;
663         }));
664
665         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
666
667         if (old.cached_sectors)
668                 update_cached_sectors(c, fs_usage, ca->dev_idx,
669                                       -((s64) old.cached_sectors));
670
671         if (!gc)
672                 *ret = old;
673         return 0;
674 }
675
676 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
677                             size_t b, struct bucket_mark *old)
678 {
679         do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
680                    ca, b, old);
681
682         if (!old->owned_by_allocator && old->cached_sectors)
683                 trace_invalidate(ca, bucket_to_sector(ca, b),
684                                  old->cached_sectors);
685 }
686
687 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
688                                     size_t b, bool owned_by_allocator,
689                                     bool gc)
690 {
691         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
692         struct bucket *g = __bucket(ca, b, gc);
693         struct bucket_mark old, new;
694
695         old = bucket_cmpxchg(g, new, ({
696                 new.owned_by_allocator  = owned_by_allocator;
697         }));
698
699         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
700
701         BUG_ON(!gc &&
702                !owned_by_allocator && !old.owned_by_allocator);
703
704         return 0;
705 }
706
707 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
708                             size_t b, bool owned_by_allocator,
709                             struct gc_pos pos, unsigned flags)
710 {
711         preempt_disable();
712
713         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
714                    ca, b, owned_by_allocator);
715
716         preempt_enable();
717 }
718
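/*
 * Mark an alloc key: copy the unpacked fields (gen, data type, sector
 * counts) into the in-memory bucket mark, update device usage and the
 * bucket's io times, and on the bucket-invalidate path subtract any cached
 * sectors the bucket previously held from the cached-data accounting.
 */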
719 static int bch2_mark_alloc(struct bch_fs *c,
720                            struct bkey_s_c old, struct bkey_s_c new,
721                            struct bch_fs_usage *fs_usage,
722                            u64 journal_seq, unsigned flags)
723 {
724         bool gc = flags & BTREE_TRIGGER_GC;
725         struct bkey_alloc_unpacked u;
726         struct bch_dev *ca;
727         struct bucket *g;
728         struct bucket_mark old_m, m;
729
730         /* We don't do anything for deletions - do we?: */
731         if (new.k->type != KEY_TYPE_alloc)
732                 return 0;
733
734         /*
735          * alloc btree is read in by bch2_alloc_read, not gc:
736          */
737         if ((flags & BTREE_TRIGGER_GC) &&
738             !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
739                 return 0;
740
741         ca = bch_dev_bkey_exists(c, new.k->p.inode);
742
743         if (new.k->p.offset >= ca->mi.nbuckets)
744                 return 0;
745
746         g = __bucket(ca, new.k->p.offset, gc);
747         u = bch2_alloc_unpack(new);
748
749         old_m = bucket_cmpxchg(g, m, ({
750                 m.gen                   = u.gen;
751                 m.data_type             = u.data_type;
752                 m.dirty_sectors         = u.dirty_sectors;
753                 m.cached_sectors        = u.cached_sectors;
754
755                 if (journal_seq) {
756                         m.journal_seq_valid     = 1;
757                         m.journal_seq           = journal_seq;
758                 }
759         }));
760
761         bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);
762
763         g->io_time[READ]        = u.read_time;
764         g->io_time[WRITE]       = u.write_time;
765         g->oldest_gen           = u.oldest_gen;
766         g->gen_valid            = 1;
767
768         /*
769          * need to know if we're getting called from the invalidate path or
770          * not:
771          */
772
773         if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
774             old_m.cached_sectors) {
775                 update_cached_sectors(c, fs_usage, ca->dev_idx,
776                                       -old_m.cached_sectors);
777                 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
778                                  old_m.cached_sectors);
779         }
780
781         return 0;
782 }
783
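/*
 * checked_add(): saturating add into a 16 bit sector count; evaluates to
 * true if the addition would have overflowed U16_MAX.
 */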
784 #define checked_add(a, b)                                       \
785 ({                                                              \
786         unsigned _res = (unsigned) (a) + (b);                   \
787         bool overflow = _res > U16_MAX;                         \
788         if (overflow)                                           \
789                 _res = U16_MAX;                                 \
790         (a) = _res;                                             \
791         overflow;                                               \
792 })
793
794 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
795                                        size_t b, enum bch_data_type data_type,
796                                        unsigned sectors, bool gc)
797 {
798         struct bucket *g = __bucket(ca, b, gc);
799         struct bucket_mark old, new;
800         bool overflow;
801
802         BUG_ON(data_type != BCH_DATA_sb &&
803                data_type != BCH_DATA_journal);
804
805         old = bucket_cmpxchg(g, new, ({
806                 new.data_type   = data_type;
807                 overflow = checked_add(new.dirty_sectors, sectors);
808         }));
809
810         bch2_fs_inconsistent_on(old.data_type &&
811                                 old.data_type != data_type, c,
812                 "different types of data in same bucket: %s, %s",
813                 bch2_data_types[old.data_type],
814                 bch2_data_types[data_type]);
815
816         bch2_fs_inconsistent_on(overflow, c,
817                 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
818                 ca->dev_idx, b, new.gen,
819                 bch2_data_types[old.data_type ?: data_type],
820                 old.dirty_sectors, sectors);
821
822         if (c)
823                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
824                                       old, new, gc);
825
826         return 0;
827 }
828
829 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
830                                size_t b, enum bch_data_type type,
831                                unsigned sectors, struct gc_pos pos,
832                                unsigned flags)
833 {
834         BUG_ON(type != BCH_DATA_sb &&
835                type != BCH_DATA_journal);
836
837         preempt_disable();
838
839         if (likely(c)) {
840                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
841                            ca, b, type, sectors);
842         } else {
843                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
844         }
845
846         preempt_enable();
847 }
848
849 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
850 {
851         return DIV_ROUND_UP(sectors * n, d);
852 }
853
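/*
 * __ptr_disk_sectors_delta() computes the change in on-disk (compressed)
 * sectors when the live size of an extent pointer changes by @delta at
 * @offset; sizes are scaled by the compressed/uncompressed ratio n/d, and
 * overwrites/splits are accounted as removing the old extent and adding
 * back the part that survives.
 */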
854 static s64 __ptr_disk_sectors_delta(unsigned old_size,
855                                     unsigned offset, s64 delta,
856                                     unsigned flags,
857                                     unsigned n, unsigned d)
858 {
859         BUG_ON(!n || !d);
860
861         if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
862                 BUG_ON(offset + -delta > old_size);
863
864                 return -disk_sectors_scaled(n, d, old_size) +
865                         disk_sectors_scaled(n, d, offset) +
866                         disk_sectors_scaled(n, d, old_size - offset + delta);
867         } else if (flags & BTREE_TRIGGER_OVERWRITE) {
868                 BUG_ON(offset + -delta > old_size);
869
870                 return -disk_sectors_scaled(n, d, old_size) +
871                         disk_sectors_scaled(n, d, old_size + delta);
872         } else {
873                 return  disk_sectors_scaled(n, d, delta);
874         }
875 }
876
877 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
878                                   unsigned offset, s64 delta,
879                                   unsigned flags)
880 {
881         return __ptr_disk_sectors_delta(p.crc.live_size,
882                                         offset, delta, flags,
883                                         p.crc.compressed_size,
884                                         p.crc.uncompressed_size);
885 }
886
887 static void bucket_set_stripe(struct bch_fs *c,
888                               const struct bch_extent_ptr *ptr,
889                               struct bch_fs_usage *fs_usage,
890                               u64 journal_seq,
891                               unsigned flags,
892                               bool enabled)
893 {
894         bool gc = flags & BTREE_TRIGGER_GC;
895         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
896         struct bucket *g = PTR_BUCKET(ca, ptr, gc);
897         struct bucket_mark new, old;
898
899         old = bucket_cmpxchg(g, new, ({
900                 new.stripe                      = enabled;
901                 if (journal_seq) {
902                         new.journal_seq_valid   = 1;
903                         new.journal_seq         = journal_seq;
904                 }
905         }));
906
907         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
908
909         /*
910          * XXX write repair code for these, flag stripe as possibly bad
911          */
912         if (old.gen != ptr->gen)
913                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
914                               "stripe with stale pointer");
915 #if 0
916         /*
917          * We'd like to check for these, but these checks don't work
918          * yet:
919          */
920         if (old.stripe && enabled)
921                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
922                               "multiple stripes using same bucket");
923
924         if (!old.stripe && !enabled)
925                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
926                               "deleting stripe but bucket not marked as stripe bucket");
927 #endif
928 }
929
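/*
 * Core pointer-marking helper: validates the pointer against the bucket
 * (pointer gen newer than bucket gen, gen too stale, stale dirty pointer,
 * mismatched data type, sector count overflow) and applies the sector delta
 * to the bucket's dirty or cached count.  Returns -EIO on an inconsistency,
 * 1 for a stale cached pointer (which is simply not counted), 0 on success.
 */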
930 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
931                           struct extent_ptr_decoded p,
932                           s64 sectors, enum bch_data_type ptr_data_type,
933                           u8 bucket_gen, u8 *bucket_data_type,
934                           u16 *dirty_sectors, u16 *cached_sectors)
935 {
936         u16 *dst_sectors = !p.ptr.cached
937                 ? dirty_sectors
938                 : cached_sectors;
939         u16 orig_sectors = *dst_sectors;
940         char buf[200];
941
942         if (gen_after(p.ptr.gen, bucket_gen)) {
943                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
944                         "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
945                         "while marking %s",
946                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
947                         bucket_gen,
948                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
949                         p.ptr.gen,
950                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
951                 return -EIO;
952         }
953
954         if (gen_cmp(bucket_gen, p.ptr.gen) > 96U) {
955                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
956                         "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
957                         "while marking %s",
958                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
959                         bucket_gen,
960                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
961                         p.ptr.gen,
962                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
963                 return -EIO;
964         }
965
966         if (bucket_gen != p.ptr.gen && !p.ptr.cached) {
967                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
968                         "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
969                         "while marking %s",
970                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
971                         bucket_gen,
972                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
973                         p.ptr.gen,
974                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
975                 return -EIO;
976         }
977
978         if (bucket_gen != p.ptr.gen)
979                 return 1;
980
981         if (*bucket_data_type && *bucket_data_type != ptr_data_type) {
982                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
983                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
984                         "while marking %s",
985                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
986                         bucket_gen,
987                         bch2_data_types[*bucket_data_type],
988                         bch2_data_types[ptr_data_type],
989                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
990                 return -EIO;
991         }
992
993         if (checked_add(*dst_sectors, sectors)) {
994                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
995                         "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
996                         "while marking %s",
997                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
998                         bucket_gen,
999                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
1000                         orig_sectors, sectors,
1001                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
1002                 return -EIO;
1003         }
1004
1005         *bucket_data_type = *dirty_sectors || *cached_sectors
1006                 ? ptr_data_type : 0;
1007         return 0;
1008 }
1009
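/*
 * Apply __mark_pointer() to the bucket mark with an open-coded
 * compare-and-exchange loop (or a plain store when BTREE_TRIGGER_NOATOMIC
 * is set), then propagate the old/new marks into the device usage counters.
 */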
1010 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1011                              struct extent_ptr_decoded p,
1012                              s64 sectors, enum bch_data_type data_type,
1013                              struct bch_fs_usage *fs_usage,
1014                              u64 journal_seq, unsigned flags)
1015 {
1016         bool gc = flags & BTREE_TRIGGER_GC;
1017         struct bucket_mark old, new;
1018         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1019         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
1020         u8 bucket_data_type;
1021         u64 v;
1022         int ret;
1023
1024         v = atomic64_read(&g->_mark.v);
1025         do {
1026                 new.v.counter = old.v.counter = v;
1027                 bucket_data_type = new.data_type;
1028
1029                 ret = __mark_pointer(c, k, p, sectors, data_type, new.gen,
1030                                      &bucket_data_type,
1031                                      &new.dirty_sectors,
1032                                      &new.cached_sectors);
1033                 if (ret)
1034                         return ret;
1035
1036                 new.data_type = bucket_data_type;
1037
1038                 if (journal_seq) {
1039                         new.journal_seq_valid = 1;
1040                         new.journal_seq = journal_seq;
1041                 }
1042
1043                 if (flags & BTREE_TRIGGER_NOATOMIC) {
1044                         g->_mark = new;
1045                         break;
1046                 }
1047         } while ((v = atomic64_cmpxchg(&g->_mark.v,
1048                               old.v.counter,
1049                               new.v.counter)) != old.v.counter);
1050
1051         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
1052
1053         BUG_ON(!gc && bucket_became_unavailable(old, new));
1054
1055         return 0;
1056 }
1057
1058 static int bch2_mark_stripe_ptr(struct bch_fs *c,
1059                                 struct bch_extent_stripe_ptr p,
1060                                 enum bch_data_type data_type,
1061                                 struct bch_fs_usage *fs_usage,
1062                                 s64 sectors, unsigned flags,
1063                                 struct bch_replicas_padded *r,
1064                                 unsigned *nr_data,
1065                                 unsigned *nr_parity)
1066 {
1067         bool gc = flags & BTREE_TRIGGER_GC;
1068         struct stripe *m;
1069         unsigned i, blocks_nonempty = 0;
1070
1071         m = genradix_ptr(&c->stripes[gc], p.idx);
1072
1073         spin_lock(&c->ec_stripes_heap_lock);
1074
1075         if (!m || !m->alive) {
1076                 spin_unlock(&c->ec_stripes_heap_lock);
1077                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1078                                     (u64) p.idx);
1079                 return -EIO;
1080         }
1081
1082         BUG_ON(m->r.e.data_type != data_type);
1083
1084         *nr_data        = m->nr_blocks - m->nr_redundant;
1085         *nr_parity      = m->nr_redundant;
1086         *r = m->r;
1087
1088         m->block_sectors[p.block] += sectors;
1089
1090         for (i = 0; i < m->nr_blocks; i++)
1091                 blocks_nonempty += m->block_sectors[i] != 0;
1092
1093         if (m->blocks_nonempty != blocks_nonempty) {
1094                 m->blocks_nonempty = blocks_nonempty;
1095                 if (!gc)
1096                         bch2_stripes_heap_update(c, m, p.idx);
1097         }
1098
1099         spin_unlock(&c->ec_stripes_heap_lock);
1100
1101         return 0;
1102 }
1103
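/*
 * Mark every pointer of an extent (or btree node) key: for each decoded
 * pointer compute its on-disk sector delta, update the bucket it points
 * into, and then account the result either as cached sectors, as part of a
 * plain replicas entry built up across the dirty pointers, or through the
 * stripe it belongs to for erasure coded pointers.
 */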
1104 static int bch2_mark_extent(struct bch_fs *c,
1105                             struct bkey_s_c old, struct bkey_s_c new,
1106                             unsigned offset, s64 sectors,
1107                             enum bch_data_type data_type,
1108                             struct bch_fs_usage *fs_usage,
1109                             unsigned journal_seq, unsigned flags)
1110 {
1111         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1112         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1113         const union bch_extent_entry *entry;
1114         struct extent_ptr_decoded p;
1115         struct bch_replicas_padded r;
1116         s64 dirty_sectors = 0;
1117         bool stale;
1118         int ret;
1119
1120         r.e.data_type   = data_type;
1121         r.e.nr_devs     = 0;
1122         r.e.nr_required = 1;
1123
1124         BUG_ON(!sectors);
1125
1126         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1127                 s64 disk_sectors = data_type == BCH_DATA_btree
1128                         ? sectors
1129                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1130
1131                 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
1132                                         fs_usage, journal_seq, flags);
1133                 if (ret < 0)
1134                         return ret;
1135
1136                 stale = ret > 0;
1137
1138                 if (p.ptr.cached) {
1139                         if (!stale)
1140                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1141                                                       disk_sectors);
1142                 } else if (!p.has_ec) {
1143                         dirty_sectors          += disk_sectors;
1144                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1145                 } else {
1146                         struct bch_replicas_padded ec_r;
1147                         unsigned nr_data, nr_parity;
1148                         s64 parity_sectors;
1149
1150                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1151                                         fs_usage, disk_sectors, flags,
1152                                         &ec_r, &nr_data, &nr_parity);
1153                         if (ret)
1154                                 return ret;
1155
1156                         parity_sectors =
1157                                 __ptr_disk_sectors_delta(p.crc.live_size,
1158                                         offset, sectors, flags,
1159                                         p.crc.compressed_size * nr_parity,
1160                                         p.crc.uncompressed_size * nr_data);
1161
1162                         update_replicas(c, fs_usage, &ec_r.e,
1163                                         disk_sectors + parity_sectors);
1164
1165                         /*
1166                          * There may be other dirty pointers in this extent, but
1167                          * if so they're not required for mounting if we have an
1168                          * erasure coded pointer in this extent:
1169                          */
1170                         r.e.nr_required = 0;
1171                 }
1172         }
1173
1174         if (r.e.nr_devs)
1175                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1176
1177         return 0;
1178 }
1179
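/*
 * Mark a stripe key: update the in-memory struct stripe for this index, set
 * or clear the stripe bit on each constituent bucket whose pointer changed,
 * and (outside of GC) keep the ec stripes heap in sync.
 */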
1180 static int bch2_mark_stripe(struct bch_fs *c,
1181                             struct bkey_s_c old, struct bkey_s_c new,
1182                             struct bch_fs_usage *fs_usage,
1183                             u64 journal_seq, unsigned flags)
1184 {
1185         bool gc = flags & BTREE_TRIGGER_GC;
1186         size_t idx = new.k->p.offset;
1187         const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1188                 ? bkey_s_c_to_stripe(old).v : NULL;
1189         const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1190                 ? bkey_s_c_to_stripe(new).v : NULL;
1191         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1192         unsigned i;
1193
1194         if (!m || (old_s && !m->alive)) {
1195                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1196                                     idx);
1197                 return -1;
1198         }
1199
1200         if (!new_s) {
1201                 /* Deleting: */
1202                 for (i = 0; i < old_s->nr_blocks; i++)
1203                         bucket_set_stripe(c, old_s->ptrs + i, fs_usage,
1204                                           journal_seq, flags, false);
1205
1206                 if (!gc && m->on_heap) {
1207                         spin_lock(&c->ec_stripes_heap_lock);
1208                         bch2_stripes_heap_del(c, m, idx);
1209                         spin_unlock(&c->ec_stripes_heap_lock);
1210                 }
1211
1212                 memset(m, 0, sizeof(*m));
1213         } else {
1214                 BUG_ON(old_s && new_s->nr_blocks != old_s->nr_blocks);
1215                 BUG_ON(old_s && new_s->nr_redundant != old_s->nr_redundant);
1216
1217                 for (i = 0; i < new_s->nr_blocks; i++) {
1218                         if (!old_s ||
1219                             memcmp(new_s->ptrs + i,
1220                                    old_s->ptrs + i,
1221                                    sizeof(struct bch_extent_ptr))) {
1222
1223                                 if (old_s)
1224                                         bucket_set_stripe(c, old_s->ptrs + i, fs_usage,
1225                                                           journal_seq, flags, false);
1226                                 bucket_set_stripe(c, new_s->ptrs + i, fs_usage,
1227                                                   journal_seq, flags, true);
1228                         }
1229                 }
1230
1231                 m->alive        = true;
1232                 m->sectors      = le16_to_cpu(new_s->sectors);
1233                 m->algorithm    = new_s->algorithm;
1234                 m->nr_blocks    = new_s->nr_blocks;
1235                 m->nr_redundant = new_s->nr_redundant;
1236
1237                 bch2_bkey_to_replicas(&m->r.e, new);
1238
1239                 /* gc recalculates these fields: */
1240                 if (!(flags & BTREE_TRIGGER_GC)) {
1241                         m->blocks_nonempty = 0;
1242
1243                         for (i = 0; i < new_s->nr_blocks; i++) {
1244                                 m->block_sectors[i] =
1245                                         stripe_blockcount_get(new_s, i);
1246                                 m->blocks_nonempty += !!m->block_sectors[i];
1247                         }
1248                 }
1249
1250                 if (!gc) {
1251                         spin_lock(&c->ec_stripes_heap_lock);
1252                         bch2_stripes_heap_update(c, m, idx);
1253                         spin_unlock(&c->ec_stripes_heap_lock);
1254                 }
1255         }
1256
1257         return 0;
1258 }
1259
1260 static int bch2_mark_key_locked(struct bch_fs *c,
1261                    struct bkey_s_c old,
1262                    struct bkey_s_c new,
1263                    unsigned offset, s64 sectors,
1264                    struct bch_fs_usage *fs_usage,
1265                    u64 journal_seq, unsigned flags)
1266 {
1267         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1268         int ret = 0;
1269
1270         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1271
1272         preempt_disable();
1273
1274         if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1275                 fs_usage = fs_usage_ptr(c, journal_seq,
1276                                         flags & BTREE_TRIGGER_GC);
1277
1278         switch (k.k->type) {
1279         case KEY_TYPE_alloc:
1280                 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1281                 break;
1282         case KEY_TYPE_btree_ptr:
1283         case KEY_TYPE_btree_ptr_v2:
1284                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1285                         ?  c->opts.btree_node_size
1286                         : -c->opts.btree_node_size;
1287
1288                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1289                                 BCH_DATA_btree, fs_usage, journal_seq, flags);
1290                 break;
1291         case KEY_TYPE_extent:
1292         case KEY_TYPE_reflink_v:
1293                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1294                                 BCH_DATA_user, fs_usage, journal_seq, flags);
1295                 break;
1296         case KEY_TYPE_stripe:
1297                 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1298                 break;
1299         case KEY_TYPE_inode:
1300                 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1301                         fs_usage->nr_inodes++;
1302                 else
1303                         fs_usage->nr_inodes--;
1304                 break;
1305         case KEY_TYPE_reservation: {
1306                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1307
1308                 sectors *= replicas;
1309                 replicas = clamp_t(unsigned, replicas, 1,
1310                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1311
1312                 fs_usage->reserved                              += sectors;
1313                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1314                 break;
1315         }
1316         }
1317
1318         preempt_enable();
1319
1320         return ret;
1321 }
1322
1323 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1324                   unsigned offset, s64 sectors,
1325                   struct bch_fs_usage *fs_usage,
1326                   u64 journal_seq, unsigned flags)
1327 {
1328         struct bkey deleted;
1329         struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1330         int ret;
1331
1332         bkey_init(&deleted);
1333
1334         percpu_down_read(&c->mark_lock);
1335         ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1336                                    fs_usage, journal_seq,
1337                                    BTREE_TRIGGER_INSERT|flags);
1338         percpu_up_read(&c->mark_lock);
1339
1340         return ret;
1341 }
1342
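/*
 * Mark trigger run at transaction commit time: marks the key being
 * inserted, and for extents also walks the existing keys it overlaps in the
 * btree node, computing the (offset, sectors) pair for each of the four
 * overlap cases so the overwritten portions are subtracted from the
 * accounting.
 */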
1343 int bch2_mark_update(struct btree_trans *trans,
1344                      struct btree_iter *iter,
1345                      struct bkey_i *new,
1346                      struct bch_fs_usage *fs_usage,
1347                      unsigned flags)
1348 {
1349         struct bch_fs           *c = trans->c;
1350         struct btree            *b = iter_l(iter)->b;
1351         struct btree_node_iter  node_iter = iter_l(iter)->iter;
1352         struct bkey_packed      *_old;
1353         struct bkey_s_c         old;
1354         struct bkey             unpacked;
1355         int ret = 0;
1356
1357         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1358                 return 0;
1359
1360         if (!btree_node_type_needs_gc(iter->btree_id))
1361                 return 0;
1362
1363         bkey_init(&unpacked);
1364         old = (struct bkey_s_c) { &unpacked, NULL };
1365
1366         if (!btree_node_type_is_extents(iter->btree_id)) {
1367                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1368                         _old = bch2_btree_node_iter_peek(&node_iter, b);
1369                         if (_old)
1370                                 old = bkey_disassemble(b, _old, &unpacked);
1371                 } else {
1372                         struct bkey_cached *ck = (void *) iter->l[0].b;
1373
1374                         if (ck->valid)
1375                                 old = bkey_i_to_s_c(ck->k);
1376                 }
1377
1378                 if (old.k->type == new->k.type) {
1379                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1380                                 fs_usage, trans->journal_res.seq,
1381                                 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1382
1383                 } else {
1384                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1385                                 fs_usage, trans->journal_res.seq,
1386                                 BTREE_TRIGGER_INSERT|flags);
1387                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1388                                 fs_usage, trans->journal_res.seq,
1389                                 BTREE_TRIGGER_OVERWRITE|flags);
1390                 }
1391         } else {
1392                 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1393                 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1394                         0, new->k.size,
1395                         fs_usage, trans->journal_res.seq,
1396                         BTREE_TRIGGER_INSERT|flags);
1397
1398                 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
1399                         unsigned offset = 0;
1400                         s64 sectors;
1401
1402                         old = bkey_disassemble(b, _old, &unpacked);
1403                         sectors = -((s64) old.k->size);
1404
1405                         flags |= BTREE_TRIGGER_OVERWRITE;
1406
1407                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1408                                 return 0;
1409
1410                         switch (bch2_extent_overlap(&new->k, old.k)) {
1411                         case BCH_EXTENT_OVERLAP_ALL:
1412                                 offset = 0;
1413                                 sectors = -((s64) old.k->size);
1414                                 break;
1415                         case BCH_EXTENT_OVERLAP_BACK:
1416                                 offset = bkey_start_offset(&new->k) -
1417                                         bkey_start_offset(old.k);
1418                                 sectors = bkey_start_offset(&new->k) -
1419                                         old.k->p.offset;
1420                                 break;
1421                         case BCH_EXTENT_OVERLAP_FRONT:
1422                                 offset = 0;
1423                                 sectors = bkey_start_offset(old.k) -
1424                                         new->k.p.offset;
1425                                 break;
1426                         case BCH_EXTENT_OVERLAP_MIDDLE:
1427                                 offset = bkey_start_offset(&new->k) -
1428                                         bkey_start_offset(old.k);
1429                                 sectors = -((s64) new->k.size);
1430                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1431                                 break;
1432                         }
1433
1434                         BUG_ON(sectors >= 0);
1435
1436                         ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1437                                         offset, sectors, fs_usage,
1438                                         trans->journal_res.seq, flags) ?: 1;
1439                         if (ret <= 0)
1440                                 break;
1441
1442                         bch2_btree_node_iter_advance(&node_iter, b);
1443                 }
1444         }
1445
1446         return ret;
1447 }
1448
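/*
 * Apply the usage deltas accumulated while running this transaction's
 * triggers.  If disk usage grew by more than the transaction's reservation
 * covered, warn once (warned_disk_usage) and dump the keys being inserted
 * together with the existing keys they overlap.
 */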
1449 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1450                                struct bch_fs_usage *fs_usage)
1451 {
1452         struct bch_fs *c = trans->c;
1453         struct btree_insert_entry *i;
1454         static int warned_disk_usage;
1455         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1456         char buf[200];
1457
1458         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1459                                  trans->journal_res.seq) ||
1460             warned_disk_usage ||
1461             xchg(&warned_disk_usage, 1))
1462                 return;
1463
1464         bch_err(c, "disk usage increased more than %llu sectors reserved",
1465                 disk_res_sectors);
1466
1467         trans_for_each_update(trans, i) {
1468                 pr_err("while inserting");
1469                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1470                 pr_err("%s", buf);
1471                 pr_err("overlapping with");
1472
1473                 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1474                         struct btree            *b = iter_l(i->iter)->b;
1475                         struct btree_node_iter  node_iter = iter_l(i->iter)->iter;
1476                         struct bkey_packed      *_k;
1477
1478                         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1479                                 struct bkey             unpacked;
1480                                 struct bkey_s_c         k;
1481
1482                                 pr_info("_k %px format %u", _k, _k->format);
1483                                 k = bkey_disassemble(b, _k, &unpacked);
1484
1485                                 if (btree_node_is_extents(b)
1486                                     ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1487                                     : bkey_cmp(i->k->k.p, k.k->p))
1488                                         break;
1489
1490                                 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1491                                 pr_err("%s", buf);
1492
1493                                 bch2_btree_node_iter_advance(&node_iter, b);
1494                         }
1495                 } else {
1496                         struct bkey_cached *ck = (void *) i->iter->l[0].b;
1497
1498                         if (ck->valid) {
1499                                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
1500                                 pr_err("%s", buf);
1501                         }
1502                 }
1503         }
1504 }
1505
1506 /* trans_mark: */
1507
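/*
 * Look for a pending update in this transaction that covers @pos on
 * @btree_id; if found, return its iterator and key so triggers see the
 * transaction's own uncommitted changes rather than what's currently in
 * the btree.
 */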
1508 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1509                             enum btree_id btree_id, struct bpos pos,
1510                             struct bkey_s_c *k)
1511 {
1512         struct btree_insert_entry *i;
1513
1514         trans_for_each_update(trans, i)
1515                 if (i->iter->btree_id == btree_id &&
1516                     (btree_node_type_is_extents(btree_id)
1517                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1518                        bkey_cmp(pos, i->k->k.p) < 0
1519                      : !bkey_cmp(pos, i->iter->pos))) {
1520                         *k = bkey_i_to_s_c(i->k);
1521                         return i->iter;
1522                 }
1523
1524         return NULL;
1525 }
1526
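/*
 * Get the key at @pos, preferring a pending update within the transaction
 * (returns 1) over reading it through a fresh iterator (returns 0, or an
 * error).  The alloc btree goes through the key cache; everything else is
 * read with BTREE_ITER_SLOTS.
 */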
1527 static int trans_get_key(struct btree_trans *trans,
1528                          enum btree_id btree_id, struct bpos pos,
1529                          struct btree_iter **iter,
1530                          struct bkey_s_c *k)
1531 {
1532         unsigned flags = btree_id != BTREE_ID_ALLOC
1533                 ? BTREE_ITER_SLOTS
1534                 : BTREE_ITER_CACHED;
1535         int ret;
1536
1537         *iter = trans_get_update(trans, btree_id, pos, k);
1538         if (*iter)
1539                 return 1;
1540
1541         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1542                                     flags|BTREE_ITER_INTENT);
1543         if (IS_ERR(*iter))
1544                 return PTR_ERR(*iter);
1545
1546         *k = __bch2_btree_iter_peek(*iter, flags);
1547         ret = bkey_err(*k);
1548         if (ret)
1549                 bch2_trans_iter_put(trans, *iter);
1550         return ret;
1551 }
1552
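/*
 * Transactional counterpart of marking a single pointer: read the bucket's
 * alloc key (from a pending update, the key cache, or - if the cached key
 * isn't filled in - the in-memory bucket), let __mark_pointer() adjust the
 * sector counts and data type, then queue the repacked alloc key as an
 * update in this transaction.
 */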
1553 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1554                         struct bkey_s_c k, struct extent_ptr_decoded p,
1555                         s64 sectors, enum bch_data_type data_type)
1556 {
1557         struct bch_fs *c = trans->c;
1558         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1559         struct bpos pos = POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr));
1560         struct btree_iter *iter;
1561         struct bkey_s_c k_a;
1562         struct bkey_alloc_unpacked u;
1563         struct bkey_i_alloc *a;
1564         struct bucket *g;
1565         int ret;
1566
1567         iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k_a);
1568         if (iter) {
1569                 u = bch2_alloc_unpack(k_a);
1570         } else {
1571                 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
1572                                            BTREE_ITER_CACHED|
1573                                            BTREE_ITER_CACHED_NOFILL|
1574                                            BTREE_ITER_INTENT);
1575                 if (IS_ERR(iter))
1576                         return PTR_ERR(iter);
1577
1578                 ret = bch2_btree_iter_traverse(iter);
1579                 if (ret)
1580                         goto out;
1581
1582                 percpu_down_read(&c->mark_lock);
1583                 g = bucket(ca, pos.offset);
1584                 u = alloc_mem_to_key(g, READ_ONCE(g->mark));
1585                 percpu_up_read(&c->mark_lock);
1586         }
1587
1588         ret = __mark_pointer(c, k, p, sectors, data_type, u.gen, &u.data_type,
1589                              &u.dirty_sectors, &u.cached_sectors);
1590         if (ret)
1591                 goto out;
1592
1593         a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
1594         ret = PTR_ERR_OR_ZERO(a);
1595         if (ret)
1596                 goto out;
1597
1598         bkey_alloc_init(&a->k_i);
1599         a->k.p = pos;
1600         bch2_alloc_pack(a, u);
1601         bch2_trans_update(trans, iter, &a->k_i, 0);
1602 out:
1603         bch2_trans_iter_put(trans, iter);
1604         return ret;
1605 }
1606
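/*
 * A pointer into an erasure coded stripe: bump the sector count for the
 * block it lives in on the stripe key itself, and hand the stripe's
 * replicas entry and data/parity geometry back to the caller so the
 * extent's sectors can be charged against the stripe rather than against a
 * plain replicas entry.
 */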
1607 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1608                         struct bch_extent_stripe_ptr p,
1609                         s64 sectors, enum bch_data_type data_type,
1610                         struct bch_replicas_padded *r,
1611                         unsigned *nr_data,
1612                         unsigned *nr_parity)
1613 {
1614         struct bch_fs *c = trans->c;
1615         struct btree_iter *iter;
1616         struct bkey_s_c k;
1617         struct bkey_i_stripe *s;
1618         int ret = 0;
1619
1620         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1621         if (ret < 0)
1622                 return ret;
1623
1624         if (k.k->type != KEY_TYPE_stripe) {
1625                 bch2_fs_inconsistent(c,
1626                         "pointer to nonexistent stripe %llu",
1627                         (u64) p.idx);
1628                 ret = -EIO;
1629                 goto out;
1630         }
1631
1632         s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1633         ret = PTR_ERR_OR_ZERO(s);
1634         if (ret)
1635                 goto out;
1636
1637         bkey_reassemble(&s->k_i, k);
1638
1639         stripe_blockcount_set(&s->v, p.block,
1640                 stripe_blockcount_get(&s->v, p.block) +
1641                 sectors);
1642
1643         *nr_data        = s->v.nr_blocks - s->v.nr_redundant;
1644         *nr_parity      = s->v.nr_redundant;
1645         bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
1646         bch2_trans_update(trans, iter, &s->k_i, 0);
1647 out:
1648         bch2_trans_iter_put(trans, iter);
1649         return ret;
1650 }
1651
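/*
 * Transactionally mark an extent (or btree pointer): update the bucket each
 * pointer lands in, account cached pointers against the per-device cached
 * sector counts, and account dirty pointers against a replicas entry -
 * either the extent's own entry or, for erasure coded pointers, the
 * stripe's entry including parity.
 */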
1652 static int bch2_trans_mark_extent(struct btree_trans *trans,
1653                         struct bkey_s_c k, unsigned offset,
1654                         s64 sectors, unsigned flags,
1655                         enum bch_data_type data_type)
1656 {
1657         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1658         const union bch_extent_entry *entry;
1659         struct extent_ptr_decoded p;
1660         struct bch_replicas_padded r;
1661         s64 dirty_sectors = 0;
1662         bool stale;
1663         int ret;
1664
1665         r.e.data_type   = data_type;
1666         r.e.nr_devs     = 0;
1667         r.e.nr_required = 1;
1668
1669         BUG_ON(!sectors);
1670
1671         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1672                 s64 disk_sectors = data_type == BCH_DATA_btree
1673                         ? sectors
1674                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1675
1676                 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1677                                               data_type);
1678                 if (ret < 0)
1679                         return ret;
1680
1681                 stale = ret > 0;
1682
1683                 if (p.ptr.cached) {
1684                         if (!stale)
1685                                 update_cached_sectors_list(trans, p.ptr.dev,
1686                                                            disk_sectors);
1687                 } else if (!p.has_ec) {
1688                         dirty_sectors          += disk_sectors;
1689                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1690                 } else {
1691                         struct bch_replicas_padded ec_r;
1692                         unsigned nr_data, nr_parity;
1693                         s64 parity_sectors;
1694
1695                         ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
1696                                         disk_sectors, data_type,
1697                                         &ec_r, &nr_data, &nr_parity);
1698                         if (ret)
1699                                 return ret;
1700
1701                         parity_sectors =
1702                                 __ptr_disk_sectors_delta(p.crc.live_size,
1703                                         offset, sectors, flags,
1704                                         p.crc.compressed_size * nr_parity,
1705                                         p.crc.uncompressed_size * nr_data);
1706
1707                         update_replicas_list(trans, &ec_r.e,
1708                                              disk_sectors + parity_sectors);
1709
1710                         r.e.nr_required = 0;
1711                 }
1712         }
1713
1714         if (r.e.nr_devs)
1715                 update_replicas_list(trans, &r.e, dirty_sectors);
1716
1717         return 0;
1718 }
1719
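/*
 * Adjust the refcount on the indirect extent at @idx that @p points to:
 * incremented on insert, decremented on overwrite, with the reflink_v key
 * deleted once the refcount reaches zero.  Returns the number of sectors
 * covered so the caller can advance to the next indirect extent.
 */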
1720 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1721                         struct bkey_s_c_reflink_p p,
1722                         u64 idx, unsigned sectors,
1723                         unsigned flags)
1724 {
1725         struct bch_fs *c = trans->c;
1726         struct btree_iter *iter;
1727         struct bkey_s_c k;
1728         struct bkey_i_reflink_v *r_v;
1729         s64 ret;
1730
1731         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1732                             POS(0, idx), &iter, &k);
1733         if (ret < 0)
1734                 return ret;
1735
1736         if (k.k->type != KEY_TYPE_reflink_v) {
1737                 bch2_fs_inconsistent(c,
1738                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1739                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1740                 ret = -EIO;
1741                 goto err;
1742         }
1743
1744         if ((flags & BTREE_TRIGGER_OVERWRITE) &&
1745             (bkey_start_offset(k.k) < idx ||
1746              k.k->p.offset > idx + sectors))
1747                 goto out;
1748
1749         sectors = k.k->p.offset - idx;
1750
1751         r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1752         ret = PTR_ERR_OR_ZERO(r_v);
1753         if (ret)
1754                 goto err;
1755
1756         bkey_reassemble(&r_v->k_i, k);
1757
1758         le64_add_cpu(&r_v->v.refcount,
1759                      !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
1760
1761         if (!r_v->v.refcount) {
1762                 r_v->k.type = KEY_TYPE_deleted;
1763                 set_bkey_val_u64s(&r_v->k, 0);
1764         }
1765
1766         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1767         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1768
1769         bch2_trans_update(trans, iter, &r_v->k_i, 0);
1770 out:
1771         ret = sectors;
1772 err:
1773         bch2_trans_iter_put(trans, iter);
1774         return ret;
1775 }
1776
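/*
 * A reflink pointer may span several indirect extents; process them one at
 * a time, advancing by however many sectors each call covered.
 */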
1777 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1778                         struct bkey_s_c_reflink_p p, unsigned offset,
1779                         s64 sectors, unsigned flags)
1780 {
1781         u64 idx = le64_to_cpu(p.v->idx) + offset;
1782         s64 ret = 0;
1783
1784         sectors = abs(sectors);
1785         BUG_ON(offset + sectors > p.k->size);
1786
1787         while (sectors) {
1788                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1789                 if (ret < 0)
1790                         break;
1791
1792                 idx += ret;
1793                 sectors = max_t(s64, 0LL, sectors - ret);
1794                 ret = 0;
1795         }
1796
1797         return ret;
1798 }
1799
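/*
 * Transactional triggers, dispatched on key type: btree pointers and
 * extents update buckets and replicas lists, inodes and reservations only
 * adjust counters in the transaction's replicas delta list, and reflink
 * pointers update the indirect extents they reference.
 */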
1800 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
1801                         unsigned offset, s64 sectors, unsigned flags)
1802 {
1803         struct replicas_delta_list *d;
1804         struct bch_fs *c = trans->c;
1805
1806         switch (k.k->type) {
1807         case KEY_TYPE_btree_ptr:
1808         case KEY_TYPE_btree_ptr_v2:
1809                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1810                         ?  c->opts.btree_node_size
1811                         : -c->opts.btree_node_size;
1812
1813                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1814                                               flags, BCH_DATA_btree);
1815         case KEY_TYPE_extent:
1816         case KEY_TYPE_reflink_v:
1817                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1818                                               flags, BCH_DATA_user);
1819         case KEY_TYPE_inode:
1820                 d = replicas_deltas_realloc(trans, 0);
1821
1822                 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1823                         d->nr_inodes++;
1824                 else
1825                         d->nr_inodes--;
1826                 return 0;
1827         case KEY_TYPE_reservation: {
1828                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1829
1830                 d = replicas_deltas_realloc(trans, 0);
1831
1832                 sectors *= replicas;
1833                 replicas = clamp_t(unsigned, replicas, 1,
1834                                    ARRAY_SIZE(d->persistent_reserved));
1835
1836                 d->persistent_reserved[replicas - 1] += sectors;
1837                 return 0;
1838         }
1839         case KEY_TYPE_reflink_p:
1840                 return bch2_trans_mark_reflink_p(trans,
1841                                         bkey_s_c_to_reflink_p(k),
1842                                         offset, sectors, flags);
1843         default:
1844                 return 0;
1845         }
1846 }
1847
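/*
 * Run transactional triggers for an insert: mark the new key, then mark
 * what it overwrites - the cached key for cached iterators, the existing
 * key at the same position for other non-extent btrees, or the overlapping
 * range of each existing key for extents (mirroring the non-transactional
 * marking path earlier in this file).
 */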
1848 int bch2_trans_mark_update(struct btree_trans *trans,
1849                            struct btree_iter *iter,
1850                            struct bkey_i *insert,
1851                            unsigned flags)
1852 {
1853         struct btree            *b = iter_l(iter)->b;
1854         struct btree_node_iter  node_iter = iter_l(iter)->iter;
1855         struct bkey_packed      *_k;
1856         int ret;
1857
1858         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1859                 return 0;
1860
1861         if (!btree_node_type_needs_gc(iter->btree_id))
1862                 return 0;
1863
1864         ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
1865                         0, insert->k.size, BTREE_TRIGGER_INSERT);
1866         if (ret)
1867                 return ret;
1868
1869         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
1870                 struct bkey_cached *ck = (void *) iter->l[0].b;
1871
1872                 return bch2_trans_mark_key(trans, bkey_i_to_s_c(ck->k),
1873                                            0, 0, BTREE_TRIGGER_OVERWRITE);
1874         }
1875
1876         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1877                 struct bkey             unpacked;
1878                 struct bkey_s_c         k;
1879                 unsigned                offset = 0;
1880                 s64                     sectors = 0;
1881                 unsigned                flags = BTREE_TRIGGER_OVERWRITE;
1882
1883                 k = bkey_disassemble(b, _k, &unpacked);
1884
1885                 if (btree_node_is_extents(b)
1886                     ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
1887                     : bkey_cmp(insert->k.p, k.k->p))
1888                         break;
1889
1890                 if (btree_node_is_extents(b)) {
1891                         switch (bch2_extent_overlap(&insert->k, k.k)) {
1892                         case BCH_EXTENT_OVERLAP_ALL:
1893                                 offset = 0;
1894                                 sectors = -((s64) k.k->size);
1895                                 break;
1896                         case BCH_EXTENT_OVERLAP_BACK:
1897                                 offset = bkey_start_offset(&insert->k) -
1898                                         bkey_start_offset(k.k);
1899                                 sectors = bkey_start_offset(&insert->k) -
1900                                         k.k->p.offset;
1901                                 break;
1902                         case BCH_EXTENT_OVERLAP_FRONT:
1903                                 offset = 0;
1904                                 sectors = bkey_start_offset(k.k) -
1905                                         insert->k.p.offset;
1906                                 break;
1907                         case BCH_EXTENT_OVERLAP_MIDDLE:
1908                                 offset = bkey_start_offset(&insert->k) -
1909                                         bkey_start_offset(k.k);
1910                                 sectors = -((s64) insert->k.size);
1911                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1912                                 break;
1913                         }
1914
1915                         BUG_ON(sectors >= 0);
1916                 }
1917
1918                 ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
1919                 if (ret)
1920                         return ret;
1921
1922                 bch2_btree_node_iter_advance(&node_iter, b);
1923         }
1924
1925         return 0;
1926 }
1927
1928 /* Disk reservations: */
1929
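/*
 * Disk reservations are taken from a global pool (c->sectors_available)
 * with a small per-cpu cache in front of it.  This helper zeroes the
 * per-cpu caches and recomputes the global count from current filesystem
 * usage; it runs with mark_lock held for write when the fast path runs dry.
 */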
1930 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
1931 {
1932         percpu_u64_set(&c->pcpu->sectors_available, 0);
1933
1934         return avail_factor(__bch2_fs_usage_read_short(c).free);
1935 }
1936
1937 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
1938 {
1939         percpu_down_read(&c->mark_lock);
1940         this_cpu_sub(c->usage[0]->online_reserved,
1941                      res->sectors);
1942         percpu_up_read(&c->mark_lock);
1943
1944         res->sectors = 0;
1945 }
1946
1947 #define SECTORS_CACHE   1024
1948
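/*
 * Reserve @sectors for a pending write.  The fast path takes sectors from
 * this cpu's cache, refilling it from the global counter in batches of at
 * least SECTORS_CACHE; if the global counter can't cover the request we
 * fall back to recalculating under mark_lock and either succeed, succeed
 * unconditionally (BCH_DISK_RESERVATION_NOFAIL), or return -ENOSPC.
 *
 * Illustrative usage sketch (not taken from this file; zero-initialization
 * of the reservation and error handling are assumptions):
 *
 *	struct disk_reservation res = { 0 };
 *
 *	if (bch2_disk_reservation_add(c, &res, sectors, 0))
 *		return -ENOSPC;
 *	... do the write covered by the reservation ...
 *	__bch2_disk_reservation_put(c, &res);
 */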
1949 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1950                               unsigned sectors, int flags)
1951 {
1952         struct bch_fs_pcpu *pcpu;
1953         u64 old, v, get;
1954         s64 sectors_available;
1955         int ret;
1956
1957         percpu_down_read(&c->mark_lock);
1958         preempt_disable();
1959         pcpu = this_cpu_ptr(c->pcpu);
1960
1961         if (sectors <= pcpu->sectors_available)
1962                 goto out;
1963
1964         v = atomic64_read(&c->sectors_available);
1965         do {
1966                 old = v;
1967                 get = min((u64) sectors + SECTORS_CACHE, old);
1968
1969                 if (get < sectors) {
1970                         preempt_enable();
1971                         percpu_up_read(&c->mark_lock);
1972                         goto recalculate;
1973                 }
1974         } while ((v = atomic64_cmpxchg(&c->sectors_available,
1975                                        old, old - get)) != old);
1976
1977         pcpu->sectors_available         += get;
1978
1979 out:
1980         pcpu->sectors_available         -= sectors;
1981         this_cpu_add(c->usage[0]->online_reserved, sectors);
1982         res->sectors                    += sectors;
1983
1984         preempt_enable();
1985         percpu_up_read(&c->mark_lock);
1986         return 0;
1987
1988 recalculate:
1989         percpu_down_write(&c->mark_lock);
1990
1991         sectors_available = bch2_recalc_sectors_available(c);
1992
1993         if (sectors <= sectors_available ||
1994             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1995                 atomic64_set(&c->sectors_available,
1996                              max_t(s64, 0, sectors_available - sectors));
1997                 this_cpu_add(c->usage[0]->online_reserved, sectors);
1998                 res->sectors                    += sectors;
1999                 ret = 0;
2000         } else {
2001                 atomic64_set(&c->sectors_available, sectors_available);
2002                 ret = -ENOSPC;
2003         }
2004
2005         percpu_up_write(&c->mark_lock);
2006
2007         return ret;
2008 }
2009
2010 /* Startup/shutdown: */
2011
2012 static void buckets_free_rcu(struct rcu_head *rcu)
2013 {
2014         struct bucket_array *buckets =
2015                 container_of(rcu, struct bucket_array, rcu);
2016
2017         kvpfree(buckets,
2018                 sizeof(struct bucket_array) +
2019                 buckets->nbuckets * sizeof(struct bucket));
2020 }
2021
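/*
 * Allocate a bucket array, nouse bitmap and freelists sized for @nbuckets.
 * When resizing an existing device the old contents are copied across and
 * the new structures are swapped in under gc_lock, the device's bucket_lock
 * and mark_lock; the old bucket array is then freed via RCU.
 */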
2022 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2023 {
2024         struct bucket_array *buckets = NULL, *old_buckets = NULL;
2025         unsigned long *buckets_nouse = NULL;
2026         alloc_fifo      free[RESERVE_NR];
2027         alloc_fifo      free_inc;
2028         alloc_heap      alloc_heap;
2029
2030         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2031                              ca->mi.bucket_size / c->opts.btree_node_size);
2032         /* XXX: these should be tunable */
2033         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
2034         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
2035         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
2036                                       btree_reserve * 2);
2037         bool resize = ca->buckets[0] != NULL;
2038         int ret = -ENOMEM;
2039         unsigned i;
2040
2041         memset(&free,           0, sizeof(free));
2042         memset(&free_inc,       0, sizeof(free_inc));
2043         memset(&alloc_heap,     0, sizeof(alloc_heap));
2044
2045         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
2046                                             nbuckets * sizeof(struct bucket),
2047                                             GFP_KERNEL|__GFP_ZERO)) ||
2048             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2049                                             sizeof(unsigned long),
2050                                             GFP_KERNEL|__GFP_ZERO)) ||
2051             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
2052             !init_fifo(&free[RESERVE_MOVINGGC],
2053                        copygc_reserve, GFP_KERNEL) ||
2054             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2055             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
2056             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2057                 goto err;
2058
2059         buckets->first_bucket   = ca->mi.first_bucket;
2060         buckets->nbuckets       = nbuckets;
2061
2062         bch2_copygc_stop(c);
2063
2064         if (resize) {
2065                 down_write(&c->gc_lock);
2066                 down_write(&ca->bucket_lock);
2067                 percpu_down_write(&c->mark_lock);
2068         }
2069
2070         old_buckets = bucket_array(ca);
2071
2072         if (resize) {
2073                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2074
2075                 memcpy(buckets->b,
2076                        old_buckets->b,
2077                        n * sizeof(struct bucket));
2078                 memcpy(buckets_nouse,
2079                        ca->buckets_nouse,
2080                        BITS_TO_LONGS(n) * sizeof(unsigned long));
2081         }
2082
2083         rcu_assign_pointer(ca->buckets[0], buckets);
2084         buckets = old_buckets;
2085
2086         swap(ca->buckets_nouse, buckets_nouse);
2087
2088         if (resize) {
2089                 percpu_up_write(&c->mark_lock);
2090                 up_write(&c->gc_lock);
2091         }
2092
2093         spin_lock(&c->freelist_lock);
2094         for (i = 0; i < RESERVE_NR; i++) {
2095                 fifo_move(&free[i], &ca->free[i]);
2096                 swap(ca->free[i], free[i]);
2097         }
2098         fifo_move(&free_inc, &ca->free_inc);
2099         swap(ca->free_inc, free_inc);
2100         spin_unlock(&c->freelist_lock);
2101
2102         /* with gc lock held, alloc_heap can't be in use: */
2103         swap(ca->alloc_heap, alloc_heap);
2104
2105         nbuckets = ca->mi.nbuckets;
2106
2107         if (resize)
2108                 up_write(&ca->bucket_lock);
2109
2110         ret = 0;
2111 err:
2112         free_heap(&alloc_heap);
2113         free_fifo(&free_inc);
2114         for (i = 0; i < RESERVE_NR; i++)
2115                 free_fifo(&free[i]);
2116         kvpfree(buckets_nouse,
2117                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2118         if (buckets)
2119                 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2120
2121         return ret;
2122 }
2123
2124 void bch2_dev_buckets_free(struct bch_dev *ca)
2125 {
2126         unsigned i;
2127
2128         free_heap(&ca->alloc_heap);
2129         free_fifo(&ca->free_inc);
2130         for (i = 0; i < RESERVE_NR; i++)
2131                 free_fifo(&ca->free[i]);
2132         kvpfree(ca->buckets_nouse,
2133                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2134         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2135                 sizeof(struct bucket_array) +
2136                 ca->mi.nbuckets * sizeof(struct bucket));
2137
2138         free_percpu(ca->usage[0]);
2139 }
2140
2141 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2142 {
2143         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2144                 return -ENOMEM;
2145
2146         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2147 }