libbcachefs/buckets.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  * The bucket contains data that we must not discard (either the only copy,
24  *   or one of the 'main copies' for data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
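/*
 * Roughly, in terms of the bucket_mark fields used below (illustration only,
 * cf. bucket_type() and is_available_bucket()):
 *
 *	free:		mark == 0
 *	allocator:	mark.owned_by_allocator
 *	metadata:	mark.data_type is BCH_DATA_BTREE/BCH_DATA_SB/BCH_DATA_JOURNAL
 *	dirty:		mark.dirty_sectors != 0
 *	cached:		!mark.dirty_sectors && mark.cached_sectors != 0
 */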
65
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
79
80 /*
81  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
82  * wraparound:
83  */
84 void bch2_bucket_seq_cleanup(struct bch_fs *c)
85 {
86         u64 journal_seq = atomic64_read(&c->journal.seq);
87         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
88         struct bch_dev *ca;
89         struct bucket_array *buckets;
90         struct bucket *g;
91         struct bucket_mark m;
92         unsigned i;
93
94         if (journal_seq - c->last_bucket_seq_cleanup <
95             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
96                 return;
97
98         c->last_bucket_seq_cleanup = journal_seq;
99
100         for_each_member_device(ca, c, i) {
101                 down_read(&ca->bucket_lock);
102                 buckets = bucket_array(ca);
103
104                 for_each_bucket(g, buckets) {
105                         bucket_cmpxchg(g, m, ({
106                                 if (!m.journal_seq_valid ||
107                                     bucket_needs_journal_commit(m, last_seq_ondisk))
108                                         break;
109
110                                 m.journal_seq_valid = 0;
111                         }));
112                 }
113                 up_read(&ca->bucket_lock);
114         }
115 }
116
117 void bch2_fs_usage_initialize(struct bch_fs *c)
118 {
119         struct bch_fs_usage *usage;
120         unsigned i;
121
122         percpu_down_write(&c->mark_lock);
123         usage = c->usage_base;
124
125         bch2_fs_usage_acc_to_base(c, 0);
126         bch2_fs_usage_acc_to_base(c, 1);
127
128         for (i = 0; i < BCH_REPLICAS_MAX; i++)
129                 usage->reserved += usage->persistent_reserved[i];
130
131         for (i = 0; i < c->replicas.nr; i++) {
132                 struct bch_replicas_entry *e =
133                         cpu_replicas_entry(&c->replicas, i);
134
135                 switch (e->data_type) {
136                 case BCH_DATA_BTREE:
137                         usage->btree    += usage->replicas[i];
138                         break;
139                 case BCH_DATA_USER:
140                         usage->data     += usage->replicas[i];
141                         break;
142                 case BCH_DATA_CACHED:
143                         usage->cached   += usage->replicas[i];
144                         break;
145                 }
146         }
147
148         percpu_up_write(&c->mark_lock);
149 }
150
151 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
152 {
153         if (fs_usage == c->usage_scratch)
154                 mutex_unlock(&c->usage_scratch_lock);
155         else
156                 kfree(fs_usage);
157 }
158
159 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
160 {
161         struct bch_fs_usage *ret;
162         unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
163
164         ret = kzalloc(bytes, GFP_NOWAIT);
165         if (ret)
166                 return ret;
167
168         if (mutex_trylock(&c->usage_scratch_lock))
169                 goto out_pool;
170
171         ret = kzalloc(bytes, GFP_NOFS);
172         if (ret)
173                 return ret;
174
175         mutex_lock(&c->usage_scratch_lock);
176 out_pool:
177         ret = c->usage_scratch;
178         memset(ret, 0, bytes);
179         return ret;
180 }
181
182 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
183 {
184         struct bch_dev_usage ret;
185
186         memset(&ret, 0, sizeof(ret));
187         acc_u64s_percpu((u64 *) &ret,
188                         (u64 __percpu *) ca->usage[0],
189                         sizeof(ret) / sizeof(u64));
190
191         return ret;
192 }
193
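/*
 * Filesystem usage is kept as a base copy (c->usage_base) plus two percpu
 * deltas selected by the low bit of the journal sequence number (and a
 * separate gc copy): readers sum the base and both percpu copies under the
 * usage_lock seqcount, and bch2_fs_usage_acc_to_base() folds a percpu copy
 * back into the base.
 */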
194 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
195                                                 unsigned journal_seq,
196                                                 bool gc)
197 {
198         return this_cpu_ptr(gc
199                             ? c->usage_gc
200                             : c->usage[journal_seq & 1]);
201 }
202
203 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
204 {
205         ssize_t offset = v - (u64 *) c->usage_base;
206         unsigned seq;
207         u64 ret;
208
209         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
210         percpu_rwsem_assert_held(&c->mark_lock);
211
212         do {
213                 seq = read_seqcount_begin(&c->usage_lock);
214                 ret = *v +
215                         percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
216                         percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
217         } while (read_seqcount_retry(&c->usage_lock, seq));
218
219         return ret;
220 }
221
222 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
223 {
224         struct bch_fs_usage *ret;
225         unsigned seq, v, u64s = fs_usage_u64s(c);
226 retry:
227         ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
228         if (unlikely(!ret))
229                 return NULL;
230
231         percpu_down_read(&c->mark_lock);
232
233         v = fs_usage_u64s(c);
234         if (unlikely(u64s != v)) {
235                 u64s = v;
236                 percpu_up_read(&c->mark_lock);
237                 kfree(ret);
238                 goto retry;
239         }
240
241         do {
242                 seq = read_seqcount_begin(&c->usage_lock);
243                 memcpy(ret, c->usage_base, u64s * sizeof(u64));
244                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
245                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
246         } while (read_seqcount_retry(&c->usage_lock, seq));
247
248         return ret;
249 }
250
251 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
252 {
253         unsigned u64s = fs_usage_u64s(c);
254
255         BUG_ON(idx >= 2);
256
257         write_seqcount_begin(&c->usage_lock);
258
259         acc_u64s_percpu((u64 *) c->usage_base,
260                         (u64 __percpu *) c->usage[idx], u64s);
261         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
262
263         write_seqcount_end(&c->usage_lock);
264 }
265
266 void bch2_fs_usage_to_text(struct printbuf *out,
267                            struct bch_fs *c,
268                            struct bch_fs_usage *fs_usage)
269 {
270         unsigned i;
271
272         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
273
274         pr_buf(out, "hidden:\t\t\t\t%llu\n",
275                fs_usage->hidden);
276         pr_buf(out, "data:\t\t\t\t%llu\n",
277                fs_usage->data);
278         pr_buf(out, "cached:\t\t\t\t%llu\n",
279                fs_usage->cached);
280         pr_buf(out, "reserved:\t\t\t%llu\n",
281                fs_usage->reserved);
282         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
283                fs_usage->nr_inodes);
284         pr_buf(out, "online reserved:\t\t%llu\n",
285                fs_usage->online_reserved);
286
287         for (i = 0;
288              i < ARRAY_SIZE(fs_usage->persistent_reserved);
289              i++) {
290                 pr_buf(out, "%u replicas:\n", i + 1);
291                 pr_buf(out, "\treserved:\t\t%llu\n",
292                        fs_usage->persistent_reserved[i]);
293         }
294
295         for (i = 0; i < c->replicas.nr; i++) {
296                 struct bch_replicas_entry *e =
297                         cpu_replicas_entry(&c->replicas, i);
298
299                 pr_buf(out, "\t");
300                 bch2_replicas_entry_to_text(out, e);
301                 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
302         }
303 }
304
305 #define RESERVE_FACTOR  6
306
307 static u64 reserve_factor(u64 r)
308 {
309         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
310 }
311
312 static u64 avail_factor(u64 r)
313 {
314         return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
315 }
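/*
 * reserve_factor() pads a reservation by roughly 1/64th (rounded up to the
 * next multiple of 1 << RESERVE_FACTOR), and avail_factor() is its
 * approximate inverse: e.g. reserve_factor(1000) = 1000 + (1024 >> 6) = 1016,
 * and avail_factor(1016) = (1016 << 6) / 65 = 1000.
 */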
316
317 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
318 {
319         return min(fs_usage->hidden +
320                    fs_usage->btree +
321                    fs_usage->data +
322                    reserve_factor(fs_usage->reserved +
323                                   fs_usage->online_reserved),
324                    c->capacity);
325 }
326
327 static struct bch_fs_usage_short
328 __bch2_fs_usage_read_short(struct bch_fs *c)
329 {
330         struct bch_fs_usage_short ret;
331         u64 data, reserved;
332
333         ret.capacity = c->capacity -
334                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
335
336         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
337                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
338         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
339                 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
340
341         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
342         ret.free        = ret.capacity - ret.used;
343
344         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
345
346         return ret;
347 }
348
349 struct bch_fs_usage_short
350 bch2_fs_usage_read_short(struct bch_fs *c)
351 {
352         struct bch_fs_usage_short ret;
353
354         percpu_down_read(&c->mark_lock);
355         ret = __bch2_fs_usage_read_short(c);
356         percpu_up_read(&c->mark_lock);
357
358         return ret;
359 }
360
361 static inline int is_unavailable_bucket(struct bucket_mark m)
362 {
363         return !is_available_bucket(m);
364 }
365
366 static inline int is_fragmented_bucket(struct bucket_mark m,
367                                        struct bch_dev *ca)
368 {
369         if (!m.owned_by_allocator &&
370             m.data_type == BCH_DATA_USER &&
371             bucket_sectors_used(m))
372                 return max_t(int, 0, (int) ca->mi.bucket_size -
373                              bucket_sectors_used(m));
374         return 0;
375 }
376
377 static inline enum bch_data_type bucket_type(struct bucket_mark m)
378 {
379         return m.cached_sectors && !m.dirty_sectors
380                 ? BCH_DATA_CACHED
381                 : m.data_type;
382 }
383
384 static bool bucket_became_unavailable(struct bucket_mark old,
385                                       struct bucket_mark new)
386 {
387         return is_available_bucket(old) &&
388                !is_available_bucket(new);
389 }
390
391 int bch2_fs_usage_apply(struct bch_fs *c,
392                         struct bch_fs_usage *fs_usage,
393                         struct disk_reservation *disk_res,
394                         unsigned journal_seq)
395 {
396         s64 added = fs_usage->data + fs_usage->reserved;
397         s64 should_not_have_added;
398         int ret = 0;
399
400         percpu_rwsem_assert_held(&c->mark_lock);
401
402         /*
403          * Not allowed to reduce sectors_available except by getting a
404          * reservation:
405          */
406         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
407         if (WARN_ONCE(should_not_have_added > 0,
408                       "disk usage increased by %lli without a reservation",
409                       should_not_have_added)) {
410                 atomic64_sub(should_not_have_added, &c->sectors_available);
411                 added -= should_not_have_added;
412                 ret = -1;
413         }
414
415         if (added > 0) {
416                 disk_res->sectors               -= added;
417                 fs_usage->online_reserved       -= added;
418         }
419
420         preempt_disable();
421         acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
422                  (u64 *) fs_usage, fs_usage_u64s(c));
423         preempt_enable();
424
425         return ret;
426 }
427
428 static inline void account_bucket(struct bch_fs_usage *fs_usage,
429                                   struct bch_dev_usage *dev_usage,
430                                   enum bch_data_type type,
431                                   int nr, s64 size)
432 {
433         if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
434                 fs_usage->hidden        += size;
435
436         dev_usage->buckets[type]        += nr;
437 }
438
439 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
440                                   struct bch_fs_usage *fs_usage,
441                                   struct bucket_mark old, struct bucket_mark new,
442                                   bool gc)
443 {
444         struct bch_dev_usage *dev_usage;
445
446         percpu_rwsem_assert_held(&c->mark_lock);
447
448         preempt_disable();
449         dev_usage = this_cpu_ptr(ca->usage[gc]);
450
451         if (bucket_type(old))
452                 account_bucket(fs_usage, dev_usage, bucket_type(old),
453                                -1, -ca->mi.bucket_size);
454
455         if (bucket_type(new))
456                 account_bucket(fs_usage, dev_usage, bucket_type(new),
457                                1, ca->mi.bucket_size);
458
459         dev_usage->buckets_alloc +=
460                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
461         dev_usage->buckets_ec +=
462                 (int) new.stripe - (int) old.stripe;
463         dev_usage->buckets_unavailable +=
464                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
465
466         dev_usage->sectors[old.data_type] -= old.dirty_sectors;
467         dev_usage->sectors[new.data_type] += new.dirty_sectors;
468         dev_usage->sectors[BCH_DATA_CACHED] +=
469                 (int) new.cached_sectors - (int) old.cached_sectors;
470         dev_usage->sectors_fragmented +=
471                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
472         preempt_enable();
473
474         if (!is_available_bucket(old) && is_available_bucket(new))
475                 bch2_wake_allocator(ca);
476 }
477
478 void bch2_dev_usage_from_buckets(struct bch_fs *c)
479 {
480         struct bch_dev *ca;
481         struct bucket_mark old = { .v.counter = 0 };
482         struct bucket_array *buckets;
483         struct bucket *g;
484         unsigned i;
485         int cpu;
486
487         c->usage_base->hidden = 0;
488
489         for_each_member_device(ca, c, i) {
490                 for_each_possible_cpu(cpu)
491                         memset(per_cpu_ptr(ca->usage[0], cpu), 0,
492                                sizeof(*ca->usage[0]));
493
494                 buckets = bucket_array(ca);
495
496                 for_each_bucket(g, buckets)
497                         bch2_dev_usage_update(c, ca, c->usage_base,
498                                               old, g->mark, false);
499         }
500 }
501
502 static inline void update_replicas(struct bch_fs *c,
503                                    struct bch_fs_usage *fs_usage,
504                                    struct bch_replicas_entry *r,
505                                    s64 sectors)
506 {
507         int idx = bch2_replicas_entry_idx(c, r);
508
509         BUG_ON(idx < 0);
510
511         switch (r->data_type) {
512         case BCH_DATA_BTREE:
513                 fs_usage->btree         += sectors;
514                 break;
515         case BCH_DATA_USER:
516                 fs_usage->data          += sectors;
517                 break;
518         case BCH_DATA_CACHED:
519                 fs_usage->cached        += sectors;
520                 break;
521         }
522         fs_usage->replicas[idx]         += sectors;
523 }
524
525 static inline void update_cached_sectors(struct bch_fs *c,
526                                          struct bch_fs_usage *fs_usage,
527                                          unsigned dev, s64 sectors)
528 {
529         struct bch_replicas_padded r;
530
531         bch2_replicas_entry_cached(&r.e, dev);
532
533         update_replicas(c, fs_usage, &r.e, sectors);
534 }
535
536 static struct replicas_delta_list *
537 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
538 {
539         struct replicas_delta_list *d = trans->fs_usage_deltas;
540         unsigned new_size = d ? (d->size + more) * 2 : 128;
541
542         if (!d || d->used + more > d->size) {
543                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
544                 BUG_ON(!d);
545
546                 d->size = new_size;
547                 trans->fs_usage_deltas = d;
548         }
549         return d;
550 }
551
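/*
 * Accumulate a replicas sector delta in the transaction's delta list (applied
 * later via bch2_replicas_delta_list_apply()): each entry is an 8 byte delta
 * followed by its variable length bch_replicas_entry, packed back to back.
 */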
552 static inline void update_replicas_list(struct btree_trans *trans,
553                                         struct bch_replicas_entry *r,
554                                         s64 sectors)
555 {
556         struct replicas_delta_list *d;
557         struct replicas_delta *n;
558         unsigned b;
559
560         if (!sectors)
561                 return;
562
563         b = replicas_entry_bytes(r) + 8;
564         d = replicas_deltas_realloc(trans, b);
565
566         n = (void *) d->d + d->used;
567         n->delta = sectors;
568         memcpy(&n->r, r, replicas_entry_bytes(r));
569         d->used += b;
570 }
571
572 static inline void update_cached_sectors_list(struct btree_trans *trans,
573                                               unsigned dev, s64 sectors)
574 {
575         struct bch_replicas_padded r;
576
577         bch2_replicas_entry_cached(&r.e, dev);
578
579         update_replicas_list(trans, &r.e, sectors);
580 }
581
582 void bch2_replicas_delta_list_apply(struct bch_fs *c,
583                                     struct bch_fs_usage *fs_usage,
584                                     struct replicas_delta_list *r)
585 {
586         struct replicas_delta *d = r->d;
587         struct replicas_delta *top = (void *) r->d + r->used;
588
589         acc_u64s((u64 *) fs_usage,
590                  (u64 *) &r->fs_usage, sizeof(*fs_usage) / sizeof(u64));
591
592         while (d != top) {
593                 BUG_ON((void *) d > (void *) top);
594
595                 update_replicas(c, fs_usage, &d->r, d->delta);
596
597                 d = (void *) d + replicas_entry_bytes(&d->r) + 8;
598         }
599 }
600
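/*
 * Helper for the bucket marking functions below: runs @fn against the copy of
 * the bucket info selected by BCH_BUCKET_MARK_GC, plus the gc copy when gc
 * has already visited @pos, stopping on the first error.
 */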
601 #define do_mark_fn(fn, c, pos, flags, ...)                              \
602 ({                                                                      \
603         int gc, ret = 0;                                                \
604                                                                         \
605         percpu_rwsem_assert_held(&c->mark_lock);                        \
606                                                                         \
607         for (gc = 0; gc < 2 && !ret; gc++)                              \
608                 if (!gc == !(flags & BCH_BUCKET_MARK_GC) ||             \
609                     (gc && gc_visited(c, pos)))                         \
610                         ret = fn(c, __VA_ARGS__, gc);                   \
611         ret;                                                            \
612 })
613
614 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
615                                     size_t b, struct bucket_mark *ret,
616                                     bool gc)
617 {
618         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
619         struct bucket *g = __bucket(ca, b, gc);
620         struct bucket_mark old, new;
621
622         old = bucket_cmpxchg(g, new, ({
623                 BUG_ON(!is_available_bucket(new));
624
625                 new.owned_by_allocator  = true;
626                 new.data_type           = 0;
627                 new.cached_sectors      = 0;
628                 new.dirty_sectors       = 0;
629                 new.gen++;
630         }));
631
632         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
633
634         if (old.cached_sectors)
635                 update_cached_sectors(c, fs_usage, ca->dev_idx,
636                                       -((s64) old.cached_sectors));
637
638         if (!gc)
639                 *ret = old;
640         return 0;
641 }
642
643 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
644                             size_t b, struct bucket_mark *old)
645 {
646         do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
647                    ca, b, old);
648
649         if (!old->owned_by_allocator && old->cached_sectors)
650                 trace_invalidate(ca, bucket_to_sector(ca, b),
651                                  old->cached_sectors);
652 }
653
654 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
655                                     size_t b, bool owned_by_allocator,
656                                     bool gc)
657 {
658         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
659         struct bucket *g = __bucket(ca, b, gc);
660         struct bucket_mark old, new;
661
662         old = bucket_cmpxchg(g, new, ({
663                 new.owned_by_allocator  = owned_by_allocator;
664         }));
665
666         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
667
668         BUG_ON(!gc &&
669                !owned_by_allocator && !old.owned_by_allocator);
670
671         return 0;
672 }
673
674 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
675                             size_t b, bool owned_by_allocator,
676                             struct gc_pos pos, unsigned flags)
677 {
678         preempt_disable();
679
680         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
681                    ca, b, owned_by_allocator);
682
683         preempt_enable();
684 }
685
686 static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
687                            struct bch_fs_usage *fs_usage,
688                            u64 journal_seq, unsigned flags)
689 {
690         bool gc = flags & BCH_BUCKET_MARK_GC;
691         struct bkey_alloc_unpacked u;
692         struct bch_dev *ca;
693         struct bucket *g;
694         struct bucket_mark old, m;
695
696         /*
697          * alloc btree is read in by bch2_alloc_read, not gc:
698          */
699         if ((flags & BCH_BUCKET_MARK_GC) &&
700             !(flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE))
701                 return 0;
702
703         ca = bch_dev_bkey_exists(c, k.k->p.inode);
704
705         if (k.k->p.offset >= ca->mi.nbuckets)
706                 return 0;
707
708         g = __bucket(ca, k.k->p.offset, gc);
709         u = bch2_alloc_unpack(k);
710
711         old = bucket_cmpxchg(g, m, ({
712                 m.gen                   = u.gen;
713                 m.data_type             = u.data_type;
714                 m.dirty_sectors         = u.dirty_sectors;
715                 m.cached_sectors        = u.cached_sectors;
716
717                 if (journal_seq) {
718                         m.journal_seq_valid     = 1;
719                         m.journal_seq           = journal_seq;
720                 }
721         }));
722
723         if (!(flags & BCH_BUCKET_MARK_ALLOC_READ))
724                 bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);
725
726         g->io_time[READ]        = u.read_time;
727         g->io_time[WRITE]       = u.write_time;
728         g->oldest_gen           = u.oldest_gen;
729         g->gen_valid            = 1;
730
731         /*
732          * need to know if we're getting called from the invalidate path or
733          * not:
734          */
735
736         if ((flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE) &&
737             old.cached_sectors) {
738                 update_cached_sectors(c, fs_usage, ca->dev_idx,
739                                       -old.cached_sectors);
740                 trace_invalidate(ca, bucket_to_sector(ca, k.k->p.offset),
741                                  old.cached_sectors);
742         }
743
744         return 0;
745 }
746
747 #define checked_add(a, b)                                       \
748 ({                                                              \
749         unsigned _res = (unsigned) (a) + (b);                   \
750         bool overflow = _res > U16_MAX;                         \
751         if (overflow)                                           \
752                 _res = U16_MAX;                                 \
753         (a) = _res;                                             \
754         overflow;                                               \
755 })
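/*
 * Saturating add for the 16 bit sector counts; the expression evaluates to
 * true on overflow. E.g. starting from a == 60000, checked_add(a, 1000)
 * leaves a == 61000 and evaluates to false, while checked_add(a, 10000)
 * saturates a to U16_MAX and evaluates to true.
 */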
756
757 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
758                                        size_t b, enum bch_data_type type,
759                                        unsigned sectors, bool gc)
760 {
761         struct bucket *g = __bucket(ca, b, gc);
762         struct bucket_mark old, new;
763         bool overflow;
764
765         BUG_ON(type != BCH_DATA_SB &&
766                type != BCH_DATA_JOURNAL);
767
768         old = bucket_cmpxchg(g, new, ({
769                 new.data_type   = type;
770                 overflow = checked_add(new.dirty_sectors, sectors);
771         }));
772
773         bch2_fs_inconsistent_on(old.data_type &&
774                                 old.data_type != type, c,
775                 "different types of data in same bucket: %s, %s",
776                 bch2_data_types[old.data_type],
777                 bch2_data_types[type]);
778
779         bch2_fs_inconsistent_on(overflow, c,
780                 "bucket sector count overflow: %u + %u > U16_MAX",
781                 old.dirty_sectors, sectors);
782
783         if (c)
784                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
785                                       old, new, gc);
786
787         return 0;
788 }
789
790 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
791                                size_t b, enum bch_data_type type,
792                                unsigned sectors, struct gc_pos pos,
793                                unsigned flags)
794 {
795         BUG_ON(type != BCH_DATA_SB &&
796                type != BCH_DATA_JOURNAL);
797
798         preempt_disable();
799
800         if (likely(c)) {
801                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
802                            ca, b, type, sectors);
803         } else {
804                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
805         }
806
807         preempt_enable();
808 }
809
810 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
811 {
812         return DIV_ROUND_UP(sectors * n, d);
813 }
814
815 static s64 __ptr_disk_sectors_delta(unsigned old_size,
816                                     unsigned offset, s64 delta,
817                                     unsigned flags,
818                                     unsigned n, unsigned d)
819 {
820         BUG_ON(!n || !d);
821
822         if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
823                 BUG_ON(offset + -delta > old_size);
824
825                 return -disk_sectors_scaled(n, d, old_size) +
826                         disk_sectors_scaled(n, d, offset) +
827                         disk_sectors_scaled(n, d, old_size - offset + delta);
828         } else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
829                 BUG_ON(offset + -delta > old_size);
830
831                 return -disk_sectors_scaled(n, d, old_size) +
832                         disk_sectors_scaled(n, d, old_size + delta);
833         } else {
834                 return  disk_sectors_scaled(n, d, delta);
835         }
836 }
837
838 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
839                                   unsigned offset, s64 delta,
840                                   unsigned flags)
841 {
842         return __ptr_disk_sectors_delta(p.crc.live_size,
843                                         offset, delta, flags,
844                                         p.crc.compressed_size,
845                                         p.crc.uncompressed_size);
846 }
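/*
 * Example: for a pointer compressed 2:1 (compressed_size 32, uncompressed_size
 * and live_size 64), inserting the full extent accounts for
 * disk_sectors_scaled(32, 64, 64) == 32 disk sectors, while overwriting 16
 * live sectors (BCH_BUCKET_MARK_OVERWRITE, delta == -16) yields
 * -32 + disk_sectors_scaled(32, 64, 48) == -8.
 */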
847
848 static void bucket_set_stripe(struct bch_fs *c,
849                               const struct bch_stripe *v,
850                               struct bch_fs_usage *fs_usage,
851                               u64 journal_seq,
852                               unsigned flags)
853 {
854         bool enabled = !(flags & BCH_BUCKET_MARK_OVERWRITE);
855         bool gc = flags & BCH_BUCKET_MARK_GC;
856         unsigned i;
857
858         for (i = 0; i < v->nr_blocks; i++) {
859                 const struct bch_extent_ptr *ptr = v->ptrs + i;
860                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
861                 struct bucket *g = PTR_BUCKET(ca, ptr, gc);
862                 struct bucket_mark new, old;
863
864                 old = bucket_cmpxchg(g, new, ({
865                         new.stripe                      = enabled;
866                         if (journal_seq) {
867                                 new.journal_seq_valid   = 1;
868                                 new.journal_seq         = journal_seq;
869                         }
870                 }));
871
872                 bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
873
874                 /*
875                  * XXX write repair code for these, flag stripe as possibly bad
876                  */
877                 if (old.gen != ptr->gen)
878                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
879                                       "stripe with stale pointer");
880 #if 0
881                 /*
882                  * We'd like to check for these, but these checks don't work
883                  * yet:
884                  */
885                 if (old.stripe && enabled)
886                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
887                                       "multiple stripes using same bucket");
888
889                 if (!old.stripe && !enabled)
890                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
891                                       "deleting stripe but bucket not marked as stripe bucket");
892 #endif
893         }
894 }
895
896 static bool bch2_mark_pointer(struct bch_fs *c,
897                               struct extent_ptr_decoded p,
898                               s64 sectors, enum bch_data_type data_type,
899                               struct bch_fs_usage *fs_usage,
900                               u64 journal_seq, unsigned flags)
901 {
902         bool gc = flags & BCH_BUCKET_MARK_GC;
903         struct bucket_mark old, new;
904         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
905         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
906         bool overflow;
907         u64 v;
908
909         v = atomic64_read(&g->_mark.v);
910         do {
911                 new.v.counter = old.v.counter = v;
912
913                 /*
914                  * Check this after reading bucket mark to guard against
915                  * the allocator invalidating a bucket after we've already
916                  * checked the gen
917                  */
918                 if (gen_after(p.ptr.gen, new.gen)) {
919                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
920                                       "pointer gen in the future");
921                         return true;
922                 }
923
924                 if (new.gen != p.ptr.gen) {
925                         /* XXX write repair code for this */
926                         if (!p.ptr.cached &&
927                             test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
928                                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
929                                               "stale dirty pointer");
930                         return true;
931                 }
932
933                 if (!p.ptr.cached)
934                         overflow = checked_add(new.dirty_sectors, sectors);
935                 else
936                         overflow = checked_add(new.cached_sectors, sectors);
937
938                 if (!new.dirty_sectors &&
939                     !new.cached_sectors) {
940                         new.data_type   = 0;
941
942                         if (journal_seq) {
943                                 new.journal_seq_valid = 1;
944                                 new.journal_seq = journal_seq;
945                         }
946                 } else {
947                         new.data_type = data_type;
948                 }
949
950                 if (flags & BCH_BUCKET_MARK_NOATOMIC) {
951                         g->_mark = new;
952                         break;
953                 }
954         } while ((v = atomic64_cmpxchg(&g->_mark.v,
955                               old.v.counter,
956                               new.v.counter)) != old.v.counter);
957
958         if (old.data_type && old.data_type != data_type)
959                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
960                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
961                         p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
962                         new.gen,
963                         bch2_data_types[old.data_type],
964                         bch2_data_types[data_type]);
965
966         bch2_fs_inconsistent_on(overflow, c,
967                 "bucket sector count overflow: %u + %lli > U16_MAX",
968                 !p.ptr.cached
969                 ? old.dirty_sectors
970                 : old.cached_sectors, sectors);
971
972         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
973
974         BUG_ON(!gc && bucket_became_unavailable(old, new));
975
976         return false;
977 }
978
979 static int bch2_mark_stripe_ptr(struct bch_fs *c,
980                                 struct bch_extent_stripe_ptr p,
981                                 enum bch_data_type data_type,
982                                 struct bch_fs_usage *fs_usage,
983                                 s64 sectors, unsigned flags,
984                                 struct bch_replicas_padded *r,
985                                 unsigned *nr_data,
986                                 unsigned *nr_parity)
987 {
988         bool gc = flags & BCH_BUCKET_MARK_GC;
989         struct stripe *m;
990         unsigned old, new;
991         int blocks_nonempty_delta;
992
993         m = genradix_ptr(&c->stripes[gc], p.idx);
994
995         spin_lock(&c->ec_stripes_heap_lock);
996
997         if (!m || !m->alive) {
998                 spin_unlock(&c->ec_stripes_heap_lock);
999                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1000                                     (u64) p.idx);
1001                 return -EIO;
1002         }
1003
1004         BUG_ON(m->r.e.data_type != data_type);
1005
1006         *nr_data        = m->nr_blocks - m->nr_redundant;
1007         *nr_parity      = m->nr_redundant;
1008         *r = m->r;
1009
1010         old = m->block_sectors[p.block];
1011         m->block_sectors[p.block] += sectors;
1012         new = m->block_sectors[p.block];
1013
1014         blocks_nonempty_delta = (int) !!new - (int) !!old;
1015         if (blocks_nonempty_delta) {
1016                 m->blocks_nonempty += blocks_nonempty_delta;
1017
1018                 if (!gc)
1019                         bch2_stripes_heap_update(c, m, p.idx);
1020         }
1021
1022         m->dirty = true;
1023
1024         spin_unlock(&c->ec_stripes_heap_lock);
1025
1026         return 0;
1027 }
1028
1029 static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
1030                             unsigned offset, s64 sectors,
1031                             enum bch_data_type data_type,
1032                             struct bch_fs_usage *fs_usage,
1033                             unsigned journal_seq, unsigned flags)
1034 {
1035         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1036         const union bch_extent_entry *entry;
1037         struct extent_ptr_decoded p;
1038         struct bch_replicas_padded r;
1039         s64 dirty_sectors = 0;
1040         int ret;
1041
1042         r.e.data_type   = data_type;
1043         r.e.nr_devs     = 0;
1044         r.e.nr_required = 1;
1045
1046         BUG_ON(!sectors);
1047
1048         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1049                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1050                         ? sectors
1051                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1052                 bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type,
1053                                                fs_usage, journal_seq, flags);
1054
1055                 if (p.ptr.cached) {
1056                         if (!stale)
1057                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1058                                                       disk_sectors);
1059                 } else if (!p.has_ec) {
1060                         dirty_sectors          += disk_sectors;
1061                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1062                 } else {
1063                         struct bch_replicas_padded ec_r;
1064                         unsigned nr_data, nr_parity;
1065                         s64 parity_sectors;
1066
1067                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1068                                         fs_usage, disk_sectors, flags,
1069                                         &ec_r, &nr_data, &nr_parity);
1070                         if (ret)
1071                                 return ret;
1072
1073                         parity_sectors =
1074                                 __ptr_disk_sectors_delta(p.crc.live_size,
1075                                         offset, sectors, flags,
1076                                         p.crc.compressed_size * nr_parity,
1077                                         p.crc.uncompressed_size * nr_data);
1078
1079                         update_replicas(c, fs_usage, &ec_r.e,
1080                                         disk_sectors + parity_sectors);
1081
1082                         /*
1083                          * There may be other dirty pointers in this extent, but
1084                          * if so they're not required for mounting if we have an
1085                          * erasure coded pointer in this extent:
1086                          */
1087                         r.e.nr_required = 0;
1088                 }
1089         }
1090
1091         if (r.e.nr_devs)
1092                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1093
1094         return 0;
1095 }
1096
1097 static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
1098                             struct bch_fs_usage *fs_usage,
1099                             u64 journal_seq, unsigned flags)
1100 {
1101         bool gc = flags & BCH_BUCKET_MARK_GC;
1102         struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
1103         size_t idx = s.k->p.offset;
1104         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1105         unsigned i;
1106
1107         spin_lock(&c->ec_stripes_heap_lock);
1108
1109         if (!m || ((flags & BCH_BUCKET_MARK_OVERWRITE) && !m->alive)) {
1110                 spin_unlock(&c->ec_stripes_heap_lock);
1111                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1112                                     idx);
1113                 return -1;
1114         }
1115
1116         if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) {
1117                 m->sectors      = le16_to_cpu(s.v->sectors);
1118                 m->algorithm    = s.v->algorithm;
1119                 m->nr_blocks    = s.v->nr_blocks;
1120                 m->nr_redundant = s.v->nr_redundant;
1121
1122                 bch2_bkey_to_replicas(&m->r.e, k);
1123
1124                 /*
1125                  * XXX: account for stripes somehow here
1126                  */
1127 #if 0
1128                 update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
1129 #endif
1130
1131                 /* gc recalculates these fields: */
1132                 if (!(flags & BCH_BUCKET_MARK_GC)) {
1133                         for (i = 0; i < s.v->nr_blocks; i++) {
1134                                 m->block_sectors[i] =
1135                                         stripe_blockcount_get(s.v, i);
1136                                 m->blocks_nonempty += !!m->block_sectors[i];
1137                         }
1138                 }
1139
1140                 if (!gc)
1141                         bch2_stripes_heap_update(c, m, idx);
1142                 m->alive        = true;
1143         } else {
1144                 if (!gc)
1145                         bch2_stripes_heap_del(c, m, idx);
1146                 memset(m, 0, sizeof(*m));
1147         }
1148
1149         spin_unlock(&c->ec_stripes_heap_lock);
1150
1151         bucket_set_stripe(c, s.v, fs_usage, 0, flags);
1152         return 0;
1153 }
1154
1155 int bch2_mark_key_locked(struct bch_fs *c,
1156                    struct bkey_s_c k,
1157                    unsigned offset, s64 sectors,
1158                    struct bch_fs_usage *fs_usage,
1159                    u64 journal_seq, unsigned flags)
1160 {
1161         int ret = 0;
1162
1163         preempt_disable();
1164
1165         if (!fs_usage || (flags & BCH_BUCKET_MARK_GC))
1166                 fs_usage = fs_usage_ptr(c, journal_seq,
1167                                         flags & BCH_BUCKET_MARK_GC);
1168
1169         switch (k.k->type) {
1170         case KEY_TYPE_alloc:
1171                 ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
1172                 break;
1173         case KEY_TYPE_btree_ptr:
1174                 sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
1175                         ?  c->opts.btree_node_size
1176                         : -c->opts.btree_node_size;
1177
1178                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
1179                                 fs_usage, journal_seq, flags);
1180                 break;
1181         case KEY_TYPE_extent:
1182         case KEY_TYPE_reflink_v:
1183                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
1184                                 fs_usage, journal_seq, flags);
1185                 break;
1186         case KEY_TYPE_stripe:
1187                 ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
1188                 break;
1189         case KEY_TYPE_inode:
1190                 if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
1191                         fs_usage->nr_inodes++;
1192                 else
1193                         fs_usage->nr_inodes--;
1194                 break;
1195         case KEY_TYPE_reservation: {
1196                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1197
1198                 sectors *= replicas;
1199                 replicas = clamp_t(unsigned, replicas, 1,
1200                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1201
1202                 fs_usage->reserved                              += sectors;
1203                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1204                 break;
1205         }
1206         }
1207
1208         preempt_enable();
1209
1210         return ret;
1211 }
1212
1213 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
1214                   unsigned offset, s64 sectors,
1215                   struct bch_fs_usage *fs_usage,
1216                   u64 journal_seq, unsigned flags)
1217 {
1218         int ret;
1219
1220         percpu_down_read(&c->mark_lock);
1221         ret = bch2_mark_key_locked(c, k, offset, sectors,
1222                                    fs_usage, journal_seq, flags);
1223         percpu_up_read(&c->mark_lock);
1224
1225         return ret;
1226 }
1227
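/*
 * Account for the part of @old that is shadowed by @new, expressed for
 * extents as an (offset, sectors) pair with sectors < 0; e.g. (illustrative
 * numbers) if old covers [10, 20) and new covers [5, 15), the front 5 sectors
 * of old are overwritten: offset 0, sectors -5.
 */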
1228 inline int bch2_mark_overwrite(struct btree_trans *trans,
1229                                struct btree_iter *iter,
1230                                struct bkey_s_c old,
1231                                struct bkey_i *new,
1232                                struct bch_fs_usage *fs_usage,
1233                                unsigned flags)
1234 {
1235         struct bch_fs           *c = trans->c;
1236         struct btree            *b = iter->l[0].b;
1237         unsigned                offset = 0;
1238         s64                     sectors = 0;
1239
1240         flags |= BCH_BUCKET_MARK_OVERWRITE;
1241
1242         if (btree_node_is_extents(b)
1243             ? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
1244             : bkey_cmp(new->k.p, old.k->p))
1245                 return 0;
1246
1247         if (btree_node_is_extents(b)) {
1248                 switch (bch2_extent_overlap(&new->k, old.k)) {
1249                 case BCH_EXTENT_OVERLAP_ALL:
1250                         offset = 0;
1251                         sectors = -((s64) old.k->size);
1252                         break;
1253                 case BCH_EXTENT_OVERLAP_BACK:
1254                         offset = bkey_start_offset(&new->k) -
1255                                 bkey_start_offset(old.k);
1256                         sectors = bkey_start_offset(&new->k) -
1257                                 old.k->p.offset;
1258                         break;
1259                 case BCH_EXTENT_OVERLAP_FRONT:
1260                         offset = 0;
1261                         sectors = bkey_start_offset(old.k) -
1262                                 new->k.p.offset;
1263                         break;
1264                 case BCH_EXTENT_OVERLAP_MIDDLE:
1265                         offset = bkey_start_offset(&new->k) -
1266                                 bkey_start_offset(old.k);
1267                         sectors = -((s64) new->k.size);
1268                         flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
1269                         break;
1270                 }
1271
1272                 BUG_ON(sectors >= 0);
1273         }
1274
1275         return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
1276                                     trans->journal_res.seq, flags) ?: 1;
1277 }
1278
1279 int bch2_mark_update(struct btree_trans *trans,
1280                      struct btree_insert_entry *insert,
1281                      struct bch_fs_usage *fs_usage,
1282                      unsigned flags)
1283 {
1284         struct bch_fs           *c = trans->c;
1285         struct btree_iter       *iter = insert->iter;
1286         struct btree            *b = iter->l[0].b;
1287         struct btree_node_iter  node_iter = iter->l[0].iter;
1288         struct bkey_packed      *_k;
1289         int ret = 0;
1290
1291         if (!btree_node_type_needs_gc(iter->btree_id))
1292                 return 0;
1293
1294         bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k),
1295                 0, insert->k->k.size,
1296                 fs_usage, trans->journal_res.seq,
1297                 BCH_BUCKET_MARK_INSERT|flags);
1298
1299         if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
1300                 return 0;
1301
1302         /*
1303          * For non extents, we only mark the new key, not the key being
1304          * overwritten - unless we're actually deleting:
1305          */
1306         if ((iter->btree_id == BTREE_ID_ALLOC ||
1307              iter->btree_id == BTREE_ID_EC) &&
1308             !bkey_deleted(&insert->k->k))
1309                 return 0;
1310
1311         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1312                                                       KEY_TYPE_discard))) {
1313                 struct bkey             unpacked;
1314                 struct bkey_s_c         k = bkey_disassemble(b, _k, &unpacked);
1315
1316                 ret = bch2_mark_overwrite(trans, iter, k, insert->k,
1317                                           fs_usage, flags);
1318                 if (ret <= 0)
1319                         break;
1320
1321                 bch2_btree_node_iter_advance(&node_iter, b);
1322         }
1323
1324         return ret;
1325 }
1326
1327 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1328                                struct bch_fs_usage *fs_usage)
1329 {
1330         struct bch_fs *c = trans->c;
1331         struct btree_insert_entry *i;
1332         static int warned_disk_usage = 0;
1333         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1334         char buf[200];
1335
1336         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1337                                  trans->journal_res.seq) ||
1338             warned_disk_usage ||
1339             xchg(&warned_disk_usage, 1))
1340                 return;
1341
1342         bch_err(c, "disk usage increased more than %llu sectors reserved",
1343                 disk_res_sectors);
1344
1345         trans_for_each_update(trans, i) {
1346                 struct btree_iter       *iter = i->iter;
1347                 struct btree            *b = iter->l[0].b;
1348                 struct btree_node_iter  node_iter = iter->l[0].iter;
1349                 struct bkey_packed      *_k;
1350
1351                 pr_err("while inserting");
1352                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1353                 pr_err("%s", buf);
1354                 pr_err("overlapping with");
1355
1356                 node_iter = iter->l[0].iter;
1357                 while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1358                                                         KEY_TYPE_discard))) {
1359                         struct bkey             unpacked;
1360                         struct bkey_s_c         k;
1361
1362                         k = bkey_disassemble(b, _k, &unpacked);
1363
1364                         if (btree_node_is_extents(b)
1365                             ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1366                             : bkey_cmp(i->k->k.p, k.k->p))
1367                                 break;
1368
1369                         bch2_bkey_val_to_text(&PBUF(buf), c, k);
1370                         pr_err("%s", buf);
1371
1372                         bch2_btree_node_iter_advance(&node_iter, b);
1373                 }
1374         }
1375 }
1376
1377 /* trans_mark: */
1378
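/*
 * Look up the key at @pos for marking purposes: returns 1 if @pos is covered
 * by an update already queued in @trans (using that update's value), 0 if the
 * key was read from the btree, or a negative error code.
 */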
1379 static int trans_get_key(struct btree_trans *trans,
1380                          enum btree_id btree_id, struct bpos pos,
1381                          struct btree_iter **iter,
1382                          struct bkey_s_c *k)
1383 {
1384         struct btree_insert_entry *i;
1385         int ret;
1386
1387         trans_for_each_update(trans, i)
1388                 if (i->iter->btree_id == btree_id &&
1389                     (btree_node_type_is_extents(btree_id)
1390                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1391                        bkey_cmp(pos, i->k->k.p) < 0
1392                      : !bkey_cmp(pos, i->iter->pos))) {
1393                         *iter   = i->iter;
1394                         *k      = bkey_i_to_s_c(i->k);
1395                         return 1;
1396                 }
1397
1398         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1399                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
1400         if (IS_ERR(*iter))
1401                 return PTR_ERR(*iter);
1402
1403         *k = bch2_btree_iter_peek_slot(*iter);
1404         ret = bkey_err(*k);
1405         if (ret)
1406                 bch2_trans_iter_put(trans, *iter);
1407         return ret;
1408 }
1409
1410 static void *trans_update_key(struct btree_trans *trans,
1411                               struct btree_iter *iter,
1412                               unsigned u64s)
1413 {
1414         struct btree_insert_entry *i;
1415         struct bkey_i *new_k;
1416
1417         new_k = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
1418         if (IS_ERR(new_k))
1419                 return new_k;
1420
1421         bkey_init(&new_k->k);
1422         new_k->k.p = iter->pos;
1423
1424         trans_for_each_update(trans, i)
1425                 if (i->iter == iter) {
1426                         i->k = new_k;
1427                         return new_k;
1428                 }
1429
1430         bch2_trans_update(trans, iter, new_k);
1431         return new_k;
1432 }
1433
1434 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1435                         struct extent_ptr_decoded p,
1436                         s64 sectors, enum bch_data_type data_type)
1437 {
1438         struct bch_fs *c = trans->c;
1439         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1440         struct btree_iter *iter;
1441         struct bkey_s_c k;
1442         struct bkey_alloc_unpacked u;
1443         struct bkey_i_alloc *a;
1444         unsigned old;
1445         bool overflow;
1446         int ret;
1447
1448         ret = trans_get_key(trans, BTREE_ID_ALLOC,
1449                             POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr)),
1450                             &iter, &k);
1451         if (ret < 0)
1452                 return ret;
1453
1454         if (!ret) {
1455                 /*
1456                  * During journal replay, and if gc repairs alloc info at
1457                  * runtime, the alloc info in the btree might not be up to date
1458                  * yet - so, trust the in memory mark:
1459                  */
1460                 struct bucket *g;
1461                 struct bucket_mark m;
1462
1463                 percpu_down_read(&c->mark_lock);
1464                 g       = bucket(ca, iter->pos.offset);
1465                 m       = READ_ONCE(g->mark);
1466                 u       = alloc_mem_to_key(g, m);
1467                 percpu_up_read(&c->mark_lock);
1468         } else {
1469                 /*
1470                  * We're already updating this key in the current transaction - use the pending update:
1471                  */
1472                 if (k.k->type != KEY_TYPE_alloc) {
1473                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1474                                       "pointer to nonexistent bucket %llu:%llu",
1475                                       iter->pos.inode, iter->pos.offset);
1476                         ret = -1;
1477                         goto out;
1478                 }
1479
1480                 u = bch2_alloc_unpack(k);
1481         }
1482
1483         if (gen_after(u.gen, p.ptr.gen)) {
1484                 ret = 1;
1485                 goto out;
1486         }
1487
1488         if (u.data_type && u.data_type != data_type) {
1489                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1490                         "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s",
1491                         iter->pos.inode, iter->pos.offset,
1492                         u.gen,
1493                         bch2_data_types[u.data_type],
1494                         bch2_data_types[data_type]);
1495                 ret = -1;
1496                 goto out;
1497         }
1498
1499         if (!p.ptr.cached) {
1500                 old = u.dirty_sectors;
1501                 overflow = checked_add(u.dirty_sectors, sectors);
1502         } else {
1503                 old = u.cached_sectors;
1504                 overflow = checked_add(u.cached_sectors, sectors);
1505         }
1506
1507         u.data_type = u.dirty_sectors || u.cached_sectors
1508                 ? data_type : 0;
1509
1510         bch2_fs_inconsistent_on(overflow, c,
1511                 "bucket sector count overflow: %u + %lli > U16_MAX",
1512                 old, sectors);
1513         BUG_ON(overflow);
1514
1515         a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
1516         ret = PTR_ERR_OR_ZERO(a);
1517         if (ret)
1518                 goto out;
1519
1520         bkey_alloc_init(&a->k_i);
1521         a->k.p = iter->pos;
1522         bch2_alloc_pack(a, u);
1523 out:
1524         bch2_trans_iter_put(trans, iter);
1525         return ret;
1526 }
1527
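/*
 * Account @sectors against the erasure coded stripe referenced by @p: bump
 * that block's sector count in the stripe key within the current transaction,
 * and report the stripe's replicas entry and data/parity geometry back to the
 * caller via @r, @nr_data and @nr_parity.
 */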
1528 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1529                         struct bch_extent_stripe_ptr p,
1530                         s64 sectors, enum bch_data_type data_type,
1531                         struct bch_replicas_padded *r,
1532                         unsigned *nr_data,
1533                         unsigned *nr_parity)
1534 {
1535         struct bch_fs *c = trans->c;
1536         struct btree_iter *iter;
1537         struct bkey_i *new_k;
1538         struct bkey_s_c k;
1539         struct bkey_s_stripe s;
1540         int ret = 0;
1541
1542         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1543         if (ret < 0)
1544                 return ret;
1545
1546         if (k.k->type != KEY_TYPE_stripe) {
1547                 bch2_fs_inconsistent(c,
1548                         "pointer to nonexistent stripe %llu",
1549                         (u64) p.idx);
1550                 ret = -EIO;
1551                 goto out;
1552         }
1553
1554         new_k = trans_update_key(trans, iter, k.k->u64s);
1555         ret = PTR_ERR_OR_ZERO(new_k);
1556         if (ret)
1557                 goto out;
1558
1559         bkey_reassemble(new_k, k);
1560         s = bkey_i_to_s_stripe(new_k);
1561
1562         stripe_blockcount_set(s.v, p.block,
1563                 stripe_blockcount_get(s.v, p.block) +
1564                 sectors);
1565
1566         *nr_data        = s.v->nr_blocks - s.v->nr_redundant;
1567         *nr_parity      = s.v->nr_redundant;
1568         bch2_bkey_to_replicas(&r->e, s.s_c);
1569 out:
1570         bch2_trans_iter_put(trans, iter);
1571         return ret;
1572 }
1573
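/*
 * Transactionally account an extent's pointers: cached pointers adjust the
 * per-device cached sector counts, erasure coded pointers are accounted
 * against their stripe, and the remaining dirty pointers are accumulated into
 * a single replicas entry.
 */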
1574 static int bch2_trans_mark_extent(struct btree_trans *trans,
1575                         struct bkey_s_c k, unsigned offset,
1576                         s64 sectors, unsigned flags,
1577                         enum bch_data_type data_type)
1578 {
1579         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1580         const union bch_extent_entry *entry;
1581         struct extent_ptr_decoded p;
1582         struct bch_replicas_padded r;
1583         s64 dirty_sectors = 0;
1584         bool stale;
1585         int ret;
1586
1587         r.e.data_type   = data_type;
1588         r.e.nr_devs     = 0;
1589         r.e.nr_required = 1;
1590
1591         BUG_ON(!sectors);
1592
1593         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1594                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1595                         ? sectors
1596                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1597
1598                 ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
1599                                               data_type);
1600                 if (ret < 0)
1601                         return ret;
1602
1603                 stale = ret > 0;
1604
1605                 if (p.ptr.cached) {
1606                         if (!stale)
1607                                 update_cached_sectors_list(trans, p.ptr.dev,
1608                                                            disk_sectors);
1609                 } else if (!p.has_ec) {
1610                         dirty_sectors          += disk_sectors;
1611                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1612                 } else {
1613                         struct bch_replicas_padded ec_r;
1614                         unsigned nr_data, nr_parity;
1615                         s64 parity_sectors;
1616
1617                         ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
1618                                         disk_sectors, data_type,
1619                                         &ec_r, &nr_data, &nr_parity);
1620                         if (ret)
1621                                 return ret;
1622
1623                         parity_sectors =
1624                                 __ptr_disk_sectors_delta(p.crc.live_size,
1625                                         offset, sectors, flags,
1626                                         p.crc.compressed_size * nr_parity,
1627                                         p.crc.uncompressed_size * nr_data);
1628
1629                         update_replicas_list(trans, &ec_r.e,
1630                                              disk_sectors + parity_sectors);
1631
1632                         r.e.nr_required = 0;
1633                 }
1634         }
1635
1636         if (r.e.nr_devs)
1637                 update_replicas_list(trans, &r.e, dirty_sectors);
1638
1639         return 0;
1640 }
1641
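/*
 * Adjust the refcount on the indirect extent at @idx referenced by @p:
 * incremented on insert, decremented on overwrite, with the reflink_v key
 * deleted once the refcount hits zero.  The refcount is left alone when an
 * overwrite only covers part of the indirect extent.
 *
 * Returns the number of sectors from @idx to the end of the indirect extent
 * (so the caller can advance), or a negative error code.
 */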
1642 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1643                         struct bkey_s_c_reflink_p p,
1644                         u64 idx, unsigned sectors,
1645                         unsigned flags)
1646 {
1647         struct bch_fs *c = trans->c;
1648         struct btree_iter *iter;
1649         struct bkey_i *new_k;
1650         struct bkey_s_c k;
1651         struct bkey_i_reflink_v *r_v;
1652         s64 ret;
1653
1654         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1655                             POS(0, idx), &iter, &k);
1656         if (ret < 0)
1657                 return ret;
1658
1659         if (k.k->type != KEY_TYPE_reflink_v) {
1660                 bch2_fs_inconsistent(c,
1661                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1662                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1663                 ret = -EIO;
1664                 goto err;
1665         }
1666
1667         if ((flags & BCH_BUCKET_MARK_OVERWRITE) &&
1668             (bkey_start_offset(k.k) < idx ||
1669              k.k->p.offset > idx + sectors))
1670                 goto out;
1671
1672         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1673         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1674
1675         new_k = trans_update_key(trans, iter, k.k->u64s);
1676         ret = PTR_ERR_OR_ZERO(new_k);
1677         if (ret)
1678                 goto err;
1679
1680         bkey_reassemble(new_k, k);
1681         r_v = bkey_i_to_reflink_v(new_k);
1682
1683         le64_add_cpu(&r_v->v.refcount,
1684                      !(flags & BCH_BUCKET_MARK_OVERWRITE) ? 1 : -1);
1685
1686         if (!r_v->v.refcount) {
1687                 r_v->k.type = KEY_TYPE_deleted;
1688                 set_bkey_val_u64s(&r_v->k, 0);
1689         }
1690 out:
1691         ret = k.k->p.offset - idx;
1692 err:
1693         bch2_trans_iter_put(trans, iter);
1694         return ret;
1695 }
1696
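/*
 * Walk the indirect extents covered by the reflink pointer @p, adjusting each
 * one's refcount via __bch2_trans_mark_reflink_p().
 */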
1697 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1698                         struct bkey_s_c_reflink_p p, unsigned offset,
1699                         s64 sectors, unsigned flags)
1700 {
1701         u64 idx = le64_to_cpu(p.v->idx) + offset;
1702         s64 ret = 0;
1703
1704         sectors = abs(sectors);
1705         BUG_ON(offset + sectors > p.k->size);
1706
1707         while (sectors) {
1708                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1709                 if (ret < 0)
1710                         break;
1711
1712                 idx += ret;
1713                 sectors = max_t(s64, 0LL, sectors - ret);
1714                 ret = 0;
1715         }
1716
1717         return ret;
1718 }
1719
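/*
 * Transactional mark for a single key: dispatch on the key type and record
 * the resulting disk accounting changes (replicas, inode counts, persistent
 * reservations, reflink refcounts) as btree updates and replica deltas in the
 * current transaction.
 */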
1720 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
1721                         unsigned offset, s64 sectors, unsigned flags)
1722 {
1723         struct replicas_delta_list *d;
1724         struct bch_fs *c = trans->c;
1725
1726         switch (k.k->type) {
1727         case KEY_TYPE_btree_ptr:
1728                 sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
1729                         ?  c->opts.btree_node_size
1730                         : -c->opts.btree_node_size;
1731
1732                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1733                                               flags, BCH_DATA_BTREE);
1734         case KEY_TYPE_extent:
1735         case KEY_TYPE_reflink_v:
1736                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1737                                               flags, BCH_DATA_USER);
1738         case KEY_TYPE_inode:
1739                 d = replicas_deltas_realloc(trans, 0);
1740
1741                 if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
1742                         d->fs_usage.nr_inodes++;
1743                 else
1744                         d->fs_usage.nr_inodes--;
1745                 return 0;
1746         case KEY_TYPE_reservation: {
1747                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1748
1749                 d = replicas_deltas_realloc(trans, 0);
1750
1751                 sectors *= replicas;
1752                 replicas = clamp_t(unsigned, replicas, 1,
1753                                    ARRAY_SIZE(d->fs_usage.persistent_reserved));
1754
1755                 d->fs_usage.reserved                            += sectors;
1756                 d->fs_usage.persistent_reserved[replicas - 1]   += sectors;
1757                 return 0;
1758         }
1759         case KEY_TYPE_reflink_p:
1760                 return bch2_trans_mark_reflink_p(trans,
1761                                         bkey_s_c_to_reflink_p(k),
1762                                         offset, sectors, flags);
1763         default:
1764                 return 0;
1765         }
1766 }
1767
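/*
 * Mark a pending btree update: account the key being inserted, then walk the
 * existing keys it overlaps and account the overwritten portions with
 * BCH_BUCKET_MARK_OVERWRITE.
 */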
1768 int bch2_trans_mark_update(struct btree_trans *trans,
1769                            struct btree_iter *iter,
1770                            struct bkey_i *insert)
1771 {
1772         struct btree            *b = iter->l[0].b;
1773         struct btree_node_iter  node_iter = iter->l[0].iter;
1774         struct bkey_packed      *_k;
1775         int ret;
1776
1777         if (!btree_node_type_needs_gc(iter->btree_id))
1778                 return 0;
1779
1780         ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
1781                         0, insert->k.size, BCH_BUCKET_MARK_INSERT);
1782         if (ret)
1783                 return ret;
1784
1785         if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
1786                 return 0;
1787
1788         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1789                                                       KEY_TYPE_discard))) {
1790                 struct bkey             unpacked;
1791                 struct bkey_s_c         k;
1792                 unsigned                offset = 0;
1793                 s64                     sectors = 0;
1794                 unsigned                flags = BCH_BUCKET_MARK_OVERWRITE;
1795
1796                 k = bkey_disassemble(b, _k, &unpacked);
1797
1798                 if (btree_node_is_extents(b)
1799                     ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
1800                     : bkey_cmp(insert->k.p, k.k->p))
1801                         break;
1802
1803                 if (btree_node_is_extents(b)) {
1804                         switch (bch2_extent_overlap(&insert->k, k.k)) {
1805                         case BCH_EXTENT_OVERLAP_ALL:
1806                                 offset = 0;
1807                                 sectors = -((s64) k.k->size);
1808                                 break;
1809                         case BCH_EXTENT_OVERLAP_BACK:
1810                                 offset = bkey_start_offset(&insert->k) -
1811                                         bkey_start_offset(k.k);
1812                                 sectors = bkey_start_offset(&insert->k) -
1813                                         k.k->p.offset;
1814                                 break;
1815                         case BCH_EXTENT_OVERLAP_FRONT:
1816                                 offset = 0;
1817                                 sectors = bkey_start_offset(k.k) -
1818                                         insert->k.p.offset;
1819                                 break;
1820                         case BCH_EXTENT_OVERLAP_MIDDLE:
1821                                 offset = bkey_start_offset(&insert->k) -
1822                                         bkey_start_offset(k.k);
1823                                 sectors = -((s64) insert->k.size);
1824                                 flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
1825                                 break;
1826                         }
1827
1828                         BUG_ON(sectors >= 0);
1829                 }
1830
1831                 ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
1832                 if (ret)
1833                         return ret;
1834
1835                 bch2_btree_node_iter_advance(&node_iter, b);
1836         }
1837
1838         return 0;
1839 }
1840
1841 /* Disk reservations: */
1842
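/*
 * Zero the percpu sectors_available cache and recompute the number of
 * available sectors from current usage.
 */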
1843 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
1844 {
1845         percpu_u64_set(&c->pcpu->sectors_available, 0);
1846
1847         return avail_factor(__bch2_fs_usage_read_short(c).free);
1848 }
1849
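/*
 * Release a disk reservation: drop its sectors from online_reserved and zero
 * it out.
 */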
1850 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
1851 {
1852         percpu_down_read(&c->mark_lock);
1853         this_cpu_sub(c->usage[0]->online_reserved,
1854                      res->sectors);
1855         percpu_up_read(&c->mark_lock);
1856
1857         res->sectors = 0;
1858 }
1859
1860 #define SECTORS_CACHE   1024
1861
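/*
 * Reserve @sectors sectors for a future write.  The fast path takes them from
 * this cpu's cached sectors_available, refilling the cache from the global
 * atomic counter in batches of at least SECTORS_CACHE; if the global counter
 * runs dry we fall back to recalculating available space under mark_lock.
 * Returns -ENOSPC if the space isn't there and BCH_DISK_RESERVATION_NOFAIL
 * wasn't passed.
 *
 * Rough usage sketch (do_the_write() is a placeholder and error handling is
 * elided; the low-level __bch2_disk_reservation_put() from this file is used
 * here for illustration):
 *
 *	struct disk_reservation res = { 0 };
 *
 *	if (!bch2_disk_reservation_add(c, &res, sectors, 0)) {
 *		do_the_write();
 *		__bch2_disk_reservation_put(c, &res);
 *	}
 */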
1862 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1863                               unsigned sectors, int flags)
1864 {
1865         struct bch_fs_pcpu *pcpu;
1866         u64 old, v, get;
1867         s64 sectors_available;
1868         int ret;
1869
1870         percpu_down_read(&c->mark_lock);
1871         preempt_disable();
1872         pcpu = this_cpu_ptr(c->pcpu);
1873
1874         if (sectors <= pcpu->sectors_available)
1875                 goto out;
1876
1877         v = atomic64_read(&c->sectors_available);
1878         do {
1879                 old = v;
1880                 get = min((u64) sectors + SECTORS_CACHE, old);
1881
1882                 if (get < sectors) {
1883                         preempt_enable();
1884                         percpu_up_read(&c->mark_lock);
1885                         goto recalculate;
1886                 }
1887         } while ((v = atomic64_cmpxchg(&c->sectors_available,
1888                                        old, old - get)) != old);
1889
1890         pcpu->sectors_available         += get;
1891
1892 out:
1893         pcpu->sectors_available         -= sectors;
1894         this_cpu_add(c->usage[0]->online_reserved, sectors);
1895         res->sectors                    += sectors;
1896
1897         preempt_enable();
1898         percpu_up_read(&c->mark_lock);
1899         return 0;
1900
1901 recalculate:
1902         percpu_down_write(&c->mark_lock);
1903
1904         sectors_available = bch2_recalc_sectors_available(c);
1905
1906         if (sectors <= sectors_available ||
1907             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1908                 atomic64_set(&c->sectors_available,
1909                              max_t(s64, 0, sectors_available - sectors));
1910                 this_cpu_add(c->usage[0]->online_reserved, sectors);
1911                 res->sectors                    += sectors;
1912                 ret = 0;
1913         } else {
1914                 atomic64_set(&c->sectors_available, sectors_available);
1915                 ret = -ENOSPC;
1916         }
1917
1918         percpu_up_write(&c->mark_lock);
1919
1920         return ret;
1921 }
1922
1923 /* Startup/shutdown: */
1924
1925 static void buckets_free_rcu(struct rcu_head *rcu)
1926 {
1927         struct bucket_array *buckets =
1928                 container_of(rcu, struct bucket_array, rcu);
1929
1930         kvpfree(buckets,
1931                 sizeof(struct bucket_array) +
1932                 buckets->nbuckets * sizeof(struct bucket));
1933 }
1934
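/*
 * Allocate a new bucket array (plus the freelists and heaps sized for it) for
 * @nbuckets buckets, copy over the existing bucket marks if the device is
 * being resized, and swap the new structures in (taking gc_lock, bucket_lock
 * and mark_lock when resizing a live device).  The old bucket array, if any,
 * is freed via RCU.
 */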
1935 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1936 {
1937         struct bucket_array *buckets = NULL, *old_buckets = NULL;
1938         unsigned long *buckets_nouse = NULL;
1939         alloc_fifo      free[RESERVE_NR];
1940         alloc_fifo      free_inc;
1941         alloc_heap      alloc_heap;
1942         copygc_heap     copygc_heap;
1943
1944         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1945                              ca->mi.bucket_size / c->opts.btree_node_size);
1946         /* XXX: these should be tunable */
1947         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
1948         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
1949         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
1950                                       btree_reserve * 2);
1951         bool resize = ca->buckets[0] != NULL,
1952              start_copygc = ca->copygc_thread != NULL;
1953         int ret = -ENOMEM;
1954         unsigned i;
1955
1956         memset(&free,           0, sizeof(free));
1957         memset(&free_inc,       0, sizeof(free_inc));
1958         memset(&alloc_heap,     0, sizeof(alloc_heap));
1959         memset(&copygc_heap,    0, sizeof(copygc_heap));
1960
1961         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
1962                                             nbuckets * sizeof(struct bucket),
1963                                             GFP_KERNEL|__GFP_ZERO)) ||
1964             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1965                                             sizeof(unsigned long),
1966                                             GFP_KERNEL|__GFP_ZERO)) ||
1967             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
1968             !init_fifo(&free[RESERVE_MOVINGGC],
1969                        copygc_reserve, GFP_KERNEL) ||
1970             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
1971             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
1972             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
1973             !init_heap(&copygc_heap,    copygc_reserve, GFP_KERNEL))
1974                 goto err;
1975
1976         buckets->first_bucket   = ca->mi.first_bucket;
1977         buckets->nbuckets       = nbuckets;
1978
1979         bch2_copygc_stop(ca);
1980
1981         if (resize) {
1982                 down_write(&c->gc_lock);
1983                 down_write(&ca->bucket_lock);
1984                 percpu_down_write(&c->mark_lock);
1985         }
1986
1987         old_buckets = bucket_array(ca);
1988
1989         if (resize) {
1990                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
1991
1992                 memcpy(buckets->b,
1993                        old_buckets->b,
1994                        n * sizeof(struct bucket));
1995                 memcpy(buckets_nouse,
1996                        ca->buckets_nouse,
1997                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1998         }
1999
2000         rcu_assign_pointer(ca->buckets[0], buckets);
2001         buckets = old_buckets;
2002
2003         swap(ca->buckets_nouse, buckets_nouse);
2004
2005         if (resize)
2006                 percpu_up_write(&c->mark_lock);
2007
2008         spin_lock(&c->freelist_lock);
2009         for (i = 0; i < RESERVE_NR; i++) {
2010                 fifo_move(&free[i], &ca->free[i]);
2011                 swap(ca->free[i], free[i]);
2012         }
2013         fifo_move(&free_inc, &ca->free_inc);
2014         swap(ca->free_inc, free_inc);
2015         spin_unlock(&c->freelist_lock);
2016
2017         /* with gc lock held, alloc_heap can't be in use: */
2018         swap(ca->alloc_heap, alloc_heap);
2019
2020         /* and we shut down copygc: */
2021         swap(ca->copygc_heap, copygc_heap);
2022
2023         nbuckets = ca->mi.nbuckets;
2024
2025         if (resize) {
2026                 up_write(&ca->bucket_lock);
2027                 up_write(&c->gc_lock);
2028         }
2029
2030         if (start_copygc &&
2031             bch2_copygc_start(c, ca))
2032                 bch_err(ca, "error restarting copygc thread");
2033
2034         ret = 0;
2035 err:
2036         free_heap(&copygc_heap);
2037         free_heap(&alloc_heap);
2038         free_fifo(&free_inc);
2039         for (i = 0; i < RESERVE_NR; i++)
2040                 free_fifo(&free[i]);
2041         kvpfree(buckets_nouse,
2042                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2043         if (buckets)
2044                 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2045
2046         return ret;
2047 }
2048
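/* Free everything allocated by bch2_dev_buckets_alloc() and _resize(): */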
2049 void bch2_dev_buckets_free(struct bch_dev *ca)
2050 {
2051         unsigned i;
2052
2053         free_heap(&ca->copygc_heap);
2054         free_heap(&ca->alloc_heap);
2055         free_fifo(&ca->free_inc);
2056         for (i = 0; i < RESERVE_NR; i++)
2057                 free_fifo(&ca->free[i]);
2058         kvpfree(ca->buckets_nouse,
2059                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2060         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2061                 sizeof(struct bucket_array) +
2062                 ca->mi.nbuckets * sizeof(struct bucket));
2063
2064         free_percpu(ca->usage[0]);
2065 }
2066
2067 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2068 {
2069         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2070                 return -ENOMEM;
2071
2072         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2073 }