libbcachefs/buckets.c (bcachefs sources at 14f68409be)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  * The bucket contains data that we must not discard (either the only copy,
24  *   or one of the 'main copies' for data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
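/*
 * Illustrative sketch (hypothetical, not compiled): one way the states
 * described above map onto struct bucket_mark fields.  The enum and helper
 * names are made up for exposition only; metadata buckets are assumed to be
 * identified via data_type rather than a separate is_metadata bit.
 */
#if 0
enum bucket_state_sketch { B_FREE, B_ALLOCATOR, B_CACHED, B_DIRTY, B_METADATA };

static enum bucket_state_sketch classify_bucket(struct bucket_mark m)
{
	if (m.owned_by_allocator)
		return B_ALLOCATOR;		/* on a freelist, or an open bucket */
	if (m.data_type == BCH_DATA_BTREE ||
	    m.data_type == BCH_DATA_SB ||
	    m.data_type == BCH_DATA_JOURNAL)
		return B_METADATA;
	if (m.dirty_sectors)
		return B_DIRTY;
	if (m.cached_sectors)
		return B_CACHED;
	return B_FREE;				/* no data: nothing to read */
}
#endif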
65
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
79
80 /*
81  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
82  * wraparound:
83  */
84 void bch2_bucket_seq_cleanup(struct bch_fs *c)
85 {
86         u64 journal_seq = atomic64_read(&c->journal.seq);
87         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
88         struct bch_dev *ca;
89         struct bucket_array *buckets;
90         struct bucket *g;
91         struct bucket_mark m;
92         unsigned i;
93
94         if (journal_seq - c->last_bucket_seq_cleanup <
95             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
96                 return;
97
98         c->last_bucket_seq_cleanup = journal_seq;
99
100         for_each_member_device(ca, c, i) {
101                 down_read(&ca->bucket_lock);
102                 buckets = bucket_array(ca);
103
104                 for_each_bucket(g, buckets) {
105                         bucket_cmpxchg(g, m, ({
106                                 if (!m.journal_seq_valid ||
107                                     bucket_needs_journal_commit(m, last_seq_ondisk))
108                                         break;
109
110                                 m.journal_seq_valid = 0;
111                         }));
112                 }
113                 up_read(&ca->bucket_lock);
114         }
115 }
116
117 void bch2_fs_usage_initialize(struct bch_fs *c)
118 {
119         struct bch_fs_usage *usage;
120         unsigned i;
121
122         percpu_down_write(&c->mark_lock);
123         usage = c->usage_base;
124
125         bch2_fs_usage_acc_to_base(c, 0);
126         bch2_fs_usage_acc_to_base(c, 1);
127
128         for (i = 0; i < BCH_REPLICAS_MAX; i++)
129                 usage->reserved += usage->persistent_reserved[i];
130
131         for (i = 0; i < c->replicas.nr; i++) {
132                 struct bch_replicas_entry *e =
133                         cpu_replicas_entry(&c->replicas, i);
134
135                 switch (e->data_type) {
136                 case BCH_DATA_BTREE:
137                         usage->btree    += usage->replicas[i];
138                         break;
139                 case BCH_DATA_USER:
140                         usage->data     += usage->replicas[i];
141                         break;
142                 case BCH_DATA_CACHED:
143                         usage->cached   += usage->replicas[i];
144                         break;
145                 }
146         }
147
148         percpu_up_write(&c->mark_lock);
149 }
150
151 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
152 {
153         if (fs_usage == c->usage_scratch)
154                 mutex_unlock(&c->usage_scratch_lock);
155         else
156                 kfree(fs_usage);
157 }
158
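/*
 * Scratch buffer allocation strategy: try a cheap atomic allocation first,
 * then the preallocated c->usage_scratch buffer if its lock is uncontended,
 * then a sleeping GFP_NOFS allocation, and only as a last resort block on
 * usage_scratch_lock.  bch2_fs_usage_scratch_put() above frees or unlocks
 * whichever one we ended up with.
 */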
159 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
160 {
161         struct bch_fs_usage *ret;
162         unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
163
164         ret = kzalloc(bytes, GFP_NOWAIT);
165         if (ret)
166                 return ret;
167
168         if (mutex_trylock(&c->usage_scratch_lock))
169                 goto out_pool;
170
171         ret = kzalloc(bytes, GFP_NOFS);
172         if (ret)
173                 return ret;
174
175         mutex_lock(&c->usage_scratch_lock);
176 out_pool:
177         ret = c->usage_scratch;
178         memset(ret, 0, bytes);
179         return ret;
180 }
181
182 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
183 {
184         struct bch_dev_usage ret;
185
186         memset(&ret, 0, sizeof(ret));
187         acc_u64s_percpu((u64 *) &ret,
188                         (u64 __percpu *) ca->usage[0],
189                         sizeof(ret) / sizeof(u64));
190
191         return ret;
192 }
193
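/*
 * Filesystem usage is kept as a base copy (c->usage_base) plus two percpu
 * delta arrays, c->usage[0] and c->usage[1], selected by the low bit of the
 * journal sequence number (or c->usage_gc when marking for gc);
 * bch2_fs_usage_acc_to_base() folds one delta array back into the base.
 * Readers below sum the base plus both deltas under the usage_lock seqcount.
 */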
194 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
195                                                 unsigned journal_seq,
196                                                 bool gc)
197 {
198         return this_cpu_ptr(gc
199                             ? c->usage_gc
200                             : c->usage[journal_seq & 1]);
201 }
202
203 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
204 {
205         ssize_t offset = v - (u64 *) c->usage_base;
206         unsigned seq;
207         u64 ret;
208
209         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
210         percpu_rwsem_assert_held(&c->mark_lock);
211
212         do {
213                 seq = read_seqcount_begin(&c->usage_lock);
214                 ret = *v +
215                         percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
216                         percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
217         } while (read_seqcount_retry(&c->usage_lock, seq));
218
219         return ret;
220 }
221
222 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
223 {
224         struct bch_fs_usage *ret;
225         unsigned seq, v, u64s = fs_usage_u64s(c);
226 retry:
227         ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
228         if (unlikely(!ret))
229                 return NULL;
230
231         percpu_down_read(&c->mark_lock);
232
233         v = fs_usage_u64s(c);
234         if (unlikely(u64s != v)) {
235                 u64s = v;
236                 percpu_up_read(&c->mark_lock);
237                 kfree(ret);
238                 goto retry;
239         }
240
241         do {
242                 seq = read_seqcount_begin(&c->usage_lock);
243                 memcpy(ret, c->usage_base, u64s * sizeof(u64));
244                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
245                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
246         } while (read_seqcount_retry(&c->usage_lock, seq));
247
248         return ret;
249 }
250
251 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
252 {
253         unsigned u64s = fs_usage_u64s(c);
254
255         BUG_ON(idx >= 2);
256
257         write_seqcount_begin(&c->usage_lock);
258
259         acc_u64s_percpu((u64 *) c->usage_base,
260                         (u64 __percpu *) c->usage[idx], u64s);
261         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
262
263         write_seqcount_end(&c->usage_lock);
264 }
265
266 void bch2_fs_usage_to_text(struct printbuf *out,
267                            struct bch_fs *c,
268                            struct bch_fs_usage *fs_usage)
269 {
270         unsigned i;
271
272         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
273
274         pr_buf(out, "hidden:\t\t\t\t%llu\n",
275                fs_usage->hidden);
276         pr_buf(out, "data:\t\t\t\t%llu\n",
277                fs_usage->data);
278         pr_buf(out, "cached:\t\t\t\t%llu\n",
279                fs_usage->cached);
280         pr_buf(out, "reserved:\t\t\t%llu\n",
281                fs_usage->reserved);
282         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
283                fs_usage->nr_inodes);
284         pr_buf(out, "online reserved:\t\t%llu\n",
285                fs_usage->online_reserved);
286
287         for (i = 0;
288              i < ARRAY_SIZE(fs_usage->persistent_reserved);
289              i++) {
290                 pr_buf(out, "%u replicas:\n", i + 1);
291                 pr_buf(out, "\treserved:\t\t%llu\n",
292                        fs_usage->persistent_reserved[i]);
293         }
294
295         for (i = 0; i < c->replicas.nr; i++) {
296                 struct bch_replicas_entry *e =
297                         cpu_replicas_entry(&c->replicas, i);
298
299                 pr_buf(out, "\t");
300                 bch2_replicas_entry_to_text(out, e);
301                 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
302         }
303 }
304
305 #define RESERVE_FACTOR  6
306
307 static u64 reserve_factor(u64 r)
308 {
309         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
310 }
311
312 static u64 avail_factor(u64 r)
313 {
314         return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
315 }
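/*
 * With RESERVE_FACTOR 6 we hold back roughly 1/64th of capacity: e.g.
 * reserve_factor(128) = 128 + (round_up(128, 64) >> 6) = 130, and
 * avail_factor(130) = (130 << 6) / 65 = 128, so avail_factor() is
 * approximately the inverse of reserve_factor().
 */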
316
317 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
318 {
319         return min(fs_usage->hidden +
320                    fs_usage->btree +
321                    fs_usage->data +
322                    reserve_factor(fs_usage->reserved +
323                                   fs_usage->online_reserved),
324                    c->capacity);
325 }
326
327 static struct bch_fs_usage_short
328 __bch2_fs_usage_read_short(struct bch_fs *c)
329 {
330         struct bch_fs_usage_short ret;
331         u64 data, reserved;
332
333         ret.capacity = c->capacity -
334                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
335
336         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
337                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
338         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
339                 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
340
341         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
342         ret.free        = ret.capacity - ret.used;
343
344         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
345
346         return ret;
347 }
348
349 struct bch_fs_usage_short
350 bch2_fs_usage_read_short(struct bch_fs *c)
351 {
352         struct bch_fs_usage_short ret;
353
354         percpu_down_read(&c->mark_lock);
355         ret = __bch2_fs_usage_read_short(c);
356         percpu_up_read(&c->mark_lock);
357
358         return ret;
359 }
360
361 static inline int is_unavailable_bucket(struct bucket_mark m)
362 {
363         return !is_available_bucket(m);
364 }
365
366 static inline int is_fragmented_bucket(struct bucket_mark m,
367                                        struct bch_dev *ca)
368 {
369         if (!m.owned_by_allocator &&
370             m.data_type == BCH_DATA_USER &&
371             bucket_sectors_used(m))
372                 return max_t(int, 0, (int) ca->mi.bucket_size -
373                              bucket_sectors_used(m));
374         return 0;
375 }
376
377 static inline enum bch_data_type bucket_type(struct bucket_mark m)
378 {
379         return m.cached_sectors && !m.dirty_sectors
380                 ? BCH_DATA_CACHED
381                 : m.data_type;
382 }
383
384 static bool bucket_became_unavailable(struct bucket_mark old,
385                                       struct bucket_mark new)
386 {
387         return is_available_bucket(old) &&
388                !is_available_bucket(new);
389 }
390
391 int bch2_fs_usage_apply(struct bch_fs *c,
392                         struct bch_fs_usage *fs_usage,
393                         struct disk_reservation *disk_res,
394                         unsigned journal_seq)
395 {
396         s64 added = fs_usage->data + fs_usage->reserved;
397         s64 should_not_have_added;
398         int ret = 0;
399
400         percpu_rwsem_assert_held(&c->mark_lock);
401
402         /*
403          * Not allowed to reduce sectors_available except by getting a
404          * reservation:
405          */
406         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
407         if (WARN_ONCE(should_not_have_added > 0,
408                       "disk usage increased by %lli without a reservation",
409                       should_not_have_added)) {
410                 atomic64_sub(should_not_have_added, &c->sectors_available);
411                 added -= should_not_have_added;
412                 ret = -1;
413         }
414
415         if (added > 0) {
416                 disk_res->sectors               -= added;
417                 fs_usage->online_reserved       -= added;
418         }
419
420         preempt_disable();
421         acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
422                  (u64 *) fs_usage, fs_usage_u64s(c));
423         preempt_enable();
424
425         return ret;
426 }
427
428 static inline void account_bucket(struct bch_fs_usage *fs_usage,
429                                   struct bch_dev_usage *dev_usage,
430                                   enum bch_data_type type,
431                                   int nr, s64 size)
432 {
433         if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
434                 fs_usage->hidden        += size;
435
436         dev_usage->buckets[type]        += nr;
437 }
438
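/*
 * Apply an old -> new bucket mark transition to this device's usage
 * counters; superblock and journal buckets are also accounted in
 * fs_usage->hidden.  Wakes the allocator if the transition made the bucket
 * available again.
 */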
439 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
440                                   struct bch_fs_usage *fs_usage,
441                                   struct bucket_mark old, struct bucket_mark new,
442                                   bool gc)
443 {
444         struct bch_dev_usage *dev_usage;
445
446         percpu_rwsem_assert_held(&c->mark_lock);
447
448         bch2_fs_inconsistent_on(old.data_type && new.data_type &&
449                                 old.data_type != new.data_type, c,
450                 "different types of data in same bucket: %s, %s",
451                 bch2_data_types[old.data_type],
452                 bch2_data_types[new.data_type]);
453
454         preempt_disable();
455         dev_usage = this_cpu_ptr(ca->usage[gc]);
456
457         if (bucket_type(old))
458                 account_bucket(fs_usage, dev_usage, bucket_type(old),
459                                -1, -ca->mi.bucket_size);
460
461         if (bucket_type(new))
462                 account_bucket(fs_usage, dev_usage, bucket_type(new),
463                                1, ca->mi.bucket_size);
464
465         dev_usage->buckets_alloc +=
466                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
467         dev_usage->buckets_ec +=
468                 (int) new.stripe - (int) old.stripe;
469         dev_usage->buckets_unavailable +=
470                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
471
472         dev_usage->sectors[old.data_type] -= old.dirty_sectors;
473         dev_usage->sectors[new.data_type] += new.dirty_sectors;
474         dev_usage->sectors[BCH_DATA_CACHED] +=
475                 (int) new.cached_sectors - (int) old.cached_sectors;
476         dev_usage->sectors_fragmented +=
477                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
478         preempt_enable();
479
480         if (!is_available_bucket(old) && is_available_bucket(new))
481                 bch2_wake_allocator(ca);
482 }
483
484 void bch2_dev_usage_from_buckets(struct bch_fs *c)
485 {
486         struct bch_dev *ca;
487         struct bucket_mark old = { .v.counter = 0 };
488         struct bucket_array *buckets;
489         struct bucket *g;
490         unsigned i;
491         int cpu;
492
493         c->usage_base->hidden = 0;
494
495         for_each_member_device(ca, c, i) {
496                 for_each_possible_cpu(cpu)
497                         memset(per_cpu_ptr(ca->usage[0], cpu), 0,
498                                sizeof(*ca->usage[0]));
499
500                 buckets = bucket_array(ca);
501
502                 for_each_bucket(g, buckets)
503                         bch2_dev_usage_update(c, ca, c->usage_base,
504                                               old, g->mark, false);
505         }
506 }
507
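/*
 * Like bucket_cmpxchg(), but also propagates the old -> new mark transition
 * into the device and filesystem usage counters via bch2_dev_usage_update().
 */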
508 #define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr)      \
509 ({                                                              \
510         struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
511                                                                 \
512         bch2_dev_usage_update(c, ca, fs_usage, _old, new, gc);  \
513         _old;                                                   \
514 })
515
516 static inline void update_replicas(struct bch_fs *c,
517                                    struct bch_fs_usage *fs_usage,
518                                    struct bch_replicas_entry *r,
519                                    s64 sectors)
520 {
521         int idx = bch2_replicas_entry_idx(c, r);
522
523         BUG_ON(idx < 0);
524         BUG_ON(!sectors);
525
526         switch (r->data_type) {
527         case BCH_DATA_BTREE:
528                 fs_usage->btree         += sectors;
529                 break;
530         case BCH_DATA_USER:
531                 fs_usage->data          += sectors;
532                 break;
533         case BCH_DATA_CACHED:
534                 fs_usage->cached        += sectors;
535                 break;
536         }
537         fs_usage->replicas[idx]         += sectors;
538 }
539
540 static inline void update_cached_sectors(struct bch_fs *c,
541                                          struct bch_fs_usage *fs_usage,
542                                          unsigned dev, s64 sectors)
543 {
544         struct bch_replicas_padded r;
545
546         bch2_replicas_entry_cached(&r.e, dev);
547
548         update_replicas(c, fs_usage, &r.e, sectors);
549 }
550
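/*
 * Replica accounting changes made inside a btree transaction are not applied
 * immediately: they are queued on trans->fs_usage_deltas as variable length
 * entries (an s64 delta followed by the replicas entry it applies to) and
 * applied later by bch2_replicas_delta_list_apply().
 */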
551 static struct replicas_delta_list *
552 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
553 {
554         struct replicas_delta_list *d = trans->fs_usage_deltas;
555         unsigned new_size = d ? (d->size + more) * 2 : 128;
556
557         if (!d || d->used + more > d->size) {
558                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
559                 BUG_ON(!d);
560
561                 d->size = new_size;
562                 trans->fs_usage_deltas = d;
563         }
564         return d;
565 }
566
567 static inline void update_replicas_list(struct btree_trans *trans,
568                                         struct bch_replicas_entry *r,
569                                         s64 sectors)
570 {
571         struct replicas_delta_list *d;
572         struct replicas_delta *n;
573         unsigned b = replicas_entry_bytes(r) + 8;
574
575         d = replicas_deltas_realloc(trans, b);
576
577         n = (void *) d->d + d->used;
578         n->delta = sectors;
579         memcpy(&n->r, r, replicas_entry_bytes(r));
580         d->used += b;
581 }
582
583 static inline void update_cached_sectors_list(struct btree_trans *trans,
584                                               unsigned dev, s64 sectors)
585 {
586         struct bch_replicas_padded r;
587
588         bch2_replicas_entry_cached(&r.e, dev);
589
590         update_replicas_list(trans, &r.e, sectors);
591 }
592
593 void bch2_replicas_delta_list_apply(struct bch_fs *c,
594                                     struct bch_fs_usage *fs_usage,
595                                     struct replicas_delta_list *r)
596 {
597         struct replicas_delta *d = r->d;
598         struct replicas_delta *top = (void *) r->d + r->used;
599
600         acc_u64s((u64 *) fs_usage,
601                  (u64 *) &r->fs_usage, sizeof(*fs_usage) / sizeof(u64));
602
603         while (d != top) {
604                 BUG_ON((void *) d > (void *) top);
605
606                 update_replicas(c, fs_usage, &d->r, d->delta);
607
608                 d = (void *) d + replicas_entry_bytes(&d->r) + 8;
609         }
610 }
611
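/*
 * Run a mark function against the primary and/or gc copies of the bucket
 * state: without BCH_BUCKET_MARK_GC we mark the primary copy, plus the gc
 * copy if gc is in progress and has already visited this position; with
 * BCH_BUCKET_MARK_GC set, only the gc copy is marked.
 */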
612 #define do_mark_fn(fn, c, pos, flags, ...)                              \
613 ({                                                                      \
614         int gc, ret = 0;                                                \
615                                                                         \
616         percpu_rwsem_assert_held(&c->mark_lock);                        \
617                                                                         \
618         for (gc = 0; gc < 2 && !ret; gc++)                              \
619                 if (!gc == !(flags & BCH_BUCKET_MARK_GC) ||             \
620                     (gc && gc_visited(c, pos)))                         \
621                         ret = fn(c, __VA_ARGS__, gc);                   \
622         ret;                                                            \
623 })
624
625 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
626                                     size_t b, struct bucket_mark *ret,
627                                     bool gc)
628 {
629         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
630         struct bucket *g = __bucket(ca, b, gc);
631         struct bucket_mark old, new;
632
633         old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
634                 BUG_ON(!is_available_bucket(new));
635
636                 new.owned_by_allocator  = true;
637                 new.dirty               = true;
638                 new.data_type           = 0;
639                 new.cached_sectors      = 0;
640                 new.dirty_sectors       = 0;
641                 new.gen++;
642         }));
643
644         if (old.cached_sectors)
645                 update_cached_sectors(c, fs_usage, ca->dev_idx,
646                                       -((s64) old.cached_sectors));
647
648         if (!gc)
649                 *ret = old;
650         return 0;
651 }
652
653 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
654                             size_t b, struct bucket_mark *old)
655 {
656         do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
657                    ca, b, old);
658
659         if (!old->owned_by_allocator && old->cached_sectors)
660                 trace_invalidate(ca, bucket_to_sector(ca, b),
661                                  old->cached_sectors);
662 }
663
664 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
665                                     size_t b, bool owned_by_allocator,
666                                     bool gc)
667 {
668         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
669         struct bucket *g = __bucket(ca, b, gc);
670         struct bucket_mark old, new;
671
672         old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
673                 new.owned_by_allocator  = owned_by_allocator;
674         }));
675
676         BUG_ON(!gc &&
677                !owned_by_allocator && !old.owned_by_allocator);
678
679         return 0;
680 }
681
682 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
683                             size_t b, bool owned_by_allocator,
684                             struct gc_pos pos, unsigned flags)
685 {
686         preempt_disable();
687
688         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
689                    ca, b, owned_by_allocator);
690
691         preempt_enable();
692 }
693
694 static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
695                            struct bch_fs_usage *fs_usage,
696                            u64 journal_seq, unsigned flags)
697 {
698         bool gc = flags & BCH_BUCKET_MARK_GC;
699         struct bkey_alloc_unpacked u;
700         struct bch_dev *ca;
701         struct bucket *g;
702         struct bucket_mark old, m;
703
704         /*
705          * alloc btree is read in by bch2_alloc_read, not gc:
706          */
707         if ((flags & BCH_BUCKET_MARK_GC) &&
708             !(flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE))
709                 return 0;
710
711         ca = bch_dev_bkey_exists(c, k.k->p.inode);
712
713         if (k.k->p.offset >= ca->mi.nbuckets)
714                 return 0;
715
716         g = __bucket(ca, k.k->p.offset, gc);
717         u = bch2_alloc_unpack(k);
718
719         old = bucket_cmpxchg(g, m, ({
720                 m.gen                   = u.gen;
721                 m.data_type             = u.data_type;
722                 m.dirty_sectors         = u.dirty_sectors;
723                 m.cached_sectors        = u.cached_sectors;
724
725                 if (journal_seq) {
726                         m.journal_seq_valid     = 1;
727                         m.journal_seq           = journal_seq;
728                 }
729         }));
730
731         if (!(flags & BCH_BUCKET_MARK_ALLOC_READ))
732                 bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);
733
734         g->io_time[READ]        = u.read_time;
735         g->io_time[WRITE]       = u.write_time;
736         g->oldest_gen           = u.oldest_gen;
737         g->gen_valid            = 1;
738
739         /*
740          * need to know if we're getting called from the invalidate path or
741          * not:
742          */
743
744         if ((flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE) &&
745             old.cached_sectors) {
746                 update_cached_sectors(c, fs_usage, ca->dev_idx,
747                                       -old.cached_sectors);
748                 trace_invalidate(ca, bucket_to_sector(ca, k.k->p.offset),
749                                  old.cached_sectors);
750         }
751
752         return 0;
753 }
754
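/*
 * Saturating add for the 16 bit sector counts in struct bucket_mark: clamps
 * the result to U16_MAX and evaluates to true if the addition overflowed.
 */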
755 #define checked_add(a, b)                                       \
756 ({                                                              \
757         unsigned _res = (unsigned) (a) + (b);                   \
758         bool overflow = _res > U16_MAX;                         \
759         if (overflow)                                           \
760                 _res = U16_MAX;                                 \
761         (a) = _res;                                             \
762         overflow;                                               \
763 })
764
765 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
766                                        size_t b, enum bch_data_type type,
767                                        unsigned sectors, bool gc)
768 {
769         struct bucket *g = __bucket(ca, b, gc);
770         struct bucket_mark old, new;
771         bool overflow;
772
773         BUG_ON(type != BCH_DATA_SB &&
774                type != BCH_DATA_JOURNAL);
775
776         old = bucket_cmpxchg(g, new, ({
777                 new.dirty       = true;
778                 new.data_type   = type;
779                 overflow = checked_add(new.dirty_sectors, sectors);
780         }));
781
782         bch2_fs_inconsistent_on(overflow, c,
783                 "bucket sector count overflow: %u + %u > U16_MAX",
784                 old.dirty_sectors, sectors);
785
786         if (c)
787                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
788                                       old, new, gc);
789
790         return 0;
791 }
792
793 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
794                                size_t b, enum bch_data_type type,
795                                unsigned sectors, struct gc_pos pos,
796                                unsigned flags)
797 {
798         BUG_ON(type != BCH_DATA_SB &&
799                type != BCH_DATA_JOURNAL);
800
801         preempt_disable();
802
803         if (likely(c)) {
804                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
805                            ca, b, type, sectors);
806         } else {
807                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
808         }
809
810         preempt_enable();
811 }
812
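/*
 * Change in on disk sectors for one pointer when an extent is inserted or
 * (partially) overwritten.  For overwrites, @delta is the (negative) number
 * of live sectors being dropped and @offset is where the overwritten range
 * starts within the existing extent: a split replaces the old extent with a
 * front and a back fragment, a plain overwrite with a single shortened
 * fragment; otherwise we're inserting and account the full pointer.
 */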
813 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
814                                   unsigned offset, s64 delta,
815                                   unsigned flags)
816 {
817         if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
818                 BUG_ON(offset + -delta > p.crc.live_size);
819
820                 return -((s64) ptr_disk_sectors(p)) +
821                         __ptr_disk_sectors(p, offset) +
822                         __ptr_disk_sectors(p, p.crc.live_size -
823                                            offset + delta);
824         } else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
825                 BUG_ON(offset + -delta > p.crc.live_size);
826
827                 return -((s64) ptr_disk_sectors(p)) +
828                         __ptr_disk_sectors(p, p.crc.live_size +
829                                            delta);
830         } else {
831                 return ptr_disk_sectors(p);
832         }
833 }
834
835 static void bucket_set_stripe(struct bch_fs *c,
836                               const struct bch_stripe *v,
837                               struct bch_fs_usage *fs_usage,
838                               u64 journal_seq,
839                               unsigned flags)
840 {
841         bool enabled = !(flags & BCH_BUCKET_MARK_OVERWRITE);
842         bool gc = flags & BCH_BUCKET_MARK_GC;
843         unsigned i;
844
845         for (i = 0; i < v->nr_blocks; i++) {
846                 const struct bch_extent_ptr *ptr = v->ptrs + i;
847                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
848                 struct bucket *g = PTR_BUCKET(ca, ptr, gc);
849                 struct bucket_mark new, old;
850
851                 old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
852                         new.dirty                       = true;
853                         new.stripe                      = enabled;
854                         if (journal_seq) {
855                                 new.journal_seq_valid   = 1;
856                                 new.journal_seq         = journal_seq;
857                         }
858                 }));
859
860                 /*
861                  * XXX write repair code for these, flag stripe as possibly bad
862                  */
863                 if (old.gen != ptr->gen)
864                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
865                                       "stripe with stale pointer");
866 #if 0
867                 /*
868                  * We'd like to check for these, but these checks don't work
869                  * yet:
870                  */
871                 if (old.stripe && enabled)
872                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
873                                       "multiple stripes using same bucket");
874
875                 if (!old.stripe && !enabled)
876                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
877                                       "deleting stripe but bucket not marked as stripe bucket");
878 #endif
879         }
880 }
881
882 static bool bch2_mark_pointer(struct bch_fs *c,
883                               struct extent_ptr_decoded p,
884                               s64 sectors, enum bch_data_type data_type,
885                               struct bch_fs_usage *fs_usage,
886                               u64 journal_seq, unsigned flags)
887 {
888         bool gc = flags & BCH_BUCKET_MARK_GC;
889         struct bucket_mark old, new;
890         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
891         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
892         bool overflow;
893         u64 v;
894
895         v = atomic64_read(&g->_mark.v);
896         do {
897                 new.v.counter = old.v.counter = v;
898
899                 new.dirty = true;
900
901                 /*
902                  * Check this after reading bucket mark to guard against
903                  * the allocator invalidating a bucket after we've already
904                  * checked the gen
905                  */
906                 if (gen_after(new.gen, p.ptr.gen)) {
907                         /* XXX write repair code for this */
908                         if (!p.ptr.cached &&
909                             test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
910                                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
911                                               "stale dirty pointer");
912                         return true;
913                 }
914
915                 if (!p.ptr.cached)
916                         overflow = checked_add(new.dirty_sectors, sectors);
917                 else
918                         overflow = checked_add(new.cached_sectors, sectors);
919
920                 if (!new.dirty_sectors &&
921                     !new.cached_sectors) {
922                         new.data_type   = 0;
923
924                         if (journal_seq) {
925                                 new.journal_seq_valid = 1;
926                                 new.journal_seq = journal_seq;
927                         }
928                 } else {
929                         new.data_type = data_type;
930                 }
931
932                 if (flags & BCH_BUCKET_MARK_NOATOMIC) {
933                         g->_mark = new;
934                         break;
935                 }
936         } while ((v = atomic64_cmpxchg(&g->_mark.v,
937                               old.v.counter,
938                               new.v.counter)) != old.v.counter);
939
940         bch2_fs_inconsistent_on(overflow, c,
941                 "bucket sector count overflow: %u + %lli > U16_MAX",
942                 !p.ptr.cached
943                 ? old.dirty_sectors
944                 : old.cached_sectors, sectors);
945
946         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
947
948         BUG_ON(!gc && bucket_became_unavailable(old, new));
949
950         return false;
951 }
952
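/*
 * Account sectors referenced via a stripe pointer: in addition to the data
 * sectors themselves we charge a proportional share of the stripe's parity,
 * DIV_ROUND_UP(sectors * nr_redundant, nr_data).  For example, marking 8
 * sectors in a 4+2 stripe also accounts DIV_ROUND_UP(8 * 2, 4) = 4 parity
 * sectors, 12 in total.
 */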
953 static int bch2_mark_stripe_ptr(struct bch_fs *c,
954                                 struct bch_extent_stripe_ptr p,
955                                 enum bch_data_type data_type,
956                                 struct bch_fs_usage *fs_usage,
957                                 s64 sectors, unsigned flags)
958 {
959         bool gc = flags & BCH_BUCKET_MARK_GC;
960         struct stripe *m;
961         unsigned old, new, nr_data;
962         int blocks_nonempty_delta;
963         s64 parity_sectors;
964
965         BUG_ON(!sectors);
966
967         m = genradix_ptr(&c->stripes[gc], p.idx);
968
969         spin_lock(&c->ec_stripes_heap_lock);
970
971         if (!m || !m->alive) {
972                 spin_unlock(&c->ec_stripes_heap_lock);
973                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
974                                     (u64) p.idx);
975                 return -EIO;
976         }
977
978         BUG_ON(m->r.e.data_type != data_type);
979
980         nr_data = m->nr_blocks - m->nr_redundant;
981
982         parity_sectors = DIV_ROUND_UP(abs(sectors) * m->nr_redundant, nr_data);
983
984         if (sectors < 0)
985                 parity_sectors = -parity_sectors;
986         sectors += parity_sectors;
987
988         old = m->block_sectors[p.block];
989         m->block_sectors[p.block] += sectors;
990         new = m->block_sectors[p.block];
991
992         blocks_nonempty_delta = (int) !!new - (int) !!old;
993         if (blocks_nonempty_delta) {
994                 m->blocks_nonempty += blocks_nonempty_delta;
995
996                 if (!gc)
997                         bch2_stripes_heap_update(c, m, p.idx);
998         }
999
1000         m->dirty = true;
1001
1002         spin_unlock(&c->ec_stripes_heap_lock);
1003
1004         update_replicas(c, fs_usage, &m->r.e, sectors);
1005
1006         return 0;
1007 }
1008
1009 static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
1010                             unsigned offset, s64 sectors,
1011                             enum bch_data_type data_type,
1012                             struct bch_fs_usage *fs_usage,
1013                             unsigned journal_seq, unsigned flags)
1014 {
1015         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1016         const union bch_extent_entry *entry;
1017         struct extent_ptr_decoded p;
1018         struct bch_replicas_padded r;
1019         s64 dirty_sectors = 0;
1020         unsigned i;
1021         int ret;
1022
1023         r.e.data_type   = data_type;
1024         r.e.nr_devs     = 0;
1025         r.e.nr_required = 1;
1026
1027         BUG_ON(!sectors);
1028
1029         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1030                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1031                         ? sectors
1032                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1033                 bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type,
1034                                         fs_usage, journal_seq, flags);
1035
1036                 if (p.ptr.cached) {
1037                         if (disk_sectors && !stale)
1038                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1039                                                       disk_sectors);
1040                 } else if (!p.ec_nr) {
1041                         dirty_sectors          += disk_sectors;
1042                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1043                 } else {
1044                         for (i = 0; i < p.ec_nr; i++) {
1045                                 ret = bch2_mark_stripe_ptr(c, p.ec[i],
1046                                                 data_type, fs_usage,
1047                                                 disk_sectors, flags);
1048                                 if (ret)
1049                                         return ret;
1050                         }
1051
1052                         r.e.nr_required = 0;
1053                 }
1054         }
1055
1056         if (dirty_sectors)
1057                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1058
1059         return 0;
1060 }
1061
1062 static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
1063                             struct bch_fs_usage *fs_usage,
1064                             u64 journal_seq, unsigned flags)
1065 {
1066         bool gc = flags & BCH_BUCKET_MARK_GC;
1067         struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
1068         size_t idx = s.k->p.offset;
1069         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1070         unsigned i;
1071
1072         spin_lock(&c->ec_stripes_heap_lock);
1073
1074         if (!m || ((flags & BCH_BUCKET_MARK_OVERWRITE) && !m->alive)) {
1075                 spin_unlock(&c->ec_stripes_heap_lock);
1076                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1077                                     idx);
1078                 return -1;
1079         }
1080
1081         if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) {
1082                 m->sectors      = le16_to_cpu(s.v->sectors);
1083                 m->algorithm    = s.v->algorithm;
1084                 m->nr_blocks    = s.v->nr_blocks;
1085                 m->nr_redundant = s.v->nr_redundant;
1086
1087                 bch2_bkey_to_replicas(&m->r.e, k);
1088
1089                 /*
1090                  * XXX: account for stripes somehow here
1091                  */
1092 #if 0
1093                 update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
1094 #endif
1095
1096                 /* gc recalculates these fields: */
1097                 if (!(flags & BCH_BUCKET_MARK_GC)) {
1098                         for (i = 0; i < s.v->nr_blocks; i++) {
1099                                 m->block_sectors[i] =
1100                                         stripe_blockcount_get(s.v, i);
1101                                 m->blocks_nonempty += !!m->block_sectors[i];
1102                         }
1103                 }
1104
1105                 if (!gc)
1106                         bch2_stripes_heap_update(c, m, idx);
1107                 m->alive        = true;
1108         } else {
1109                 if (!gc)
1110                         bch2_stripes_heap_del(c, m, idx);
1111                 memset(m, 0, sizeof(*m));
1112         }
1113
1114         spin_unlock(&c->ec_stripes_heap_lock);
1115
1116         bucket_set_stripe(c, s.v, fs_usage, 0, flags);
1117         return 0;
1118 }
1119
1120 int bch2_mark_key_locked(struct bch_fs *c,
1121                    struct bkey_s_c k,
1122                    unsigned offset, s64 sectors,
1123                    struct bch_fs_usage *fs_usage,
1124                    u64 journal_seq, unsigned flags)
1125 {
1126         int ret = 0;
1127
1128         preempt_disable();
1129
1130         if (!fs_usage || (flags & BCH_BUCKET_MARK_GC))
1131                 fs_usage = fs_usage_ptr(c, journal_seq,
1132                                         flags & BCH_BUCKET_MARK_GC);
1133
1134         switch (k.k->type) {
1135         case KEY_TYPE_alloc:
1136                 ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
1137                 break;
1138         case KEY_TYPE_btree_ptr:
1139                 sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
1140                         ?  c->opts.btree_node_size
1141                         : -c->opts.btree_node_size;
1142
1143                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
1144                                 fs_usage, journal_seq, flags);
1145                 break;
1146         case KEY_TYPE_extent:
1147         case KEY_TYPE_reflink_v:
1148                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
1149                                 fs_usage, journal_seq, flags);
1150                 break;
1151         case KEY_TYPE_stripe:
1152                 ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
1153                 break;
1154         case KEY_TYPE_inode:
1155                 if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
1156                         fs_usage->nr_inodes++;
1157                 else
1158                         fs_usage->nr_inodes--;
1159                 break;
1160         case KEY_TYPE_reservation: {
1161                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1162
1163                 sectors *= replicas;
1164                 replicas = clamp_t(unsigned, replicas, 1,
1165                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1166
1167                 fs_usage->reserved                              += sectors;
1168                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1169                 break;
1170         }
1171         }
1172
1173         preempt_enable();
1174
1175         return ret;
1176 }
1177
1178 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
1179                   unsigned offset, s64 sectors,
1180                   struct bch_fs_usage *fs_usage,
1181                   u64 journal_seq, unsigned flags)
1182 {
1183         int ret;
1184
1185         percpu_down_read(&c->mark_lock);
1186         ret = bch2_mark_key_locked(c, k, offset, sectors,
1187                                    fs_usage, journal_seq, flags);
1188         percpu_up_read(&c->mark_lock);
1189
1190         return ret;
1191 }
1192
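/*
 * Mark the part of an existing key that is being overwritten by @new.  For
 * extents, @offset is where the overwritten range begins within @old and
 * @sectors is minus the number of @old's sectors covered by @new; a middle
 * overlap additionally sets BCH_BUCKET_MARK_OVERWRITE_SPLIT, since @old ends
 * up split into two fragments.  Returns 0 if @old isn't actually overlapped
 * by @new (the caller stops iterating), otherwise 1 or a negative error.
 */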
1193 inline int bch2_mark_overwrite(struct btree_trans *trans,
1194                                struct btree_iter *iter,
1195                                struct bkey_s_c old,
1196                                struct bkey_i *new,
1197                                struct bch_fs_usage *fs_usage,
1198                                unsigned flags)
1199 {
1200         struct bch_fs           *c = trans->c;
1201         struct btree            *b = iter->l[0].b;
1202         unsigned                offset = 0;
1203         s64                     sectors = 0;
1204
1205         flags |= BCH_BUCKET_MARK_OVERWRITE;
1206
1207         if (btree_node_is_extents(b)
1208             ? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
1209             : bkey_cmp(new->k.p, old.k->p))
1210                 return 0;
1211
1212         if (btree_node_is_extents(b)) {
1213                 switch (bch2_extent_overlap(&new->k, old.k)) {
1214                 case BCH_EXTENT_OVERLAP_ALL:
1215                         offset = 0;
1216                         sectors = -((s64) old.k->size);
1217                         break;
1218                 case BCH_EXTENT_OVERLAP_BACK:
1219                         offset = bkey_start_offset(&new->k) -
1220                                 bkey_start_offset(old.k);
1221                         sectors = bkey_start_offset(&new->k) -
1222                                 old.k->p.offset;
1223                         break;
1224                 case BCH_EXTENT_OVERLAP_FRONT:
1225                         offset = 0;
1226                         sectors = bkey_start_offset(old.k) -
1227                                 new->k.p.offset;
1228                         break;
1229                 case BCH_EXTENT_OVERLAP_MIDDLE:
1230                         offset = bkey_start_offset(&new->k) -
1231                                 bkey_start_offset(old.k);
1232                         sectors = -((s64) new->k.size);
1233                         flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
1234                         break;
1235                 }
1236
1237                 BUG_ON(sectors >= 0);
1238         }
1239
1240         return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
1241                                     trans->journal_res.seq, flags) ?: 1;
1242 }
1243
1244 int bch2_mark_update(struct btree_trans *trans,
1245                      struct btree_insert_entry *insert,
1246                      struct bch_fs_usage *fs_usage,
1247                      unsigned flags)
1248 {
1249         struct bch_fs           *c = trans->c;
1250         struct btree_iter       *iter = insert->iter;
1251         struct btree            *b = iter->l[0].b;
1252         struct btree_node_iter  node_iter = iter->l[0].iter;
1253         struct bkey_packed      *_k;
1254         int ret = 0;
1255
1256         if (!btree_node_type_needs_gc(iter->btree_id))
1257                 return 0;
1258
1259         if (!(trans->flags & BTREE_INSERT_NOMARK_INSERT))
1260                 bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k),
1261                         0, insert->k->k.size,
1262                         fs_usage, trans->journal_res.seq,
1263                         BCH_BUCKET_MARK_INSERT|flags);
1264
1265         if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
1266                 return 0;
1267
1268         /*
1269          * For non extents, we only mark the new key, not the key being
1270          * overwritten - unless we're actually deleting:
1271          */
1272         if ((iter->btree_id == BTREE_ID_ALLOC ||
1273              iter->btree_id == BTREE_ID_EC) &&
1274             !bkey_deleted(&insert->k->k))
1275                 return 0;
1276
1277         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1278                                                       KEY_TYPE_discard))) {
1279                 struct bkey             unpacked;
1280                 struct bkey_s_c         k = bkey_disassemble(b, _k, &unpacked);
1281
1282                 ret = bch2_mark_overwrite(trans, iter, k, insert->k,
1283                                           fs_usage, flags);
1284                 if (ret <= 0)
1285                         break;
1286
1287                 bch2_btree_node_iter_advance(&node_iter, b);
1288         }
1289
1290         return ret;
1291 }
1292
1293 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1294                                struct bch_fs_usage *fs_usage)
1295 {
1296         struct bch_fs *c = trans->c;
1297         struct btree_insert_entry *i;
1298         static int warned_disk_usage = 0;
1299         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1300         char buf[200];
1301
1302         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1303                                  trans->journal_res.seq) ||
1304             warned_disk_usage ||
1305             xchg(&warned_disk_usage, 1))
1306                 return;
1307
1308         bch_err(c, "disk usage increased more than %llu sectors reserved",
1309                 disk_res_sectors);
1310
1311         trans_for_each_update_iter(trans, i) {
1312                 struct btree_iter       *iter = i->iter;
1313                 struct btree            *b = iter->l[0].b;
1314                 struct btree_node_iter  node_iter = iter->l[0].iter;
1315                 struct bkey_packed      *_k;
1316
1317                 pr_err("while inserting");
1318                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1319                 pr_err("%s", buf);
1320                 pr_err("overlapping with");
1321
1322                 node_iter = iter->l[0].iter;
1323                 while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1324                                                         KEY_TYPE_discard))) {
1325                         struct bkey             unpacked;
1326                         struct bkey_s_c         k;
1327
1328                         k = bkey_disassemble(b, _k, &unpacked);
1329
1330                         if (btree_node_is_extents(b)
1331                             ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1332                             : bkey_cmp(i->k->k.p, k.k->p))
1333                                 break;
1334
1335                         bch2_bkey_val_to_text(&PBUF(buf), c, k);
1336                         pr_err("%s", buf);
1337
1338                         bch2_btree_node_iter_advance(&node_iter, b);
1339                 }
1340         }
1341 }
1342
1343 /* trans_mark: */
1344
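/*
 * Look up the current value of a key for transactional marking: if the
 * transaction already has a pending update covering @pos, return that (so
 * triggers see the transaction's own updates); otherwise allocate an
 * iterator and read the key from the btree.
 */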
1345 static int trans_get_key(struct btree_trans *trans,
1346                          enum btree_id btree_id, struct bpos pos,
1347                          struct btree_iter **iter,
1348                          struct bkey_s_c *k)
1349 {
1350         struct btree_insert_entry *i;
1351         int ret;
1352
1353         for (i = trans->updates;
1354              i < trans->updates + trans->nr_updates;
1355              i++)
1356                 if (!i->deferred &&
1357                     i->iter->btree_id == btree_id &&
1358                     (btree_node_type_is_extents(btree_id)
1359                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1360                        bkey_cmp(pos, i->k->k.p) < 0
1361                      : !bkey_cmp(pos, i->iter->pos))) {
1362                         *iter   = i->iter;
1363                         *k      = bkey_i_to_s_c(i->k);
1364                         return 0;
1365                 }
1366
1367         *iter = __bch2_trans_get_iter(trans, btree_id, pos,
1368                                    BTREE_ITER_SLOTS|BTREE_ITER_INTENT, 0);
1369         if (IS_ERR(*iter))
1370                 return PTR_ERR(*iter);
1371
1372         bch2_trans_iter_free_on_commit(trans, *iter);
1373
1374         *k = bch2_btree_iter_peek_slot(*iter);
1375         ret = bkey_err(*k);
1376         if (ret)
1377                 bch2_trans_iter_put(trans, *iter);
1378         return ret;
1379 }
1380
1381 static void *trans_update_key(struct btree_trans *trans,
1382                               struct btree_iter *iter,
1383                               unsigned u64s)
1384 {
1385         struct bkey_i *new_k;
1386         unsigned i;
1387
1388         new_k = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
1389         if (IS_ERR(new_k))
1390                 return new_k;
1391
1392         bkey_init(&new_k->k);
1393         new_k->k.p = iter->pos;
1394
1395         for (i = 0; i < trans->nr_updates; i++)
1396                 if (!trans->updates[i].deferred &&
1397                     trans->updates[i].iter == iter) {
1398                         trans->updates[i].k = new_k;
1399                         return new_k;
1400                 }
1401
1402         bch2_trans_update(trans, ((struct btree_insert_entry) {
1403                 .iter = iter,
1404                 .k = new_k,
1405                 .triggered = true,
1406         }));
1407
1408         return new_k;
1409 }
1410
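/*
 * Transactional counterpart of bch2_mark_pointer(): instead of updating the
 * in memory bucket mark, read the bucket's alloc key (via trans_get_key()),
 * apply the sector count change, and queue an updated KEY_TYPE_alloc key in
 * the transaction.  Returns 1 if the pointer turned out to be stale.
 */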
1411 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1412                         struct extent_ptr_decoded p,
1413                         s64 sectors, enum bch_data_type data_type)
1414 {
1415         struct bch_fs *c = trans->c;
1416         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1417         struct btree_iter *iter;
1418         struct bkey_s_c k;
1419         struct bkey_alloc_unpacked u;
1420         struct bkey_i_alloc *a;
1421         bool overflow;
1422         int ret;
1423
1424         ret = trans_get_key(trans, BTREE_ID_ALLOC,
1425                             POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr)),
1426                             &iter, &k);
1427         if (ret)
1428                 return ret;
1429
1430         if (k.k->type != KEY_TYPE_alloc) {
1431                 bch_err_ratelimited(c, "pointer to nonexistent bucket %u:%zu",
1432                                     p.ptr.dev,
1433                                     PTR_BUCKET_NR(ca, &p.ptr));
1434                 ret = -1;
1435                 goto out;
1436         }
1437
1438         u = bch2_alloc_unpack(k);
1439
1440         if (gen_after(u.gen, p.ptr.gen)) {
1441                 ret = 1;
1442                 goto out;
1443         }
1444
1445         if (!p.ptr.cached)
1446                 overflow = checked_add(u.dirty_sectors, sectors);
1447         else
1448                 overflow = checked_add(u.cached_sectors, sectors);
1449
1450         u.data_type = u.dirty_sectors || u.cached_sectors
1451                 ? data_type : 0;
1452
1453         bch2_fs_inconsistent_on(overflow, c,
1454                 "bucket sector count overflow: %u + %lli > U16_MAX",
1455                 !p.ptr.cached
1456                 ? u.dirty_sectors
1457                 : u.cached_sectors, sectors);
1458
1459         a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
1460         ret = PTR_ERR_OR_ZERO(a);
1461         if (ret)
1462                 goto out;
1463
1464         bkey_alloc_init(&a->k_i);
1465         a->k.p = iter->pos;
1466         bch2_alloc_pack(a, u);
1467 out:
1468         bch2_trans_iter_put(trans, iter);
1469         return ret;
1470 }
1471
1472 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1473                         struct bch_extent_stripe_ptr p,
1474                         s64 sectors, enum bch_data_type data_type)
1475 {
1476         struct bch_fs *c = trans->c;
1477         struct bch_replicas_padded r;
1478         struct btree_iter *iter;
1479         struct bkey_i *new_k;
1480         struct bkey_s_c k;
1481         struct bkey_s_stripe s;
1482         unsigned nr_data;
1483         s64 parity_sectors;
1484         int ret = 0;
1485
1486         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1487         if (ret)
1488                 return ret;
1489
1490         if (k.k->type != KEY_TYPE_stripe) {
1491                 bch2_fs_inconsistent(c,
1492                         "pointer to nonexistent stripe %llu",
1493                         (u64) p.idx);
1494                 ret = -EIO;
1495                 goto out;
1496         }
1497
1498         new_k = trans_update_key(trans, iter, k.k->u64s);
1499         ret = PTR_ERR_OR_ZERO(new_k);
1500         if (ret)
1501                 goto out;
1502
1503         bkey_reassemble(new_k, k);
1504         s = bkey_i_to_s_stripe(new_k);
1505
1506         nr_data = s.v->nr_blocks - s.v->nr_redundant;
1507
1508         parity_sectors = DIV_ROUND_UP(abs(sectors) * s.v->nr_redundant, nr_data);
1509
1510         if (sectors < 0)
1511                 parity_sectors = -parity_sectors;
1512
1513         stripe_blockcount_set(s.v, p.block,
1514                 stripe_blockcount_get(s.v, p.block) +
1515                 sectors + parity_sectors);
1516
1517         bch2_bkey_to_replicas(&r.e, s.s_c);
1518
1519         update_replicas_list(trans, &r.e, sectors);
1520 out:
1521         bch2_trans_iter_put(trans, iter);
1522         return ret;
1523 }
1524
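/*
 * Mark every pointer in an extent (or btree node pointer): each bucket the
 * data lives in is updated, erasure coded pointers also update the stripes
 * they belong to, and the accumulated dirty sectors are added to the
 * matching replicas entry. Cached pointers only bump the per device cached
 * sector count, and stale cached pointers are ignored.
 */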
1525 static int bch2_trans_mark_extent(struct btree_trans *trans,
1526                         struct bkey_s_c k, unsigned offset,
1527                         s64 sectors, unsigned flags,
1528                         enum bch_data_type data_type)
1529 {
1530         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1531         const union bch_extent_entry *entry;
1532         struct extent_ptr_decoded p;
1533         struct bch_replicas_padded r;
1534         s64 dirty_sectors = 0;
1535         bool stale;
1536         unsigned i;
1537         int ret;
1538
1539         r.e.data_type   = data_type;
1540         r.e.nr_devs     = 0;
1541         r.e.nr_required = 1;
1542
1543         BUG_ON(!sectors);
1544
1545         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1546                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1547                         ? sectors
1548                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1549
1550                 /*
1551                  * can happen due to rounding with compressed extents:
1552                  */
1553                 if (!disk_sectors)
1554                         continue;
1555
1556                 ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
1557                                               data_type);
1558                 if (ret < 0)
1559                         return ret;
1560
1561                 stale = ret > 0;
1562
1563                 if (p.ptr.cached) {
1564                         if (disk_sectors && !stale)
1565                                 update_cached_sectors_list(trans, p.ptr.dev,
1566                                                            disk_sectors);
1567                 } else if (!p.ec_nr) {
1568                         dirty_sectors          += disk_sectors;
1569                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1570                 } else {
1571                         for (i = 0; i < p.ec_nr; i++) {
1572                                 ret = bch2_trans_mark_stripe_ptr(trans, p.ec[i],
1573                                                 disk_sectors, data_type);
1574                                 if (ret)
1575                                         return ret;
1576                         }
1577
1578                         r.e.nr_required = 0;
1579                 }
1580         }
1581
1582         if (dirty_sectors)
1583                 update_replicas_list(trans, &r.e, dirty_sectors);
1584
1585         return 0;
1586 }
1587
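/*
 * Adjust the refcount of one indirect extent referenced by a reflink
 * pointer: the refcount is incremented on insert, and decremented only
 * when the overwrite covers the whole indirect extent; the indirect extent
 * is deleted once its refcount hits zero. The return value is the number
 * of sectors up to the end of the indirect extent, so the caller knows how
 * far to advance.
 */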
1588 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1589                         struct bkey_s_c_reflink_p p,
1590                         u64 idx, unsigned sectors,
1591                         unsigned flags)
1592 {
1593         struct bch_fs *c = trans->c;
1594         struct btree_iter *iter;
1595         struct bkey_i *new_k;
1596         struct bkey_s_c k;
1597         struct bkey_i_reflink_v *r_v;
1598         s64 ret;
1599
1600         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1601                             POS(0, idx), &iter, &k);
1602         if (ret)
1603                 return ret;
1604
1605         if (k.k->type != KEY_TYPE_reflink_v) {
1606                 bch2_fs_inconsistent(c,
1607                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1608                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1609                 ret = -EIO;
1610                 goto err;
1611         }
1612
1613         if ((flags & BCH_BUCKET_MARK_OVERWRITE) &&
1614             (bkey_start_offset(k.k) < idx ||
1615              k.k->p.offset > idx + sectors))
1616                 goto out;
1617
1618         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1619         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1620
1621         new_k = trans_update_key(trans, iter, k.k->u64s);
1622         ret = PTR_ERR_OR_ZERO(new_k);
1623         if (ret)
1624                 goto err;
1625
1626         bkey_reassemble(new_k, k);
1627         r_v = bkey_i_to_reflink_v(new_k);
1628
1629         le64_add_cpu(&r_v->v.refcount,
1630                      !(flags & BCH_BUCKET_MARK_OVERWRITE) ? 1 : -1);
1631
1632         if (!r_v->v.refcount) {
1633                 r_v->k.type = KEY_TYPE_deleted;
1634                 set_bkey_val_u64s(&r_v->k, 0);
1635         }
1636 out:
1637         ret = k.k->p.offset - idx;
1638 err:
1639         bch2_trans_iter_put(trans, iter);
1640         return ret;
1641 }
1642
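/*
 * Walk the range of indirect extents covered by a reflink pointer and
 * adjust each one's refcount in turn.
 */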
1643 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1644                         struct bkey_s_c_reflink_p p, unsigned offset,
1645                         s64 sectors, unsigned flags)
1646 {
1647         u64 idx = le64_to_cpu(p.v->idx) + offset;
1648         s64 ret = 0;
1649
1650         sectors = abs(sectors);
1651         BUG_ON(offset + sectors > p.k->size);
1652
1653         while (sectors) {
1654                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1655                 if (ret < 0)
1656                         break;
1657
1658                 idx += ret;
1659                 sectors = max_t(s64, 0LL, sectors - ret);
1660                 ret = 0;
1661         }
1662
1663         return ret;
1664 }
1665
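/*
 * Transactional mark path: dispatch on key type and turn the key into
 * alloc, stripe and reflink updates plus replicas/usage deltas, all queued
 * in the same btree transaction as the update that triggered them.
 */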
1666 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
1667                         unsigned offset, s64 sectors, unsigned flags)
1668 {
1669         struct replicas_delta_list *d;
1670         struct bch_fs *c = trans->c;
1671
1672         switch (k.k->type) {
1673         case KEY_TYPE_btree_ptr:
1674                 sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE)
1675                         ?  c->opts.btree_node_size
1676                         : -c->opts.btree_node_size;
1677
1678                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1679                                               flags, BCH_DATA_BTREE);
1680         case KEY_TYPE_extent:
1681         case KEY_TYPE_reflink_v:
1682                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1683                                               flags, BCH_DATA_USER);
1684         case KEY_TYPE_inode:
1685                 d = replicas_deltas_realloc(trans, 0);
1686
1687                 if (!(flags & BCH_BUCKET_MARK_OVERWRITE))
1688                         d->fs_usage.nr_inodes++;
1689                 else
1690                         d->fs_usage.nr_inodes--;
1691                 return 0;
1692         case KEY_TYPE_reservation: {
1693                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1694
1695                 d = replicas_deltas_realloc(trans, 0);
1696
1697                 sectors *= replicas;
1698                 replicas = clamp_t(unsigned, replicas, 1,
1699                                    ARRAY_SIZE(d->fs_usage.persistent_reserved));
1700
1701                 d->fs_usage.reserved                            += sectors;
1702                 d->fs_usage.persistent_reserved[replicas - 1]   += sectors;
1703                 return 0;
1704         }
1705         case KEY_TYPE_reflink_p:
1706                 return bch2_trans_mark_reflink_p(trans,
1707                                         bkey_s_c_to_reflink_p(k),
1708                                         offset, sectors, flags);
1709         default:
1710                 return 0;
1711         }
1712 }
1713
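/*
 * For a pending btree update: mark the key being inserted, then walk the
 * existing keys it overlaps in the leaf node and mark each overwritten
 * range with a negative sector count, according to how the new key
 * overlaps it (all, front, back or middle).
 */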
1714 int bch2_trans_mark_update(struct btree_trans *trans,
1715                            struct btree_iter *iter,
1716                            struct bkey_i *insert)
1717 {
1718         struct btree            *b = iter->l[0].b;
1719         struct btree_node_iter  node_iter = iter->l[0].iter;
1720         struct bkey_packed      *_k;
1721         int ret;
1722
1723         if (!btree_node_type_needs_gc(iter->btree_id))
1724                 return 0;
1725
1726         ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
1727                         0, insert->k.size, BCH_BUCKET_MARK_INSERT);
1728         if (ret)
1729                 return ret;
1730
1731         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
1732                                                       KEY_TYPE_discard))) {
1733                 struct bkey             unpacked;
1734                 struct bkey_s_c         k;
1735                 unsigned                offset = 0;
1736                 s64                     sectors = 0;
1737                 unsigned                flags = BCH_BUCKET_MARK_OVERWRITE;
1738
1739                 k = bkey_disassemble(b, _k, &unpacked);
1740
1741                 if (btree_node_is_extents(b)
1742                     ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
1743                     : bkey_cmp(insert->k.p, k.k->p))
1744                         break;
1745
1746                 if (btree_node_is_extents(b)) {
1747                         switch (bch2_extent_overlap(&insert->k, k.k)) {
1748                         case BCH_EXTENT_OVERLAP_ALL:
1749                                 offset = 0;
1750                                 sectors = -((s64) k.k->size);
1751                                 break;
1752                         case BCH_EXTENT_OVERLAP_BACK:
1753                                 offset = bkey_start_offset(&insert->k) -
1754                                         bkey_start_offset(k.k);
1755                                 sectors = bkey_start_offset(&insert->k) -
1756                                         k.k->p.offset;
1757                                 break;
1758                         case BCH_EXTENT_OVERLAP_FRONT:
1759                                 offset = 0;
1760                                 sectors = bkey_start_offset(k.k) -
1761                                         insert->k.p.offset;
1762                                 break;
1763                         case BCH_EXTENT_OVERLAP_MIDDLE:
1764                                 offset = bkey_start_offset(&insert->k) -
1765                                         bkey_start_offset(k.k);
1766                                 sectors = -((s64) insert->k.size);
1767                                 flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
1768                                 break;
1769                         }
1770
1771                         BUG_ON(sectors >= 0);
1772                 }
1773
1774                 ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
1775                 if (ret)
1776                         return ret;
1777
1778                 bch2_btree_node_iter_advance(&node_iter, b);
1779         }
1780
1781         return 0;
1782 }
1783
1784 /* Disk reservations: */
1785
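/*
 * Reset the percpu sectors_available caches and recompute the global count
 * from current filesystem usage.
 */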
1786 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
1787 {
1788         percpu_u64_set(&c->pcpu->sectors_available, 0);
1789
1790         return avail_factor(__bch2_fs_usage_read_short(c).free);
1791 }
1792
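/* Release a reservation, subtracting its sectors from online reserved. */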
1793 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
1794 {
1795         percpu_down_read(&c->mark_lock);
1796         this_cpu_sub(c->usage[0]->online_reserved,
1797                      res->sectors);
1798         percpu_up_read(&c->mark_lock);
1799
1800         res->sectors = 0;
1801 }
1802
1803 #define SECTORS_CACHE   1024
1804
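/*
 * Reserve @sectors for a pending write. The fast path takes sectors from a
 * per cpu cache, refilled from the global sectors_available counter in
 * batches of SECTORS_CACHE; if the global counter can't cover the request
 * we fall back to a slow path that takes mark_lock for write, recounts
 * what is actually free, and then either grants the reservation or returns
 * -ENOSPC (BCH_DISK_RESERVATION_NOFAIL forces it to succeed regardless).
 */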
1805 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1806                               unsigned sectors, int flags)
1807 {
1808         struct bch_fs_pcpu *pcpu;
1809         u64 old, v, get;
1810         s64 sectors_available;
1811         int ret;
1812
1813         percpu_down_read(&c->mark_lock);
1814         preempt_disable();
1815         pcpu = this_cpu_ptr(c->pcpu);
1816
1817         if (sectors <= pcpu->sectors_available)
1818                 goto out;
1819
1820         v = atomic64_read(&c->sectors_available);
1821         do {
1822                 old = v;
1823                 get = min((u64) sectors + SECTORS_CACHE, old);
1824
1825                 if (get < sectors) {
1826                         preempt_enable();
1827                         percpu_up_read(&c->mark_lock);
1828                         goto recalculate;
1829                 }
1830         } while ((v = atomic64_cmpxchg(&c->sectors_available,
1831                                        old, old - get)) != old);
1832
1833         pcpu->sectors_available         += get;
1834
1835 out:
1836         pcpu->sectors_available         -= sectors;
1837         this_cpu_add(c->usage[0]->online_reserved, sectors);
1838         res->sectors                    += sectors;
1839
1840         preempt_enable();
1841         percpu_up_read(&c->mark_lock);
1842         return 0;
1843
1844 recalculate:
1845         percpu_down_write(&c->mark_lock);
1846
1847         sectors_available = bch2_recalc_sectors_available(c);
1848
1849         if (sectors <= sectors_available ||
1850             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1851                 atomic64_set(&c->sectors_available,
1852                              max_t(s64, 0, sectors_available - sectors));
1853                 this_cpu_add(c->usage[0]->online_reserved, sectors);
1854                 res->sectors                    += sectors;
1855                 ret = 0;
1856         } else {
1857                 atomic64_set(&c->sectors_available, sectors_available);
1858                 ret = -ENOSPC;
1859         }
1860
1861         percpu_up_write(&c->mark_lock);
1862
1863         return ret;
1864 }
1865
1866 /* Startup/shutdown: */
1867
1868 static void buckets_free_rcu(struct rcu_head *rcu)
1869 {
1870         struct bucket_array *buckets =
1871                 container_of(rcu, struct bucket_array, rcu);
1872
1873         kvpfree(buckets,
1874                 sizeof(struct bucket_array) +
1875                 buckets->nbuckets * sizeof(struct bucket));
1876 }
1877
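/*
 * Allocate (or reallocate) a device's bucket array along with the free
 * lists and heaps that are sized relative to it. Copygc is stopped while
 * the arrays are swapped in and restarted afterwards if it was running;
 * on a resize the old contents are copied across under the gc, bucket and
 * mark locks, and the old bucket array is freed via RCU.
 */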
1878 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1879 {
1880         struct bucket_array *buckets = NULL, *old_buckets = NULL;
1881         unsigned long *buckets_nouse = NULL;
1882         unsigned long *buckets_written = NULL;
1883         alloc_fifo      free[RESERVE_NR];
1884         alloc_fifo      free_inc;
1885         alloc_heap      alloc_heap;
1886         copygc_heap     copygc_heap;
1887
1888         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1889                              ca->mi.bucket_size / c->opts.btree_node_size);
1890         /* XXX: these should be tunable */
1891         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
1892         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
1893         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
1894                                       btree_reserve * 2);
1895         bool resize = ca->buckets[0] != NULL,
1896              start_copygc = ca->copygc_thread != NULL;
1897         int ret = -ENOMEM;
1898         unsigned i;
1899
1900         memset(&free,           0, sizeof(free));
1901         memset(&free_inc,       0, sizeof(free_inc));
1902         memset(&alloc_heap,     0, sizeof(alloc_heap));
1903         memset(&copygc_heap,    0, sizeof(copygc_heap));
1904
1905         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
1906                                             nbuckets * sizeof(struct bucket),
1907                                             GFP_KERNEL|__GFP_ZERO)) ||
1908             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1909                                             sizeof(unsigned long),
1910                                             GFP_KERNEL|__GFP_ZERO)) ||
1911             !(buckets_written   = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1912                                             sizeof(unsigned long),
1913                                             GFP_KERNEL|__GFP_ZERO)) ||
1914             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
1915             !init_fifo(&free[RESERVE_MOVINGGC],
1916                        copygc_reserve, GFP_KERNEL) ||
1917             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
1918             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
1919             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
1920             !init_heap(&copygc_heap,    copygc_reserve, GFP_KERNEL))
1921                 goto err;
1922
1923         buckets->first_bucket   = ca->mi.first_bucket;
1924         buckets->nbuckets       = nbuckets;
1925
1926         bch2_copygc_stop(ca);
1927
1928         if (resize) {
1929                 down_write(&c->gc_lock);
1930                 down_write(&ca->bucket_lock);
1931                 percpu_down_write(&c->mark_lock);
1932         }
1933
1934         old_buckets = bucket_array(ca);
1935
1936         if (resize) {
1937                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
1938
1939                 memcpy(buckets->b,
1940                        old_buckets->b,
1941                        n * sizeof(struct bucket));
1942                 memcpy(buckets_nouse,
1943                        ca->buckets_nouse,
1944                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1945                 memcpy(buckets_written,
1946                        ca->buckets_written,
1947                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1948         }
1949
1950         rcu_assign_pointer(ca->buckets[0], buckets);
1951         buckets = old_buckets;
1952
1953         swap(ca->buckets_nouse, buckets_nouse);
1954         swap(ca->buckets_written, buckets_written);
1955
1956         if (resize)
1957                 percpu_up_write(&c->mark_lock);
1958
1959         spin_lock(&c->freelist_lock);
1960         for (i = 0; i < RESERVE_NR; i++) {
1961                 fifo_move(&free[i], &ca->free[i]);
1962                 swap(ca->free[i], free[i]);
1963         }
1964         fifo_move(&free_inc, &ca->free_inc);
1965         swap(ca->free_inc, free_inc);
1966         spin_unlock(&c->freelist_lock);
1967
1968         /* with gc lock held, alloc_heap can't be in use: */
1969         swap(ca->alloc_heap, alloc_heap);
1970
1971         /* and we shut down copygc: */
1972         swap(ca->copygc_heap, copygc_heap);
1973
1974         nbuckets = ca->mi.nbuckets;
1975
1976         if (resize) {
1977                 up_write(&ca->bucket_lock);
1978                 up_write(&c->gc_lock);
1979         }
1980
1981         if (start_copygc &&
1982             bch2_copygc_start(c, ca))
1983                 bch_err(ca, "error restarting copygc thread");
1984
1985         ret = 0;
1986 err:
1987         free_heap(&copygc_heap);
1988         free_heap(&alloc_heap);
1989         free_fifo(&free_inc);
1990         for (i = 0; i < RESERVE_NR; i++)
1991                 free_fifo(&free[i]);
1992         kvpfree(buckets_nouse,
1993                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1994         kvpfree(buckets_written,
1995                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1996         if (buckets)
1997                 call_rcu(&buckets->rcu, buckets_free_rcu);
1998
1999         return ret;
2000 }
2001
2002 void bch2_dev_buckets_free(struct bch_dev *ca)
2003 {
2004         unsigned i;
2005
2006         free_heap(&ca->copygc_heap);
2007         free_heap(&ca->alloc_heap);
2008         free_fifo(&ca->free_inc);
2009         for (i = 0; i < RESERVE_NR; i++)
2010                 free_fifo(&ca->free[i]);
2011         kvpfree(ca->buckets_written,
2012                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2013         kvpfree(ca->buckets_nouse,
2014                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2015         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2016                 sizeof(struct bucket_array) +
2017                 ca->mi.nbuckets * sizeof(struct bucket));
2018
2019         free_percpu(ca->usage[0]);
2020 }
2021
2022 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2023 {
2024         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2025                 return -ENOMEM;
2026
2027         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2028 }