1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  *   The bucket contains data that we must not discard (either only copy,
24  *   or one of the 'main copies' for data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
65
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
79
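/*
 * A minimal sketch of the state classification described in the header
 * comment above, showing how the states map onto the bucket_mark fields used
 * throughout this file. bucket_state_str() is a hypothetical helper for
 * illustration only; the accounting code below uses bucket_type() and
 * is_available_bucket() for its actual decisions.
 */
static inline const char *bucket_state_str(struct bucket_mark m)
{
        if (m.owned_by_allocator)
                return "allocator";     /* on a freelist, or an open bucket */
        if (m.data_type == BCH_DATA_SB ||
            m.data_type == BCH_DATA_JOURNAL ||
            m.data_type == BCH_DATA_BTREE)
                return "metadata";
        if (m.dirty_sectors)
                return "dirty";         /* must not be discarded */
        if (m.cached_sectors)
                return "cached";        /* may be discarded/invalidated */
        return "free";
}
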
80 /*
81  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
82  * wraparound:
83  */
84 void bch2_bucket_seq_cleanup(struct bch_fs *c)
85 {
86         u64 journal_seq = atomic64_read(&c->journal.seq);
87         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
88         struct bch_dev *ca;
89         struct bucket_array *buckets;
90         struct bucket *g;
91         struct bucket_mark m;
92         unsigned i;
93
94         if (journal_seq - c->last_bucket_seq_cleanup <
95             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
96                 return;
97
98         c->last_bucket_seq_cleanup = journal_seq;
99
100         for_each_member_device(ca, c, i) {
101                 down_read(&ca->bucket_lock);
102                 buckets = bucket_array(ca);
103
104                 for_each_bucket(g, buckets) {
105                         bucket_cmpxchg(g, m, ({
106                                 if (!m.journal_seq_valid ||
107                                     bucket_needs_journal_commit(m, last_seq_ondisk))
108                                         break;
109
110                                 m.journal_seq_valid = 0;
111                         }));
112                 }
113                 up_read(&ca->bucket_lock);
114         }
115 }
116
117 void bch2_fs_usage_initialize(struct bch_fs *c)
118 {
119         struct bch_fs_usage *usage;
120         unsigned i;
121
122         percpu_down_write(&c->mark_lock);
123         usage = c->usage_base;
124
125         bch2_fs_usage_acc_to_base(c, 0);
126         bch2_fs_usage_acc_to_base(c, 1);
127
128         for (i = 0; i < BCH_REPLICAS_MAX; i++)
129                 usage->reserved += usage->persistent_reserved[i];
130
131         for (i = 0; i < c->replicas.nr; i++) {
132                 struct bch_replicas_entry *e =
133                         cpu_replicas_entry(&c->replicas, i);
134
135                 switch (e->data_type) {
136                 case BCH_DATA_BTREE:
137                         usage->btree    += usage->replicas[i];
138                         break;
139                 case BCH_DATA_USER:
140                         usage->data     += usage->replicas[i];
141                         break;
142                 case BCH_DATA_CACHED:
143                         usage->cached   += usage->replicas[i];
144                         break;
145                 }
146         }
147
148         percpu_up_write(&c->mark_lock);
149 }
150
151 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
152 {
153         if (fs_usage == c->usage_scratch)
154                 mutex_unlock(&c->usage_scratch_lock);
155         else
156                 kfree(fs_usage);
157 }
158
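/*
 * Get a zeroed bch_fs_usage for summing up a transaction's usage deltas: try
 * a cheap GFP_NOWAIT allocation, then the preallocated c->usage_scratch
 * buffer if it's free, then a blocking GFP_NOFS allocation, and finally wait
 * for the scratch buffer, so this path can always make progress under memory
 * pressure.
 */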
159 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
160 {
161         struct bch_fs_usage *ret;
162         unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
163
164         ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
165         if (ret)
166                 return ret;
167
168         if (mutex_trylock(&c->usage_scratch_lock))
169                 goto out_pool;
170
171         ret = kzalloc(bytes, GFP_NOFS);
172         if (ret)
173                 return ret;
174
175         mutex_lock(&c->usage_scratch_lock);
176 out_pool:
177         ret = c->usage_scratch;
178         memset(ret, 0, bytes);
179         return ret;
180 }
181
182 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
183 {
184         struct bch_dev_usage ret;
185
186         memset(&ret, 0, sizeof(ret));
187         acc_u64s_percpu((u64 *) &ret,
188                         (u64 __percpu *) ca->usage[0],
189                         sizeof(ret) / sizeof(u64));
190
191         return ret;
192 }
193
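/*
 * Filesystem usage is kept in layers: c->usage_base holds the totals,
 * c->usage[0] and c->usage[1] are percpu deltas accumulated for even/odd
 * journal sequence numbers (folded back into the base by
 * bch2_fs_usage_acc_to_base()), and c->usage_gc is a separate copy built up
 * by gc. Readers sum the base plus both percpu arrays under the usage_lock
 * seqcount.
 */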
194 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
195                                                 unsigned journal_seq,
196                                                 bool gc)
197 {
198         return this_cpu_ptr(gc
199                             ? c->usage_gc
200                             : c->usage[journal_seq & 1]);
201 }
202
203 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
204 {
205         ssize_t offset = v - (u64 *) c->usage_base;
206         unsigned seq;
207         u64 ret;
208
209         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
210         percpu_rwsem_assert_held(&c->mark_lock);
211
212         do {
213                 seq = read_seqcount_begin(&c->usage_lock);
214                 ret = *v +
215                         percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
216                         percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
217         } while (read_seqcount_retry(&c->usage_lock, seq));
218
219         return ret;
220 }
221
222 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
223 {
224         struct bch_fs_usage *ret;
225         unsigned seq, v, u64s = fs_usage_u64s(c);
226 retry:
227         ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
228         if (unlikely(!ret))
229                 return NULL;
230
231         percpu_down_read(&c->mark_lock);
232
233         v = fs_usage_u64s(c);
234         if (unlikely(u64s != v)) {
235                 u64s = v;
236                 percpu_up_read(&c->mark_lock);
237                 kfree(ret);
238                 goto retry;
239         }
240
241         do {
242                 seq = read_seqcount_begin(&c->usage_lock);
243                 memcpy(ret, c->usage_base, u64s * sizeof(u64));
244                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
245                 acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
246         } while (read_seqcount_retry(&c->usage_lock, seq));
247
248         return ret;
249 }
250
251 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
252 {
253         unsigned u64s = fs_usage_u64s(c);
254
255         BUG_ON(idx >= 2);
256
257         write_seqcount_begin(&c->usage_lock);
258
259         acc_u64s_percpu((u64 *) c->usage_base,
260                         (u64 __percpu *) c->usage[idx], u64s);
261         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
262
263         write_seqcount_end(&c->usage_lock);
264 }
265
266 void bch2_fs_usage_to_text(struct printbuf *out,
267                            struct bch_fs *c,
268                            struct bch_fs_usage *fs_usage)
269 {
270         unsigned i;
271
272         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
273
274         pr_buf(out, "hidden:\t\t\t\t%llu\n",
275                fs_usage->hidden);
276         pr_buf(out, "data:\t\t\t\t%llu\n",
277                fs_usage->data);
278         pr_buf(out, "cached:\t\t\t\t%llu\n",
279                fs_usage->cached);
280         pr_buf(out, "reserved:\t\t\t%llu\n",
281                fs_usage->reserved);
282         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
283                fs_usage->nr_inodes);
284         pr_buf(out, "online reserved:\t\t%llu\n",
285                fs_usage->online_reserved);
286
287         for (i = 0;
288              i < ARRAY_SIZE(fs_usage->persistent_reserved);
289              i++) {
290                 pr_buf(out, "%u replicas:\n", i + 1);
291                 pr_buf(out, "\treserved:\t\t%llu\n",
292                        fs_usage->persistent_reserved[i]);
293         }
294
295         for (i = 0; i < c->replicas.nr; i++) {
296                 struct bch_replicas_entry *e =
297                         cpu_replicas_entry(&c->replicas, i);
298
299                 pr_buf(out, "\t");
300                 bch2_replicas_entry_to_text(out, e);
301                 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
302         }
303 }
304
305 #define RESERVE_FACTOR  6
306
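/*
 * reserve_factor() pads a reservation by roughly 1/2^RESERVE_FACTOR (~1.6%);
 * avail_factor() applies the inverse scaling to raw capacity.
 */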
307 static u64 reserve_factor(u64 r)
308 {
309         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
310 }
311
312 static u64 avail_factor(u64 r)
313 {
314         return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
315 }
316
317 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
318 {
319         return min(fs_usage->hidden +
320                    fs_usage->btree +
321                    fs_usage->data +
322                    reserve_factor(fs_usage->reserved +
323                                   fs_usage->online_reserved),
324                    c->capacity);
325 }
326
327 static struct bch_fs_usage_short
328 __bch2_fs_usage_read_short(struct bch_fs *c)
329 {
330         struct bch_fs_usage_short ret;
331         u64 data, reserved;
332
333         ret.capacity = c->capacity -
334                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
335
336         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
337                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
338         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
339                 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
340
341         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
342         ret.free        = ret.capacity - ret.used;
343
344         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
345
346         return ret;
347 }
348
349 struct bch_fs_usage_short
350 bch2_fs_usage_read_short(struct bch_fs *c)
351 {
352         struct bch_fs_usage_short ret;
353
354         percpu_down_read(&c->mark_lock);
355         ret = __bch2_fs_usage_read_short(c);
356         percpu_up_read(&c->mark_lock);
357
358         return ret;
359 }
360
361 static inline int is_unavailable_bucket(struct bucket_mark m)
362 {
363         return !is_available_bucket(m);
364 }
365
366 static inline int is_fragmented_bucket(struct bucket_mark m,
367                                        struct bch_dev *ca)
368 {
369         if (!m.owned_by_allocator &&
370             m.data_type == BCH_DATA_USER &&
371             bucket_sectors_used(m))
372                 return max_t(int, 0, (int) ca->mi.bucket_size -
373                              bucket_sectors_used(m));
374         return 0;
375 }
376
377 static inline enum bch_data_type bucket_type(struct bucket_mark m)
378 {
379         return m.cached_sectors && !m.dirty_sectors
380                 ? BCH_DATA_CACHED
381                 : m.data_type;
382 }
383
384 static bool bucket_became_unavailable(struct bucket_mark old,
385                                       struct bucket_mark new)
386 {
387         return is_available_bucket(old) &&
388                !is_available_bucket(new);
389 }
390
391 int bch2_fs_usage_apply(struct bch_fs *c,
392                         struct bch_fs_usage *fs_usage,
393                         struct disk_reservation *disk_res,
394                         unsigned journal_seq)
395 {
396         s64 added = fs_usage->data + fs_usage->reserved;
397         s64 should_not_have_added;
398         int ret = 0;
399
400         percpu_rwsem_assert_held(&c->mark_lock);
401
402         /*
403          * Not allowed to reduce sectors_available except by getting a
404          * reservation:
405          */
406         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
407         if (WARN_ONCE(should_not_have_added > 0,
408                       "disk usage increased by %lli without a reservation",
409                       should_not_have_added)) {
410                 atomic64_sub(should_not_have_added, &c->sectors_available);
411                 added -= should_not_have_added;
412                 ret = -1;
413         }
414
415         if (added > 0) {
416                 disk_res->sectors               -= added;
417                 fs_usage->online_reserved       -= added;
418         }
419
420         preempt_disable();
421         acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
422                  (u64 *) fs_usage, fs_usage_u64s(c));
423         preempt_enable();
424
425         return ret;
426 }
427
428 static inline void account_bucket(struct bch_fs_usage *fs_usage,
429                                   struct bch_dev_usage *dev_usage,
430                                   enum bch_data_type type,
431                                   int nr, s64 size)
432 {
433         if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
434                 fs_usage->hidden        += size;
435
436         dev_usage->buckets[type]        += nr;
437 }
438
439 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
440                                   struct bch_fs_usage *fs_usage,
441                                   struct bucket_mark old, struct bucket_mark new,
442                                   bool gc)
443 {
444         struct bch_dev_usage *dev_usage;
445
446         percpu_rwsem_assert_held(&c->mark_lock);
447
448         preempt_disable();
449         dev_usage = this_cpu_ptr(ca->usage[gc]);
450
451         if (bucket_type(old))
452                 account_bucket(fs_usage, dev_usage, bucket_type(old),
453                                -1, -ca->mi.bucket_size);
454
455         if (bucket_type(new))
456                 account_bucket(fs_usage, dev_usage, bucket_type(new),
457                                1, ca->mi.bucket_size);
458
459         dev_usage->buckets_alloc +=
460                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
461         dev_usage->buckets_ec +=
462                 (int) new.stripe - (int) old.stripe;
463         dev_usage->buckets_unavailable +=
464                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
465
466         dev_usage->sectors[old.data_type] -= old.dirty_sectors;
467         dev_usage->sectors[new.data_type] += new.dirty_sectors;
468         dev_usage->sectors[BCH_DATA_CACHED] +=
469                 (int) new.cached_sectors - (int) old.cached_sectors;
470         dev_usage->sectors_fragmented +=
471                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
472         preempt_enable();
473
474         if (!is_available_bucket(old) && is_available_bucket(new))
475                 bch2_wake_allocator(ca);
476 }
477
478 void bch2_dev_usage_from_buckets(struct bch_fs *c)
479 {
480         struct bch_dev *ca;
481         struct bucket_mark old = { .v.counter = 0 };
482         struct bucket_array *buckets;
483         struct bucket *g;
484         unsigned i;
485         int cpu;
486
487         c->usage_base->hidden = 0;
488
489         for_each_member_device(ca, c, i) {
490                 for_each_possible_cpu(cpu)
491                         memset(per_cpu_ptr(ca->usage[0], cpu), 0,
492                                sizeof(*ca->usage[0]));
493
494                 buckets = bucket_array(ca);
495
496                 for_each_bucket(g, buckets)
497                         bch2_dev_usage_update(c, ca, c->usage_base,
498                                               old, g->mark, false);
499         }
500 }
501
502 static inline int update_replicas(struct bch_fs *c,
503                                   struct bch_fs_usage *fs_usage,
504                                   struct bch_replicas_entry *r,
505                                   s64 sectors)
506 {
507         int idx = bch2_replicas_entry_idx(c, r);
508
509         if (idx < 0)
510                 return -1;
511
512         if (!fs_usage)
513                 return 0;
514
515         switch (r->data_type) {
516         case BCH_DATA_BTREE:
517                 fs_usage->btree         += sectors;
518                 break;
519         case BCH_DATA_USER:
520                 fs_usage->data          += sectors;
521                 break;
522         case BCH_DATA_CACHED:
523                 fs_usage->cached        += sectors;
524                 break;
525         }
526         fs_usage->replicas[idx]         += sectors;
527         return 0;
528 }
529
530 static inline void update_cached_sectors(struct bch_fs *c,
531                                          struct bch_fs_usage *fs_usage,
532                                          unsigned dev, s64 sectors)
533 {
534         struct bch_replicas_padded r;
535
536         bch2_replicas_entry_cached(&r.e, dev);
537
538         update_replicas(c, fs_usage, &r.e, sectors);
539 }
540
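/*
 * Transactional triggers don't touch the percpu usage counters directly:
 * they accumulate replicas deltas in trans->fs_usage_deltas, which are then
 * applied in one go (and unwound on error) by
 * bch2_replicas_delta_list_apply().
 */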
541 static struct replicas_delta_list *
542 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
543 {
544         struct replicas_delta_list *d = trans->fs_usage_deltas;
545         unsigned new_size = d ? (d->size + more) * 2 : 128;
546
547         if (!d || d->used + more > d->size) {
548                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
549                 BUG_ON(!d);
550
551                 d->size = new_size;
552                 trans->fs_usage_deltas = d;
553         }
554         return d;
555 }
556
557 static inline void update_replicas_list(struct btree_trans *trans,
558                                         struct bch_replicas_entry *r,
559                                         s64 sectors)
560 {
561         struct replicas_delta_list *d;
562         struct replicas_delta *n;
563         unsigned b;
564
565         if (!sectors)
566                 return;
567
568         b = replicas_entry_bytes(r) + 8;
569         d = replicas_deltas_realloc(trans, b);
570
571         n = (void *) d->d + d->used;
572         n->delta = sectors;
573         memcpy(&n->r, r, replicas_entry_bytes(r));
574         d->used += b;
575 }
576
577 static inline void update_cached_sectors_list(struct btree_trans *trans,
578                                               unsigned dev, s64 sectors)
579 {
580         struct bch_replicas_padded r;
581
582         bch2_replicas_entry_cached(&r.e, dev);
583
584         update_replicas_list(trans, &r.e, sectors);
585 }
586
587 static inline struct replicas_delta *
588 replicas_delta_next(struct replicas_delta *d)
589 {
590         return (void *) d + replicas_entry_bytes(&d->r) + 8;
591 }
592
593 int bch2_replicas_delta_list_apply(struct bch_fs *c,
594                                    struct bch_fs_usage *fs_usage,
595                                    struct replicas_delta_list *r)
596 {
597         struct replicas_delta *d = r->d;
598         struct replicas_delta *top = (void *) r->d + r->used;
599         unsigned i;
600
601         for (d = r->d; d != top; d = replicas_delta_next(d))
602                 if (update_replicas(c, fs_usage, &d->r, d->delta)) {
603                         top = d;
604                         goto unwind;
605                 }
606
607         if (!fs_usage)
608                 return 0;
609
610         fs_usage->nr_inodes += r->nr_inodes;
611
612         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
613                 fs_usage->reserved += r->persistent_reserved[i];
614                 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
615         }
616
617         return 0;
618 unwind:
619         for (d = r->d; d != top; d = replicas_delta_next(d))
620                 update_replicas(c, fs_usage, &d->r, -d->delta);
621         return -1;
622 }
623
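/*
 * Run a mark function against the normal bucket state, the gc copy, or both:
 * the copy matching BTREE_TRIGGER_GC is always updated, and the gc copy is
 * additionally updated if gc is in progress and has already visited this
 * position.
 */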
624 #define do_mark_fn(fn, c, pos, flags, ...)                              \
625 ({                                                                      \
626         int gc, ret = 0;                                                \
627                                                                         \
628         percpu_rwsem_assert_held(&c->mark_lock);                        \
629                                                                         \
630         for (gc = 0; gc < 2 && !ret; gc++)                              \
631                 if (!gc == !(flags & BTREE_TRIGGER_GC) ||               \
632                     (gc && gc_visited(c, pos)))                         \
633                         ret = fn(c, __VA_ARGS__, gc);                   \
634         ret;                                                            \
635 })
636
637 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
638                                     size_t b, struct bucket_mark *ret,
639                                     bool gc)
640 {
641         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
642         struct bucket *g = __bucket(ca, b, gc);
643         struct bucket_mark old, new;
644
645         old = bucket_cmpxchg(g, new, ({
646                 BUG_ON(!is_available_bucket(new));
647
648                 new.owned_by_allocator  = true;
649                 new.data_type           = 0;
650                 new.cached_sectors      = 0;
651                 new.dirty_sectors       = 0;
652                 new.gen++;
653         }));
654
655         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
656
657         if (old.cached_sectors)
658                 update_cached_sectors(c, fs_usage, ca->dev_idx,
659                                       -((s64) old.cached_sectors));
660
661         if (!gc)
662                 *ret = old;
663         return 0;
664 }
665
666 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
667                             size_t b, struct bucket_mark *old)
668 {
669         do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
670                    ca, b, old);
671
672         if (!old->owned_by_allocator && old->cached_sectors)
673                 trace_invalidate(ca, bucket_to_sector(ca, b),
674                                  old->cached_sectors);
675 }
676
677 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
678                                     size_t b, bool owned_by_allocator,
679                                     bool gc)
680 {
681         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
682         struct bucket *g = __bucket(ca, b, gc);
683         struct bucket_mark old, new;
684
685         old = bucket_cmpxchg(g, new, ({
686                 new.owned_by_allocator  = owned_by_allocator;
687         }));
688
689         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
690
691         BUG_ON(!gc &&
692                !owned_by_allocator && !old.owned_by_allocator);
693
694         return 0;
695 }
696
697 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
698                             size_t b, bool owned_by_allocator,
699                             struct gc_pos pos, unsigned flags)
700 {
701         preempt_disable();
702
703         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
704                    ca, b, owned_by_allocator);
705
706         preempt_enable();
707 }
708
709 static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
710                            struct bch_fs_usage *fs_usage,
711                            u64 journal_seq, unsigned flags)
712 {
713         bool gc = flags & BTREE_TRIGGER_GC;
714         struct bkey_alloc_unpacked u;
715         struct bch_dev *ca;
716         struct bucket *g;
717         struct bucket_mark old, m;
718
719         /*
720          * alloc btree is read in by bch2_alloc_read, not gc:
721          */
722         if ((flags & BTREE_TRIGGER_GC) &&
723             !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
724                 return 0;
725
726         ca = bch_dev_bkey_exists(c, k.k->p.inode);
727
728         if (k.k->p.offset >= ca->mi.nbuckets)
729                 return 0;
730
731         g = __bucket(ca, k.k->p.offset, gc);
732         u = bch2_alloc_unpack(k);
733
734         old = bucket_cmpxchg(g, m, ({
735                 m.gen                   = u.gen;
736                 m.data_type             = u.data_type;
737                 m.dirty_sectors         = u.dirty_sectors;
738                 m.cached_sectors        = u.cached_sectors;
739
740                 if (journal_seq) {
741                         m.journal_seq_valid     = 1;
742                         m.journal_seq           = journal_seq;
743                 }
744         }));
745
746         if (!(flags & BTREE_TRIGGER_ALLOC_READ))
747                 bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);
748
749         g->io_time[READ]        = u.read_time;
750         g->io_time[WRITE]       = u.write_time;
751         g->oldest_gen           = u.oldest_gen;
752         g->gen_valid            = 1;
753
754         /*
755          * need to know if we're getting called from the invalidate path or
756          * not:
757          */
758
759         if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
760             old.cached_sectors) {
761                 update_cached_sectors(c, fs_usage, ca->dev_idx,
762                                       -old.cached_sectors);
763                 trace_invalidate(ca, bucket_to_sector(ca, k.k->p.offset),
764                                  old.cached_sectors);
765         }
766
767         return 0;
768 }
769
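/* Saturating u16 add; evaluates to true if the addition would overflow: */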
770 #define checked_add(a, b)                                       \
771 ({                                                              \
772         unsigned _res = (unsigned) (a) + (b);                   \
773         bool overflow = _res > U16_MAX;                         \
774         if (overflow)                                           \
775                 _res = U16_MAX;                                 \
776         (a) = _res;                                             \
777         overflow;                                               \
778 })
779
780 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
781                                        size_t b, enum bch_data_type data_type,
782                                        unsigned sectors, bool gc)
783 {
784         struct bucket *g = __bucket(ca, b, gc);
785         struct bucket_mark old, new;
786         bool overflow;
787
788         BUG_ON(data_type != BCH_DATA_SB &&
789                data_type != BCH_DATA_JOURNAL);
790
791         old = bucket_cmpxchg(g, new, ({
792                 new.data_type   = data_type;
793                 overflow = checked_add(new.dirty_sectors, sectors);
794         }));
795
796         bch2_fs_inconsistent_on(old.data_type &&
797                                 old.data_type != data_type, c,
798                 "different types of data in same bucket: %s, %s",
799                 bch2_data_types[old.data_type],
800                 bch2_data_types[data_type]);
801
802         bch2_fs_inconsistent_on(overflow, c,
803                 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
804                 ca->dev_idx, b, new.gen,
805                 bch2_data_types[old.data_type ?: data_type],
806                 old.dirty_sectors, sectors);
807
808         if (c)
809                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
810                                       old, new, gc);
811
812         return 0;
813 }
814
815 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
816                                size_t b, enum bch_data_type type,
817                                unsigned sectors, struct gc_pos pos,
818                                unsigned flags)
819 {
820         BUG_ON(type != BCH_DATA_SB &&
821                type != BCH_DATA_JOURNAL);
822
823         preempt_disable();
824
825         if (likely(c)) {
826                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
827                            ca, b, type, sectors);
828         } else {
829                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
830         }
831
832         preempt_enable();
833 }
834
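/*
 * Helpers for compressed extents: live (uncompressed) sector counts are
 * scaled by compressed_size/uncompressed_size (n/d) to get on-disk sectors,
 * and __ptr_disk_sectors_delta() computes the change in on-disk sectors when
 * an overwrite removes part of an extent, rounding each surviving piece up
 * separately.
 */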
835 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
836 {
837         return DIV_ROUND_UP(sectors * n, d);
838 }
839
840 static s64 __ptr_disk_sectors_delta(unsigned old_size,
841                                     unsigned offset, s64 delta,
842                                     unsigned flags,
843                                     unsigned n, unsigned d)
844 {
845         BUG_ON(!n || !d);
846
847         if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
848                 BUG_ON(offset + -delta > old_size);
849
850                 return -disk_sectors_scaled(n, d, old_size) +
851                         disk_sectors_scaled(n, d, offset) +
852                         disk_sectors_scaled(n, d, old_size - offset + delta);
853         } else if (flags & BTREE_TRIGGER_OVERWRITE) {
854                 BUG_ON(offset + -delta > old_size);
855
856                 return -disk_sectors_scaled(n, d, old_size) +
857                         disk_sectors_scaled(n, d, old_size + delta);
858         } else {
859                 return  disk_sectors_scaled(n, d, delta);
860         }
861 }
862
863 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
864                                   unsigned offset, s64 delta,
865                                   unsigned flags)
866 {
867         return __ptr_disk_sectors_delta(p.crc.live_size,
868                                         offset, delta, flags,
869                                         p.crc.compressed_size,
870                                         p.crc.uncompressed_size);
871 }
872
873 static void bucket_set_stripe(struct bch_fs *c,
874                               const struct bch_stripe *v,
875                               struct bch_fs_usage *fs_usage,
876                               u64 journal_seq,
877                               unsigned flags)
878 {
879         bool enabled = !(flags & BTREE_TRIGGER_OVERWRITE);
880         bool gc = flags & BTREE_TRIGGER_GC;
881         unsigned i;
882
883         for (i = 0; i < v->nr_blocks; i++) {
884                 const struct bch_extent_ptr *ptr = v->ptrs + i;
885                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
886                 struct bucket *g = PTR_BUCKET(ca, ptr, gc);
887                 struct bucket_mark new, old;
888
889                 old = bucket_cmpxchg(g, new, ({
890                         new.stripe                      = enabled;
891                         if (journal_seq) {
892                                 new.journal_seq_valid   = 1;
893                                 new.journal_seq         = journal_seq;
894                         }
895                 }));
896
897                 bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
898
899                 /*
900                  * XXX write repair code for these, flag stripe as possibly bad
901                  */
902                 if (old.gen != ptr->gen)
903                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
904                                       "stripe with stale pointer");
905 #if 0
906                 /*
907                  * We'd like to check for these, but these checks don't work
908                  * yet:
909                  */
910                 if (old.stripe && enabled)
911                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
912                                       "multiple stripes using same bucket");
913
914                 if (!old.stripe && !enabled)
915                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
916                                       "deleting stripe but bucket not marked as stripe bucket");
917 #endif
918         }
919 }
920
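/*
 * Check a pointer against its bucket and update the bucket's sector counts:
 * a pointer gen newer than the bucket gen, a pointer that's too stale, or a
 * stale dirty pointer are all inconsistencies (-EIO); a stale cached pointer
 * just returns 1 so the caller skips accounting for it.
 */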
921 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
922                           struct extent_ptr_decoded p,
923                           s64 sectors, enum bch_data_type ptr_data_type,
924                           u8 bucket_gen, u8 *bucket_data_type,
925                           u16 *dirty_sectors, u16 *cached_sectors)
926 {
927         u16 *dst_sectors = !p.ptr.cached
928                 ? dirty_sectors
929                 : cached_sectors;
930         u16 orig_sectors = *dst_sectors;
931         char buf[200];
932
933         if (gen_after(p.ptr.gen, bucket_gen)) {
934                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
935                         "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
936                         "while marking %s",
937                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
938                         bucket_gen,
939                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
940                         p.ptr.gen,
941                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
942                 return -EIO;
943         }
944
945         if (gen_cmp(bucket_gen, p.ptr.gen) > BUCKET_GC_GEN_MAX) {
946                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
947                         "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
948                         "while marking %s",
949                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
950                         bucket_gen,
951                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
952                         p.ptr.gen,
953                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
954                 return -EIO;
955         }
956
957         if (bucket_gen != p.ptr.gen && !p.ptr.cached) {
958                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
959                         "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
960                         "while marking %s",
961                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
962                         bucket_gen,
963                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
964                         p.ptr.gen,
965                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
966                 return -EIO;
967         }
968
969         if (bucket_gen != p.ptr.gen)
970                 return 1;
971
972         if (*bucket_data_type && *bucket_data_type != ptr_data_type) {
973                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
974                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
975                         "while marking %s",
976                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
977                         bucket_gen,
978                         bch2_data_types[*bucket_data_type],
979                         bch2_data_types[ptr_data_type],
980                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
981                 return -EIO;
982         }
983
984         if (checked_add(*dst_sectors, sectors)) {
985                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
986                         "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
987                         "while marking %s",
988                         p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
989                         bucket_gen,
990                         bch2_data_types[*bucket_data_type ?: ptr_data_type],
991                         orig_sectors, sectors,
992                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
993                 return -EIO;
994         }
995
996         *bucket_data_type = *dirty_sectors || *cached_sectors
997                 ? ptr_data_type : 0;
998         return 0;
999 }
1000
1001 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1002                              struct extent_ptr_decoded p,
1003                              s64 sectors, enum bch_data_type data_type,
1004                              struct bch_fs_usage *fs_usage,
1005                              u64 journal_seq, unsigned flags)
1006 {
1007         bool gc = flags & BTREE_TRIGGER_GC;
1008         struct bucket_mark old, new;
1009         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1010         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
1011         u8 bucket_data_type;
1012         u64 v;
1013         int ret;
1014
1015         v = atomic64_read(&g->_mark.v);
1016         do {
1017                 new.v.counter = old.v.counter = v;
1018                 bucket_data_type = new.data_type;
1019
1020                 ret = __mark_pointer(c, k, p, sectors, data_type, new.gen,
1021                                      &bucket_data_type,
1022                                      &new.dirty_sectors,
1023                                      &new.cached_sectors);
1024                 if (ret)
1025                         return ret;
1026
1027                 new.data_type = bucket_data_type;
1028
1029                 if (journal_seq) {
1030                         new.journal_seq_valid = 1;
1031                         new.journal_seq = journal_seq;
1032                 }
1033
1034                 if (flags & BTREE_TRIGGER_NOATOMIC) {
1035                         g->_mark = new;
1036                         break;
1037                 }
1038         } while ((v = atomic64_cmpxchg(&g->_mark.v,
1039                               old.v.counter,
1040                               new.v.counter)) != old.v.counter);
1041
1042         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
1043
1044         BUG_ON(!gc && bucket_became_unavailable(old, new));
1045
1046         return 0;
1047 }
1048
1049 static int bch2_mark_stripe_ptr(struct bch_fs *c,
1050                                 struct bch_extent_stripe_ptr p,
1051                                 enum bch_data_type data_type,
1052                                 struct bch_fs_usage *fs_usage,
1053                                 s64 sectors, unsigned flags,
1054                                 struct bch_replicas_padded *r,
1055                                 unsigned *nr_data,
1056                                 unsigned *nr_parity)
1057 {
1058         bool gc = flags & BTREE_TRIGGER_GC;
1059         struct stripe *m;
1060         unsigned old, new;
1061         int blocks_nonempty_delta;
1062
1063         m = genradix_ptr(&c->stripes[gc], p.idx);
1064
1065         spin_lock(&c->ec_stripes_heap_lock);
1066
1067         if (!m || !m->alive) {
1068                 spin_unlock(&c->ec_stripes_heap_lock);
1069                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1070                                     (u64) p.idx);
1071                 return -EIO;
1072         }
1073
1074         BUG_ON(m->r.e.data_type != data_type);
1075
1076         *nr_data        = m->nr_blocks - m->nr_redundant;
1077         *nr_parity      = m->nr_redundant;
1078         *r = m->r;
1079
1080         old = m->block_sectors[p.block];
1081         m->block_sectors[p.block] += sectors;
1082         new = m->block_sectors[p.block];
1083
1084         blocks_nonempty_delta = (int) !!new - (int) !!old;
1085         if (blocks_nonempty_delta) {
1086                 m->blocks_nonempty += blocks_nonempty_delta;
1087
1088                 if (!gc)
1089                         bch2_stripes_heap_update(c, m, p.idx);
1090         }
1091
1092         m->dirty = true;
1093
1094         spin_unlock(&c->ec_stripes_heap_lock);
1095
1096         return 0;
1097 }
1098
1099 static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
1100                             unsigned offset, s64 sectors,
1101                             enum bch_data_type data_type,
1102                             struct bch_fs_usage *fs_usage,
1103                             unsigned journal_seq, unsigned flags)
1104 {
1105         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1106         const union bch_extent_entry *entry;
1107         struct extent_ptr_decoded p;
1108         struct bch_replicas_padded r;
1109         s64 dirty_sectors = 0;
1110         bool stale;
1111         int ret;
1112
1113         r.e.data_type   = data_type;
1114         r.e.nr_devs     = 0;
1115         r.e.nr_required = 1;
1116
1117         BUG_ON(!sectors);
1118
1119         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1120                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1121                         ? sectors
1122                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1123
1124                 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
1125                                         fs_usage, journal_seq, flags);
1126                 if (ret < 0)
1127                         return ret;
1128
1129                 stale = ret > 0;
1130
1131                 if (p.ptr.cached) {
1132                         if (!stale)
1133                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1134                                                       disk_sectors);
1135                 } else if (!p.has_ec) {
1136                         dirty_sectors          += disk_sectors;
1137                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1138                 } else {
1139                         struct bch_replicas_padded ec_r;
1140                         unsigned nr_data, nr_parity;
1141                         s64 parity_sectors;
1142
1143                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1144                                         fs_usage, disk_sectors, flags,
1145                                         &ec_r, &nr_data, &nr_parity);
1146                         if (ret)
1147                                 return ret;
1148
1149                         parity_sectors =
1150                                 __ptr_disk_sectors_delta(p.crc.live_size,
1151                                         offset, sectors, flags,
1152                                         p.crc.compressed_size * nr_parity,
1153                                         p.crc.uncompressed_size * nr_data);
1154
1155                         update_replicas(c, fs_usage, &ec_r.e,
1156                                         disk_sectors + parity_sectors);
1157
1158                         /*
1159                          * There may be other dirty pointers in this extent, but
1160                          * if so they're not required for mounting if we have an
1161                          * erasure coded pointer in this extent:
1162                          */
1163                         r.e.nr_required = 0;
1164                 }
1165         }
1166
1167         if (r.e.nr_devs)
1168                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1169
1170         return 0;
1171 }
1172
1173 static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
1174                             struct bch_fs_usage *fs_usage,
1175                             u64 journal_seq, unsigned flags)
1176 {
1177         bool gc = flags & BTREE_TRIGGER_GC;
1178         struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
1179         size_t idx = s.k->p.offset;
1180         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1181         unsigned i;
1182
1183         spin_lock(&c->ec_stripes_heap_lock);
1184
1185         if (!m || ((flags & BTREE_TRIGGER_OVERWRITE) && !m->alive)) {
1186                 spin_unlock(&c->ec_stripes_heap_lock);
1187                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1188                                     idx);
1189                 return -1;
1190         }
1191
1192         if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
1193                 m->sectors      = le16_to_cpu(s.v->sectors);
1194                 m->algorithm    = s.v->algorithm;
1195                 m->nr_blocks    = s.v->nr_blocks;
1196                 m->nr_redundant = s.v->nr_redundant;
1197
1198                 bch2_bkey_to_replicas(&m->r.e, k);
1199
1200                 /*
1201                  * XXX: account for stripes somehow here
1202                  */
1203 #if 0
1204                 update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
1205 #endif
1206
1207                 /* gc recalculates these fields: */
1208                 if (!(flags & BTREE_TRIGGER_GC)) {
1209                         for (i = 0; i < s.v->nr_blocks; i++) {
1210                                 m->block_sectors[i] =
1211                                         stripe_blockcount_get(s.v, i);
1212                                 m->blocks_nonempty += !!m->block_sectors[i];
1213                         }
1214                 }
1215
1216                 if (!gc)
1217                         bch2_stripes_heap_update(c, m, idx);
1218                 m->alive        = true;
1219         } else {
1220                 if (!gc)
1221                         bch2_stripes_heap_del(c, m, idx);
1222                 memset(m, 0, sizeof(*m));
1223         }
1224
1225         spin_unlock(&c->ec_stripes_heap_lock);
1226
1227         bucket_set_stripe(c, s.v, fs_usage, 0, flags);
1228         return 0;
1229 }
1230
1231 static int bch2_mark_key_locked(struct bch_fs *c,
1232                    struct bkey_s_c k,
1233                    unsigned offset, s64 sectors,
1234                    struct bch_fs_usage *fs_usage,
1235                    u64 journal_seq, unsigned flags)
1236 {
1237         int ret = 0;
1238
1239         preempt_disable();
1240
1241         if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1242                 fs_usage = fs_usage_ptr(c, journal_seq,
1243                                         flags & BTREE_TRIGGER_GC);
1244
1245         switch (k.k->type) {
1246         case KEY_TYPE_alloc:
1247                 ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
1248                 break;
1249         case KEY_TYPE_btree_ptr:
1250         case KEY_TYPE_btree_ptr_v2:
1251                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1252                         ?  c->opts.btree_node_size
1253                         : -c->opts.btree_node_size;
1254
1255                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
1256                                 fs_usage, journal_seq, flags);
1257                 break;
1258         case KEY_TYPE_extent:
1259         case KEY_TYPE_reflink_v:
1260                 ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
1261                                 fs_usage, journal_seq, flags);
1262                 break;
1263         case KEY_TYPE_stripe:
1264                 ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
1265                 break;
1266         case KEY_TYPE_inode:
1267                 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1268                         fs_usage->nr_inodes++;
1269                 else
1270                         fs_usage->nr_inodes--;
1271                 break;
1272         case KEY_TYPE_reservation: {
1273                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1274
1275                 sectors *= replicas;
1276                 replicas = clamp_t(unsigned, replicas, 1,
1277                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1278
1279                 fs_usage->reserved                              += sectors;
1280                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1281                 break;
1282         }
1283         }
1284
1285         preempt_enable();
1286
1287         return ret;
1288 }
1289
1290 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
1291                   unsigned offset, s64 sectors,
1292                   struct bch_fs_usage *fs_usage,
1293                   u64 journal_seq, unsigned flags)
1294 {
1295         int ret;
1296
1297         percpu_down_read(&c->mark_lock);
1298         ret = bch2_mark_key_locked(c, k, offset, sectors,
1299                                    fs_usage, journal_seq, flags);
1300         percpu_up_read(&c->mark_lock);
1301
1302         return ret;
1303 }
1304
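/*
 * Mark the part of an existing key that's being overwritten by an insert:
 * offset/sectors describe how much of the old extent goes away, and
 * BTREE_TRIGGER_OVERWRITE_SPLIT is set when the old extent is split into two
 * surviving pieces. Returns 0 if the old key isn't affected, an error or 1
 * otherwise.
 */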
1305 inline int bch2_mark_overwrite(struct btree_trans *trans,
1306                                struct btree_iter *iter,
1307                                struct bkey_s_c old,
1308                                struct bkey_i *new,
1309                                struct bch_fs_usage *fs_usage,
1310                                unsigned flags,
1311                                bool is_extents)
1312 {
1313         struct bch_fs           *c = trans->c;
1314         unsigned                offset = 0;
1315         s64                     sectors = -((s64) old.k->size);
1316
1317         flags |= BTREE_TRIGGER_OVERWRITE;
1318
1319         if (is_extents
1320             ? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
1321             : bkey_cmp(new->k.p, old.k->p))
1322                 return 0;
1323
1324         if (is_extents) {
1325                 switch (bch2_extent_overlap(&new->k, old.k)) {
1326                 case BCH_EXTENT_OVERLAP_ALL:
1327                         offset = 0;
1328                         sectors = -((s64) old.k->size);
1329                         break;
1330                 case BCH_EXTENT_OVERLAP_BACK:
1331                         offset = bkey_start_offset(&new->k) -
1332                                 bkey_start_offset(old.k);
1333                         sectors = bkey_start_offset(&new->k) -
1334                                 old.k->p.offset;
1335                         break;
1336                 case BCH_EXTENT_OVERLAP_FRONT:
1337                         offset = 0;
1338                         sectors = bkey_start_offset(old.k) -
1339                                 new->k.p.offset;
1340                         break;
1341                 case BCH_EXTENT_OVERLAP_MIDDLE:
1342                         offset = bkey_start_offset(&new->k) -
1343                                 bkey_start_offset(old.k);
1344                         sectors = -((s64) new->k.size);
1345                         flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1346                         break;
1347                 }
1348
1349                 BUG_ON(sectors >= 0);
1350         }
1351
1352         return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
1353                                     trans->journal_res.seq, flags) ?: 1;
1354 }
1355
1356 int bch2_mark_update(struct btree_trans *trans,
1357                      struct btree_iter *iter,
1358                      struct bkey_i *insert,
1359                      struct bch_fs_usage *fs_usage,
1360                      unsigned flags)
1361 {
1362         struct bch_fs           *c = trans->c;
1363         struct btree            *b = iter->l[0].b;
1364         struct btree_node_iter  node_iter = iter->l[0].iter;
1365         struct bkey_packed      *_k;
1366         int ret = 0;
1367
1368         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1369                 return 0;
1370
1371         if (!btree_node_type_needs_gc(iter->btree_id))
1372                 return 0;
1373
1374         bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
1375                 0, insert->k.size,
1376                 fs_usage, trans->journal_res.seq,
1377                 BTREE_TRIGGER_INSERT|flags);
1378
1379         if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
1380                 return 0;
1381
1382         /*
1383          * For non extents, we only mark the new key, not the key being
1384          * overwritten - unless we're actually deleting:
1385          */
1386         if ((iter->btree_id == BTREE_ID_ALLOC ||
1387              iter->btree_id == BTREE_ID_EC) &&
1388             !bkey_deleted(&insert->k))
1389                 return 0;
1390
1391         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1392                 struct bkey             unpacked;
1393                 struct bkey_s_c         k = bkey_disassemble(b, _k, &unpacked);
1394
1395                 ret = bch2_mark_overwrite(trans, iter, k, insert,
1396                                           fs_usage, flags,
1397                                           btree_node_type_is_extents(iter->btree_id));
1398                 if (ret <= 0)
1399                         break;
1400
1401                 bch2_btree_node_iter_advance(&node_iter, b);
1402         }
1403
1404         return ret;
1405 }
1406
1407 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1408                                struct bch_fs_usage *fs_usage)
1409 {
1410         struct bch_fs *c = trans->c;
1411         struct btree_insert_entry *i;
1412         static int warned_disk_usage = 0;
1413         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1414         char buf[200];
1415
1416         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1417                                  trans->journal_res.seq) ||
1418             warned_disk_usage ||
1419             xchg(&warned_disk_usage, 1))
1420                 return;
1421
1422         bch_err(c, "disk usage increased more than %llu sectors reserved",
1423                 disk_res_sectors);
1424
1425         trans_for_each_update(trans, i) {
1426                 struct btree_iter       *iter = i->iter;
1427                 struct btree            *b = iter->l[0].b;
1428                 struct btree_node_iter  node_iter = iter->l[0].iter;
1429                 struct bkey_packed      *_k;
1430
1431                 pr_err("while inserting");
1432                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1433                 pr_err("%s", buf);
1434                 pr_err("overlapping with");
1435
1436                 node_iter = iter->l[0].iter;
1437                 while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1438                         struct bkey             unpacked;
1439                         struct bkey_s_c         k;
1440
1441                         k = bkey_disassemble(b, _k, &unpacked);
1442
1443                         if (btree_node_is_extents(b)
1444                             ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1445                             : bkey_cmp(i->k->k.p, k.k->p))
1446                                 break;
1447
1448                         bch2_bkey_val_to_text(&PBUF(buf), c, k);
1449                         pr_err("%s", buf);
1450
1451                         bch2_btree_node_iter_advance(&node_iter, b);
1452                 }
1453         }
1454 }
1455
1456 /* trans_mark: */
1457
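/*
 * Look for an update already queued in this transaction that covers @pos in
 * @btree_id: if found, return its iterator and point *k at the key being
 * inserted, so marking operates on the transaction's own pending values.
 */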
1458 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1459                             enum btree_id btree_id, struct bpos pos,
1460                             struct bkey_s_c *k)
1461 {
1462         struct btree_insert_entry *i;
1463
1464         trans_for_each_update(trans, i)
1465                 if (i->iter->btree_id == btree_id &&
1466                     (btree_node_type_is_extents(btree_id)
1467                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1468                        bkey_cmp(pos, i->k->k.p) < 0
1469                      : !bkey_cmp(pos, i->iter->pos))) {
1470                         *k = bkey_i_to_s_c(i->k);
1471                         return i->iter;
1472                 }
1473
1474         return NULL;
1475 }
1476
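/*
 * Get an iterator and key for @pos: returns 1 if the key came from a pending
 * update in this transaction, 0 if it was read from the btree (a cached
 * iterator for the alloc btree, a slots iterator otherwise), or a negative
 * error code.
 */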
1477 static int trans_get_key(struct btree_trans *trans,
1478                          enum btree_id btree_id, struct bpos pos,
1479                          struct btree_iter **iter,
1480                          struct bkey_s_c *k)
1481 {
1482         unsigned flags = btree_id != BTREE_ID_ALLOC
1483                 ? BTREE_ITER_SLOTS
1484                 : BTREE_ITER_CACHED;
1485         int ret;
1486
1487         *iter = trans_get_update(trans, btree_id, pos, k);
1488         if (*iter)
1489                 return 1;
1490
1491         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1492                                     flags|BTREE_ITER_INTENT);
1493         if (IS_ERR(*iter))
1494                 return PTR_ERR(*iter);
1495
1496         *k = __bch2_btree_iter_peek(*iter, flags);
1497         ret = bkey_err(*k);
1498         if (ret)
1499                 bch2_trans_iter_put(trans, *iter);
1500         return ret;
1501 }
1502
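/*
 * Update the alloc key for the bucket this pointer points into: current
 * bucket state comes from a pending update in this transaction if there is
 * one, otherwise from the in-memory bucket mark; __mark_pointer() applies
 * the sector/generation accounting and the repacked alloc key is queued as
 * an update.
 */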
1503 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1504                         struct bkey_s_c k, struct extent_ptr_decoded p,
1505                         s64 sectors, enum bch_data_type data_type)
1506 {
1507         struct bch_fs *c = trans->c;
1508         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1509         struct bpos pos = POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr));
1510         struct btree_iter *iter;
1511         struct bkey_s_c k_a;
1512         struct bkey_alloc_unpacked u;
1513         struct bkey_i_alloc *a;
1514         struct bucket *g;
1515         int ret;
1516
1517         iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k_a);
1518         if (iter) {
1519                 u = bch2_alloc_unpack(k_a);
1520         } else {
1521                 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
1522                                            BTREE_ITER_CACHED|
1523                                            BTREE_ITER_CACHED_NOFILL|
1524                                            BTREE_ITER_INTENT);
1525                 if (IS_ERR(iter))
1526                         return PTR_ERR(iter);
1527
1528                 ret = bch2_btree_iter_traverse(iter);
1529                 if (ret)
1530                         goto out;
1531
1532                 percpu_down_read(&c->mark_lock);
1533                 g = bucket(ca, pos.offset);
1534                 u = alloc_mem_to_key(g, READ_ONCE(g->mark));
1535                 percpu_up_read(&c->mark_lock);
1536         }
1537
1538         ret = __mark_pointer(c, k, p, sectors, data_type, u.gen, &u.data_type,
1539                              &u.dirty_sectors, &u.cached_sectors);
1540         if (ret)
1541                 goto out;
1542
1543         a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
1544         ret = PTR_ERR_OR_ZERO(a);
1545         if (ret)
1546                 goto out;
1547
1548         bkey_alloc_init(&a->k_i);
1549         a->k.p = pos;
1550         bch2_alloc_pack(a, u);
1551         bch2_trans_update(trans, iter, &a->k_i, 0);
1552 out:
1553         bch2_trans_iter_put(trans, iter);
1554         return ret;
1555 }
1556
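/*
 * Account a pointer into an erasure coded stripe: bump the block's sector
 * count in the stripe key, and return the stripe's replicas entry and
 * data/parity geometry so the caller can charge parity sectors too.
 */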
1557 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1558                         struct bch_extent_stripe_ptr p,
1559                         s64 sectors, enum bch_data_type data_type,
1560                         struct bch_replicas_padded *r,
1561                         unsigned *nr_data,
1562                         unsigned *nr_parity)
1563 {
1564         struct bch_fs *c = trans->c;
1565         struct btree_iter *iter;
1566         struct bkey_s_c k;
1567         struct bkey_i_stripe *s;
1568         int ret = 0;
1569
1570         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1571         if (ret < 0)
1572                 return ret;
1573
1574         if (k.k->type != KEY_TYPE_stripe) {
1575                 bch2_fs_inconsistent(c,
1576                         "pointer to nonexistent stripe %llu",
1577                         (u64) p.idx);
1578                 ret = -EIO;
1579                 goto out;
1580         }
1581
1582         s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1583         ret = PTR_ERR_OR_ZERO(s);
1584         if (ret)
1585                 goto out;
1586
1587         bkey_reassemble(&s->k_i, k);
1588
1589         stripe_blockcount_set(&s->v, p.block,
1590                 stripe_blockcount_get(&s->v, p.block) +
1591                 sectors);
1592
1593         *nr_data        = s->v.nr_blocks - s->v.nr_redundant;
1594         *nr_parity      = s->v.nr_redundant;
1595         bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
1596         bch2_trans_update(trans, iter, &s->k_i, 0);
1597 out:
1598         bch2_trans_iter_put(trans, iter);
1599         return ret;
1600 }
1601
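/*
 * Transactional marking for extents and btree pointers: update the bucket
 * for each pointer, then accumulate cached sector and replicas deltas for
 * this transaction. Pointers into stripes are charged against the stripe's
 * own replicas entry instead.
 */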
1602 static int bch2_trans_mark_extent(struct btree_trans *trans,
1603                         struct bkey_s_c k, unsigned offset,
1604                         s64 sectors, unsigned flags,
1605                         enum bch_data_type data_type)
1606 {
1607         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1608         const union bch_extent_entry *entry;
1609         struct extent_ptr_decoded p;
1610         struct bch_replicas_padded r;
1611         s64 dirty_sectors = 0;
1612         bool stale;
1613         int ret;
1614
1615         r.e.data_type   = data_type;
1616         r.e.nr_devs     = 0;
1617         r.e.nr_required = 1;
1618
1619         BUG_ON(!sectors);
1620
1621         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1622                 s64 disk_sectors = data_type == BCH_DATA_BTREE
1623                         ? sectors
1624                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1625
1626                 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1627                                               data_type);
1628                 if (ret < 0)
1629                         return ret;
1630
1631                 stale = ret > 0;
1632
1633                 if (p.ptr.cached) {
1634                         if (!stale)
1635                                 update_cached_sectors_list(trans, p.ptr.dev,
1636                                                            disk_sectors);
1637                 } else if (!p.has_ec) {
1638                         dirty_sectors          += disk_sectors;
1639                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1640                 } else {
1641                         struct bch_replicas_padded ec_r;
1642                         unsigned nr_data, nr_parity;
1643                         s64 parity_sectors;
1644
1645                         ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
1646                                         disk_sectors, data_type,
1647                                         &ec_r, &nr_data, &nr_parity);
1648                         if (ret)
1649                                 return ret;
1650
1651                         parity_sectors =
1652                                 __ptr_disk_sectors_delta(p.crc.live_size,
1653                                         offset, sectors, flags,
1654                                         p.crc.compressed_size * nr_parity,
1655                                         p.crc.uncompressed_size * nr_data);
1656
1657                         update_replicas_list(trans, &ec_r.e,
1658                                              disk_sectors + parity_sectors);
1659
1660                         r.e.nr_required = 0;
1661                 }
1662         }
1663
1664         if (r.e.nr_devs)
1665                 update_replicas_list(trans, &r.e, dirty_sectors);
1666
1667         return 0;
1668 }
1669
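/*
 * Adjust the refcount of the indirect extent at @idx that a reflink pointer
 * references: incremented on insert, decremented on overwrite, and the
 * reflink_v key is deleted once the refcount reaches zero. Returns the
 * number of sectors consumed so the caller can continue with the rest of
 * the range.
 */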
1670 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1671                         struct bkey_s_c_reflink_p p,
1672                         u64 idx, unsigned sectors,
1673                         unsigned flags)
1674 {
1675         struct bch_fs *c = trans->c;
1676         struct btree_iter *iter;
1677         struct bkey_s_c k;
1678         struct bkey_i_reflink_v *r_v;
1679         s64 ret;
1680
1681         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1682                             POS(0, idx), &iter, &k);
1683         if (ret < 0)
1684                 return ret;
1685
1686         if (k.k->type != KEY_TYPE_reflink_v) {
1687                 bch2_fs_inconsistent(c,
1688                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1689                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1690                 ret = -EIO;
1691                 goto err;
1692         }
1693
1694         if ((flags & BTREE_TRIGGER_OVERWRITE) &&
1695             (bkey_start_offset(k.k) < idx ||
1696              k.k->p.offset > idx + sectors))
1697                 goto out;
1698
1699         sectors = k.k->p.offset - idx;
1700
1701         r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1702         ret = PTR_ERR_OR_ZERO(r_v);
1703         if (ret)
1704                 goto err;
1705
1706         bkey_reassemble(&r_v->k_i, k);
1707
1708         le64_add_cpu(&r_v->v.refcount,
1709                      !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
1710
1711         if (!r_v->v.refcount) {
1712                 r_v->k.type = KEY_TYPE_deleted;
1713                 set_bkey_val_u64s(&r_v->k, 0);
1714         }
1715
1716         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1717         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1718
1719         bch2_trans_update(trans, iter, &r_v->k_i, 0);
1720 out:
1721         ret = sectors;
1722 err:
1723         bch2_trans_iter_put(trans, iter);
1724         return ret;
1725 }
1726
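/*
 * Walk the indirect extents covered by a reflink pointer, adjusting each
 * one's refcount in turn.
 */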
1727 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1728                         struct bkey_s_c_reflink_p p, unsigned offset,
1729                         s64 sectors, unsigned flags)
1730 {
1731         u64 idx = le64_to_cpu(p.v->idx) + offset;
1732         s64 ret = 0;
1733
1734         sectors = abs(sectors);
1735         BUG_ON(offset + sectors > p.k->size);
1736
1737         while (sectors) {
1738                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1739                 if (ret < 0)
1740                         break;
1741
1742                 idx += ret;
1743                 sectors = max_t(s64, 0LL, sectors - ret);
1744                 ret = 0;
1745         }
1746
1747         return ret;
1748 }
1749
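/*
 * Transactional triggers, dispatched by key type: extents and btree pointers
 * update alloc/stripe keys and replicas deltas, inodes and reservations only
 * adjust this transaction's accounting deltas, and reflink pointers update
 * indirect extent refcounts.
 */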
1750 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
1751                         unsigned offset, s64 sectors, unsigned flags)
1752 {
1753         struct replicas_delta_list *d;
1754         struct bch_fs *c = trans->c;
1755
1756         switch (k.k->type) {
1757         case KEY_TYPE_btree_ptr:
1758         case KEY_TYPE_btree_ptr_v2:
1759                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1760                         ?  c->opts.btree_node_size
1761                         : -c->opts.btree_node_size;
1762
1763                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1764                                               flags, BCH_DATA_BTREE);
1765         case KEY_TYPE_extent:
1766         case KEY_TYPE_reflink_v:
1767                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1768                                               flags, BCH_DATA_USER);
1769         case KEY_TYPE_inode:
1770                 d = replicas_deltas_realloc(trans, 0);
1771
1772                 if (!(flags & BTREE_TRIGGER_OVERWRITE))
1773                         d->nr_inodes++;
1774                 else
1775                         d->nr_inodes--;
1776                 return 0;
1777         case KEY_TYPE_reservation: {
1778                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1779
1780                 d = replicas_deltas_realloc(trans, 0);
1781
1782                 sectors *= replicas;
1783                 replicas = clamp_t(unsigned, replicas, 1,
1784                                    ARRAY_SIZE(d->persistent_reserved));
1785
1786                 d->persistent_reserved[replicas - 1] += sectors;
1787                 return 0;
1788         }
1789         case KEY_TYPE_reflink_p:
1790                 return bch2_trans_mark_reflink_p(trans,
1791                                         bkey_s_c_to_reflink_p(k),
1792                                         offset, sectors, flags);
1793         default:
1794                 return 0;
1795         }
1796 }
1797
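/*
 * Run transactional triggers for a single update: mark the key being
 * inserted, then walk the keys it overwrites and mark those with
 * BTREE_TRIGGER_OVERWRITE, computing the overlapped range for extents. For
 * cached iterators, the key being overwritten is the cached key itself.
 */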
1798 int bch2_trans_mark_update(struct btree_trans *trans,
1799                            struct btree_iter *iter,
1800                            struct bkey_i *insert,
1801                            unsigned flags)
1802 {
1803         struct btree            *b = iter->l[0].b;
1804         struct btree_node_iter  node_iter = iter->l[0].iter;
1805         struct bkey_packed      *_k;
1806         int ret;
1807
1808         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1809                 return 0;
1810
1811         if (!btree_node_type_needs_gc(iter->btree_id))
1812                 return 0;
1813
1814         ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
1815                         0, insert->k.size, BTREE_TRIGGER_INSERT);
1816         if (ret)
1817                 return ret;
1818
1819         if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
1820                 return 0;
1821
1822         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
1823                 struct bkey_cached *ck = (void *) iter->l[0].b;
1824
1825                 return bch2_trans_mark_key(trans, bkey_i_to_s_c(ck->k),
1826                                            0, 0, BTREE_TRIGGER_OVERWRITE);
1827         }
1828
1829         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1830                 struct bkey             unpacked;
1831                 struct bkey_s_c         k;
1832                 unsigned                offset = 0;
1833                 s64                     sectors = 0;
1834                 unsigned                flags = BTREE_TRIGGER_OVERWRITE;
1835
1836                 k = bkey_disassemble(b, _k, &unpacked);
1837
1838                 if (btree_node_is_extents(b)
1839                     ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
1840                     : bkey_cmp(insert->k.p, k.k->p))
1841                         break;
1842
1843                 if (btree_node_is_extents(b)) {
1844                         switch (bch2_extent_overlap(&insert->k, k.k)) {
1845                         case BCH_EXTENT_OVERLAP_ALL:
1846                                 offset = 0;
1847                                 sectors = -((s64) k.k->size);
1848                                 break;
1849                         case BCH_EXTENT_OVERLAP_BACK:
1850                                 offset = bkey_start_offset(&insert->k) -
1851                                         bkey_start_offset(k.k);
1852                                 sectors = bkey_start_offset(&insert->k) -
1853                                         k.k->p.offset;
1854                                 break;
1855                         case BCH_EXTENT_OVERLAP_FRONT:
1856                                 offset = 0;
1857                                 sectors = bkey_start_offset(k.k) -
1858                                         insert->k.p.offset;
1859                                 break;
1860                         case BCH_EXTENT_OVERLAP_MIDDLE:
1861                                 offset = bkey_start_offset(&insert->k) -
1862                                         bkey_start_offset(k.k);
1863                                 sectors = -((s64) insert->k.size);
1864                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1865                                 break;
1866                         }
1867
1868                         BUG_ON(sectors >= 0);
1869                 }
1870
1871                 ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
1872                 if (ret)
1873                         return ret;
1874
1875                 bch2_btree_node_iter_advance(&node_iter, b);
1876         }
1877
1878         return 0;
1879 }
1880
1881 /* Disk reservations: */
1882
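/*
 * Zero the per-cpu sectors_available caches and recompute the global count
 * from current filesystem usage:
 */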
1883 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
1884 {
1885         percpu_u64_set(&c->pcpu->sectors_available, 0);
1886
1887         return avail_factor(__bch2_fs_usage_read_short(c).free);
1888 }
1889
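/* Release a reservation: drop its sectors from online_reserved: */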
1890 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
1891 {
1892         percpu_down_read(&c->mark_lock);
1893         this_cpu_sub(c->usage[0]->online_reserved,
1894                      res->sectors);
1895         percpu_up_read(&c->mark_lock);
1896
1897         res->sectors = 0;
1898 }
1899
1900 #define SECTORS_CACHE   1024
1901
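/*
 * Fast path: take sectors from this cpu's cache, refilling it from the
 * global sectors_available counter in batches of SECTORS_CACHE; if the
 * global counter runs dry, fall back to recalculating free space under the
 * write side of mark_lock.
 */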
1902 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1903                               unsigned sectors, int flags)
1904 {
1905         struct bch_fs_pcpu *pcpu;
1906         u64 old, v, get;
1907         s64 sectors_available;
1908         int ret;
1909
1910         percpu_down_read(&c->mark_lock);
1911         preempt_disable();
1912         pcpu = this_cpu_ptr(c->pcpu);
1913
1914         if (sectors <= pcpu->sectors_available)
1915                 goto out;
1916
1917         v = atomic64_read(&c->sectors_available);
1918         do {
1919                 old = v;
1920                 get = min((u64) sectors + SECTORS_CACHE, old);
1921
1922                 if (get < sectors) {
1923                         preempt_enable();
1924                         percpu_up_read(&c->mark_lock);
1925                         goto recalculate;
1926                 }
1927         } while ((v = atomic64_cmpxchg(&c->sectors_available,
1928                                        old, old - get)) != old);
1929
1930         pcpu->sectors_available         += get;
1931
1932 out:
1933         pcpu->sectors_available         -= sectors;
1934         this_cpu_add(c->usage[0]->online_reserved, sectors);
1935         res->sectors                    += sectors;
1936
1937         preempt_enable();
1938         percpu_up_read(&c->mark_lock);
1939         return 0;
1940
1941 recalculate:
1942         percpu_down_write(&c->mark_lock);
1943
1944         sectors_available = bch2_recalc_sectors_available(c);
1945
1946         if (sectors <= sectors_available ||
1947             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1948                 atomic64_set(&c->sectors_available,
1949                              max_t(s64, 0, sectors_available - sectors));
1950                 this_cpu_add(c->usage[0]->online_reserved, sectors);
1951                 res->sectors                    += sectors;
1952                 ret = 0;
1953         } else {
1954                 atomic64_set(&c->sectors_available, sectors_available);
1955                 ret = -ENOSPC;
1956         }
1957
1958         percpu_up_write(&c->mark_lock);
1959
1960         return ret;
1961 }
1962
1963 /* Startup/shutdown: */
1964
1965 static void buckets_free_rcu(struct rcu_head *rcu)
1966 {
1967         struct bucket_array *buckets =
1968                 container_of(rcu, struct bucket_array, rcu);
1969
1970         kvpfree(buckets,
1971                 sizeof(struct bucket_array) +
1972                 buckets->nbuckets * sizeof(struct bucket));
1973 }
1974
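/*
 * Allocate a new bucket array (plus allocator fifos/heaps sized for
 * @nbuckets), copy existing bucket marks over when resizing, and swap the
 * new array in (under bucket_lock/mark_lock for a live resize); the old
 * array is freed after an RCU grace period.
 */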
1975 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1976 {
1977         struct bucket_array *buckets = NULL, *old_buckets = NULL;
1978         unsigned long *buckets_nouse = NULL;
1979         alloc_fifo      free[RESERVE_NR];
1980         alloc_fifo      free_inc;
1981         alloc_heap      alloc_heap;
1982         copygc_heap     copygc_heap;
1983
1984         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1985                              ca->mi.bucket_size / c->opts.btree_node_size);
1986         /* XXX: these should be tunable */
1987         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
1988         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
1989         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
1990                                       btree_reserve * 2);
1991         bool resize = ca->buckets[0] != NULL,
1992              start_copygc = ca->copygc_thread != NULL;
1993         int ret = -ENOMEM;
1994         unsigned i;
1995
1996         lockdep_assert_held(&c->state_lock);
1997
1998         memset(&free,           0, sizeof(free));
1999         memset(&free_inc,       0, sizeof(free_inc));
2000         memset(&alloc_heap,     0, sizeof(alloc_heap));
2001         memset(&copygc_heap,    0, sizeof(copygc_heap));
2002
2003         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
2004                                             nbuckets * sizeof(struct bucket),
2005                                             GFP_KERNEL|__GFP_ZERO)) ||
2006             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2007                                             sizeof(unsigned long),
2008                                             GFP_KERNEL|__GFP_ZERO)) ||
2009             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
2010             !init_fifo(&free[RESERVE_MOVINGGC],
2011                        copygc_reserve, GFP_KERNEL) ||
2012             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2013             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
2014             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
2015             !init_heap(&copygc_heap,    copygc_reserve, GFP_KERNEL))
2016                 goto err;
2017
2018         buckets->first_bucket   = ca->mi.first_bucket;
2019         buckets->nbuckets       = nbuckets;
2020
2021         bch2_copygc_stop(ca);
2022
2023         if (resize) {
2024                 down_write(&ca->bucket_lock);
2025                 percpu_down_write(&c->mark_lock);
2026         }
2027
2028         old_buckets = bucket_array(ca);
2029
2030         if (resize) {
2031                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2032
2033                 memcpy(buckets->b,
2034                        old_buckets->b,
2035                        n * sizeof(struct bucket));
2036                 memcpy(buckets_nouse,
2037                        ca->buckets_nouse,
2038                        BITS_TO_LONGS(n) * sizeof(unsigned long));
2039         }
2040
2041         rcu_assign_pointer(ca->buckets[0], buckets);
2042         buckets = old_buckets;
2043
2044         swap(ca->buckets_nouse, buckets_nouse);
2045
2046         if (resize)
2047                 percpu_up_write(&c->mark_lock);
2048
2049         spin_lock(&c->freelist_lock);
2050         for (i = 0; i < RESERVE_NR; i++) {
2051                 fifo_move(&free[i], &ca->free[i]);
2052                 swap(ca->free[i], free[i]);
2053         }
2054         fifo_move(&free_inc, &ca->free_inc);
2055         swap(ca->free_inc, free_inc);
2056         spin_unlock(&c->freelist_lock);
2057
2058         /* with gc lock held, alloc_heap can't be in use: */
2059         swap(ca->alloc_heap, alloc_heap);
2060
2061         /* and we shut down copygc: */
2062         swap(ca->copygc_heap, copygc_heap);
2063
2064         nbuckets = ca->mi.nbuckets;
2065
2066         if (resize)
2067                 up_write(&ca->bucket_lock);
2068
2069         if (start_copygc &&
2070             bch2_copygc_start(c, ca))
2071                 bch_err(ca, "error restarting copygc thread");
2072
2073         ret = 0;
2074 err:
2075         free_heap(&copygc_heap);
2076         free_heap(&alloc_heap);
2077         free_fifo(&free_inc);
2078         for (i = 0; i < RESERVE_NR; i++)
2079                 free_fifo(&free[i]);
2080         kvpfree(buckets_nouse,
2081                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2082         if (buckets)
2083                 call_rcu(&buckets->rcu, buckets_free_rcu);
2084
2085         return ret;
2086 }
2087
2088 void bch2_dev_buckets_free(struct bch_dev *ca)
2089 {
2090         unsigned i;
2091
2092         free_heap(&ca->copygc_heap);
2093         free_heap(&ca->alloc_heap);
2094         free_fifo(&ca->free_inc);
2095         for (i = 0; i < RESERVE_NR; i++)
2096                 free_fifo(&ca->free[i]);
2097         kvpfree(ca->buckets_nouse,
2098                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2099         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2100                 sizeof(struct bucket_array) +
2101                 ca->mi.nbuckets * sizeof(struct bucket));
2102
2103         free_percpu(ca->usage[0]);
2104 }
2105
2106 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2107 {
2108         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2109                 return -ENOMEM;
2110
2111         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2112 }