libbcachefs/buckets.c
1 /*
2  * Code for manipulating bucket marks for garbage collection.
3  *
4  * Copyright 2014 Datera, Inc.
5  *
6  * Bucket states:
7  * - free bucket: mark == 0
8  *   The bucket contains no data and will not be read
9  *
10  * - allocator bucket: owned_by_allocator == 1
11  *   The bucket is on a free list, or it is an open bucket
12  *
13  * - cached bucket: owned_by_allocator == 0 &&
14  *                  dirty_sectors == 0 &&
15  *                  cached_sectors > 0
16  *   The bucket contains data but may be safely discarded as there are
17  *   enough replicas of the data on other cache devices, or it has been
18  *   written back to the backing device
19  *
20  * - dirty bucket: owned_by_allocator == 0 &&
21  *                 dirty_sectors > 0
22  * The bucket contains data that we must not discard (either the only copy,
23  *   or one of the 'main copies' for data requiring multiple replicas)
24  *
25  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
26  *   This is a btree node, journal or gen/prio bucket
27  *
28  * Lifecycle:
29  *
30  * bucket invalidated => bucket on freelist => open bucket =>
31  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
32  *
33  * Note that cache promotion can skip the dirty bucket step, as data
34  * is copied from a deeper tier to a shallower tier, onto a cached
35  * bucket.
36  * Note also that a cached bucket can spontaneously become dirty --
37  * see below.
38  *
39  * Only a traversal of the key space can determine whether a bucket is
40  * truly dirty or cached.
41  *
42  * Transitions:
43  *
44  * - free => allocator: bucket was invalidated
45  * - cached => allocator: bucket was invalidated
46  *
47  * - allocator => dirty: open bucket was filled up
48  * - allocator => cached: open bucket was filled up
49  * - allocator => metadata: metadata was allocated
50  *
51  * - dirty => cached: dirty sectors were copied to a deeper tier
52  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
53  * - cached => free: cached sectors were overwritten
54  *
55  * - metadata => free: metadata was freed
56  *
57  * Oddities:
58  * - cached => dirty: a device was removed so formerly replicated data
59  *                    is no longer sufficiently replicated
60  * - free => cached: cannot happen
61  * - free => dirty: cannot happen
62  * - free => metadata: cannot happen
63  */
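/*
 * Illustrative only: a minimal, self-contained sketch (not part of this
 * file, kept out of the build) of the state classification described
 * above. The struct and helper below are hypothetical stand-ins for
 * struct bucket_mark, not the real types:
 */
#if 0
#include <stdbool.h>

struct example_mark {
        bool            owned_by_allocator;
        bool            is_metadata;
        unsigned        dirty_sectors;
        unsigned        cached_sectors;
};

static const char *example_bucket_state(struct example_mark m)
{
        if (m.owned_by_allocator)
                return "allocator";     /* on a freelist, or an open bucket */
        if (m.is_metadata)
                return "metadata";      /* btree node, journal or gen/prio bucket */
        if (m.dirty_sectors)
                return "dirty";         /* must not be discarded */
        if (m.cached_sectors)
                return "cached";        /* may be discarded/invalidated */
        return "free";                  /* mark == 0: no data, will not be read */
}
#endif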
64
65 #include "bcachefs.h"
66 #include "alloc_background.h"
67 #include "bset.h"
68 #include "btree_gc.h"
69 #include "btree_update.h"
70 #include "buckets.h"
71 #include "ec.h"
72 #include "error.h"
73 #include "movinggc.h"
74
75 #include <linux/preempt.h>
76 #include <trace/events/bcachefs.h>
77
78 static inline u64 __bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
79
80 #ifdef DEBUG_BUCKETS
81
82 #define lg_local_lock   lg_global_lock
83 #define lg_local_unlock lg_global_unlock
84
85 static void bch2_fs_stats_verify(struct bch_fs *c)
86 {
87         struct bch_fs_usage stats = bch2_fs_usage_read(c);
88         unsigned i, j;
89
90         for (i = 0; i < ARRAY_SIZE(stats.replicas); i++) {
91                 for (j = 0; j < ARRAY_SIZE(stats.replicas[i].data); j++)
92                         if ((s64) stats.replicas[i].data[j] < 0)
93                                 panic("replicas %u %s sectors underflow: %lli\n",
94                                       i + 1, bch_data_types[j],
95                                       stats.replicas[i].data[j]);
96
97                 if ((s64) stats.replicas[i].persistent_reserved < 0)
98                         panic("replicas %u reserved underflow: %lli\n",
99                               i + 1, stats.replicas[i].persistent_reserved);
100         }
101
102         for (j = 0; j < ARRAY_SIZE(stats.buckets); j++)
103                 if ((s64) stats.buckets[j] < 0)
104                         panic("%s buckets underflow: %lli\n",
105                               bch_data_types[j],
106                               stats.buckets[j]);
107
108         if ((s64) stats.s.online_reserved < 0)
109                 panic("sectors_online_reserved underflow: %lli\n",
110                       stats.s.online_reserved);
111 }
112
113 static void bch2_dev_stats_verify(struct bch_dev *ca)
114 {
115         struct bch_dev_usage stats =
116                 __bch2_dev_usage_read(ca, false);
117         u64 n = ca->mi.nbuckets - ca->mi.first_bucket;
118         unsigned i;
119
120         for (i = 0; i < ARRAY_SIZE(stats.buckets); i++)
121                 BUG_ON(stats.buckets[i]         > n);
122         BUG_ON(stats.buckets_alloc              > n);
123         BUG_ON(stats.buckets_unavailable        > n);
124 }
125
126 static void bch2_disk_reservations_verify(struct bch_fs *c, int flags)
127 {
128         if (!(flags & BCH_DISK_RESERVATION_NOFAIL)) {
129                 u64 used = __bch2_fs_sectors_used(c, bch2_fs_usage_read(c));
130                 u64 cached = 0;
131                 u64 avail = atomic64_read(&c->sectors_available);
132                 int cpu;
133
134                 for_each_possible_cpu(cpu)
135                         cached += per_cpu_ptr(c->usage_percpu, cpu)->available_cache;
136
137                 if (used + avail + cached > c->capacity)
138                         panic("used %llu avail %llu cached %llu capacity %llu\n",
139                               used, avail, cached, c->capacity);
140         }
141 }
142
143 #else
144
145 static void bch2_fs_stats_verify(struct bch_fs *c) {}
146 static void bch2_dev_stats_verify(struct bch_dev *ca) {}
147 static void bch2_disk_reservations_verify(struct bch_fs *c, int flags) {}
148
149 #endif
150
151 /*
152  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
153  * wraparound:
154  */
155 void bch2_bucket_seq_cleanup(struct bch_fs *c)
156 {
157         u64 journal_seq = atomic64_read(&c->journal.seq);
158         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
159         struct bch_dev *ca;
160         struct bucket_array *buckets;
161         struct bucket *g;
162         struct bucket_mark m;
163         unsigned i;
164
165         if (journal_seq - c->last_bucket_seq_cleanup <
166             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
167                 return;
168
169         c->last_bucket_seq_cleanup = journal_seq;
170
171         for_each_member_device(ca, c, i) {
172                 down_read(&ca->bucket_lock);
173                 buckets = bucket_array(ca);
174
175                 for_each_bucket(g, buckets) {
176                         bucket_cmpxchg(g, m, ({
177                                 if (!m.journal_seq_valid ||
178                                     bucket_needs_journal_commit(m, last_seq_ondisk))
179                                         break;
180
181                                 m.journal_seq_valid = 0;
182                         }));
183                 }
184                 up_read(&ca->bucket_lock);
185         }
186 }
187
188 #define bch2_usage_add(_acc, _stats)                                    \
189 do {                                                                    \
190         typeof(_acc) _a = (_acc), _s = (_stats);                        \
191         unsigned i;                                                     \
192                                                                         \
193         for (i = 0; i < sizeof(*_a) / sizeof(u64); i++)                 \
194                 ((u64 *) (_a))[i] += ((u64 *) (_s))[i];                 \
195 } while (0)
196
197 #define bch2_usage_read_raw(_stats)                                     \
198 ({                                                                      \
199         typeof(*this_cpu_ptr(_stats)) _acc;                             \
200         int cpu;                                                        \
201                                                                         \
202         memset(&_acc, 0, sizeof(_acc));                                 \
203                                                                         \
204         for_each_possible_cpu(cpu)                                      \
205                 bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu));      \
206                                                                         \
207         _acc;                                                           \
208 })
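/*
 * Illustrative only: the macros above treat a usage struct as a flat
 * array of u64 counters and sum one copy per CPU into a local
 * accumulator. A hypothetical plain-C sketch of that pattern (fixed
 * "CPU" count, no real percpu machinery), kept out of the build:
 */
#if 0
struct example_usage {
        u64     data;
        u64     cached;
        u64     reserved;
};

static struct example_usage example_usage_read(const struct example_usage *per_cpu,
                                               unsigned nr_cpus)
{
        struct example_usage acc = { 0 };
        unsigned cpu, i;

        for (cpu = 0; cpu < nr_cpus; cpu++)
                for (i = 0; i < sizeof(acc) / sizeof(u64); i++)
                        ((u64 *) &acc)[i] += ((const u64 *) &per_cpu[cpu])[i];

        return acc;
}
#endif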
209
210 struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *ca, bool gc)
211 {
212         return bch2_usage_read_raw(ca->usage[gc]);
213 }
214
215 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
216 {
217         return bch2_usage_read_raw(ca->usage[0]);
218 }
219
220 struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *c, bool gc)
221 {
222         return bch2_usage_read_raw(c->usage[gc]);
223 }
224
225 struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *c)
226 {
227         return bch2_usage_read_raw(c->usage[0]);
228 }
229
230 #define RESERVE_FACTOR  6
231
232 static u64 reserve_factor(u64 r)
233 {
234         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
235 }
236
237 static u64 avail_factor(u64 r)
238 {
239         return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
240 }
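/*
 * Worked example (RESERVE_FACTOR == 6, i.e. roughly 1/64 overhead):
 *
 *   reserve_factor(128) = 128 + (round_up(128, 64) >> 6) = 128 + 2 = 130
 *   avail_factor(130)   = (130 << 6) / (64 + 1)          = 8320 / 65 = 128
 *
 * so avail_factor() approximately inverts reserve_factor(), converting
 * raw free space back into the amount that may actually be reserved.
 */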
241
242 static inline u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
243 {
244         return fs_usage.s.hidden +
245                 fs_usage.s.data +
246                 reserve_factor(fs_usage.s.reserved +
247                                fs_usage.s.online_reserved);
248 }
249
250 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
251 {
252         return min(c->capacity, __bch2_fs_sectors_used(c, fs_usage));
253 }
254
255 struct bch_fs_usage_short
256 bch2_fs_usage_read_short(struct bch_fs *c)
257 {
258         struct bch_fs_usage_summarized usage =
259                 bch2_usage_read_raw(&c->usage[0]->s);
260         struct bch_fs_usage_short ret;
261
262         ret.capacity    = READ_ONCE(c->capacity) - usage.hidden;
263         ret.used        = min(ret.capacity, usage.data +
264                               reserve_factor(usage.reserved +
265                                              usage.online_reserved));
266         ret.nr_inodes   = usage.nr_inodes;
267
268         return ret;
269 }
270
271 static inline int is_unavailable_bucket(struct bucket_mark m)
272 {
273         return !is_available_bucket(m);
274 }
275
276 static inline int is_fragmented_bucket(struct bucket_mark m,
277                                        struct bch_dev *ca)
278 {
279         if (!m.owned_by_allocator &&
280             m.data_type == BCH_DATA_USER &&
281             bucket_sectors_used(m))
282                 return max_t(int, 0, (int) ca->mi.bucket_size -
283                              bucket_sectors_used(m));
284         return 0;
285 }
286
287 static inline enum bch_data_type bucket_type(struct bucket_mark m)
288 {
289         return m.cached_sectors && !m.dirty_sectors
290                 ? BCH_DATA_CACHED
291                 : m.data_type;
292 }
293
294 static bool bucket_became_unavailable(struct bucket_mark old,
295                                       struct bucket_mark new)
296 {
297         return is_available_bucket(old) &&
298                !is_available_bucket(new);
299 }
300
301 void bch2_fs_usage_apply(struct bch_fs *c,
302                          struct bch_fs_usage *fs_usage,
303                          struct disk_reservation *disk_res,
304                          struct gc_pos gc_pos)
305 {
306         s64 added = fs_usage->s.data + fs_usage->s.reserved;
307         s64 should_not_have_added;
308
309         percpu_rwsem_assert_held(&c->mark_lock);
310
311         /*
312          * Not allowed to reduce sectors_available except by getting a
313          * reservation:
314          */
315         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
316         if (WARN_ONCE(should_not_have_added > 0,
317                       "disk usage increased without a reservation")) {
318                 atomic64_sub(should_not_have_added, &c->sectors_available);
319                 added -= should_not_have_added;
320         }
321
322         if (added > 0) {
323                 disk_res->sectors               -= added;
324                 fs_usage->s.online_reserved     -= added;
325         }
326
327         bch2_usage_add(this_cpu_ptr(c->usage[0]), fs_usage);
328
329         if (gc_visited(c, gc_pos))
330                 bch2_usage_add(this_cpu_ptr(c->usage[1]), fs_usage);
331
332         bch2_fs_stats_verify(c);
333
334         memset(fs_usage, 0, sizeof(*fs_usage));
335 }
336
337 static inline void account_bucket(struct bch_fs_usage *fs_usage,
338                                   struct bch_dev_usage *dev_usage,
339                                   enum bch_data_type type,
340                                   int nr, s64 size)
341 {
342         if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
343                 fs_usage->s.hidden      += size;
344
345         fs_usage->buckets[type]         += size;
346         dev_usage->buckets[type]        += nr;
347 }
348
349 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
350                                   struct bch_fs_usage *fs_usage,
351                                   struct bucket_mark old, struct bucket_mark new,
352                                   bool gc)
353 {
354         struct bch_dev_usage *dev_usage;
355
356         percpu_rwsem_assert_held(&c->mark_lock);
357
358         bch2_fs_inconsistent_on(old.data_type && new.data_type &&
359                                 old.data_type != new.data_type, c,
360                 "different types of data in same bucket: %s, %s",
361                 bch2_data_types[old.data_type],
362                 bch2_data_types[new.data_type]);
363
364         dev_usage = this_cpu_ptr(ca->usage[gc]);
365
366         if (bucket_type(old))
367                 account_bucket(fs_usage, dev_usage, bucket_type(old),
368                                -1, -ca->mi.bucket_size);
369
370         if (bucket_type(new))
371                 account_bucket(fs_usage, dev_usage, bucket_type(new),
372                                1, ca->mi.bucket_size);
373
374         dev_usage->buckets_alloc +=
375                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
376         dev_usage->buckets_ec +=
377                 (int) new.stripe - (int) old.stripe;
378         dev_usage->buckets_unavailable +=
379                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
380
381         dev_usage->sectors[old.data_type] -= old.dirty_sectors;
382         dev_usage->sectors[new.data_type] += new.dirty_sectors;
383         dev_usage->sectors[BCH_DATA_CACHED] +=
384                 (int) new.cached_sectors - (int) old.cached_sectors;
385         dev_usage->sectors_fragmented +=
386                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
387
388         if (!is_available_bucket(old) && is_available_bucket(new))
389                 bch2_wake_allocator(ca);
390
391         bch2_dev_stats_verify(ca);
392 }
393
394 void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
395 {
396         struct bucket_mark old = { .v.counter = 0 };
397         struct bch_fs_usage *fs_usage;
398         struct bucket_array *buckets;
399         struct bucket *g;
400
401         percpu_down_read_preempt_disable(&c->mark_lock);
402         fs_usage = this_cpu_ptr(c->usage[0]);
403         buckets = bucket_array(ca);
404
405         for_each_bucket(g, buckets)
406                 if (g->mark.data_type)
407                         bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
408         percpu_up_read_preempt_enable(&c->mark_lock);
409 }
410
411 #define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr)      \
412 ({                                                              \
413         struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
414                                                                 \
415         bch2_dev_usage_update(c, ca, fs_usage, _old, new, gc);  \
416         _old;                                                   \
417 })
418
419 static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
420                                      size_t b, struct bucket_mark *old,
421                                      bool gc)
422 {
423         struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
424         struct bucket *g = __bucket(ca, b, gc);
425         struct bucket_mark new;
426
427         *old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
428                 BUG_ON(!is_available_bucket(new));
429
430                 new.owned_by_allocator  = 1;
431                 new.data_type           = 0;
432                 new.cached_sectors      = 0;
433                 new.dirty_sectors       = 0;
434                 new.gen++;
435         }));
436
437         fs_usage->replicas[0].data[BCH_DATA_CACHED]     -= old->cached_sectors;
438         fs_usage->s.cached                              -= old->cached_sectors;
439 }
440
441 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
442                             size_t b, struct bucket_mark *old)
443 {
444         percpu_rwsem_assert_held(&c->mark_lock);
445
446         __bch2_invalidate_bucket(c, ca, b, old, false);
447
448         if (!old->owned_by_allocator && old->cached_sectors)
449                 trace_invalidate(ca, bucket_to_sector(ca, b),
450                                  old->cached_sectors);
451 }
452
453 static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
454                                      size_t b, bool owned_by_allocator,
455                                      bool gc)
456 {
457         struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
458         struct bucket *g = __bucket(ca, b, gc);
459         struct bucket_mark old, new;
460
461         old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
462                 new.owned_by_allocator  = owned_by_allocator;
463         }));
464
465         BUG_ON(!gc &&
466                !owned_by_allocator && !old.owned_by_allocator);
467 }
468
469 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
470                             size_t b, bool owned_by_allocator,
471                             struct gc_pos pos, unsigned flags)
472 {
473         percpu_rwsem_assert_held(&c->mark_lock);
474
475         if (!(flags & BCH_BUCKET_MARK_GC))
476                 __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
477
478         if ((flags & BCH_BUCKET_MARK_GC) ||
479             gc_visited(c, pos))
480                 __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, true);
481 }
482
483 #define checked_add(a, b)                                       \
484 do {                                                            \
485         unsigned _res = (unsigned) (a) + (b);                   \
486         (a) = _res;                                             \
487         BUG_ON((a) != _res);                                    \
488 } while (0)
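/*
 * Illustrative only: checked_add() does the addition at full unsigned
 * width and verifies that the (typically narrower, often bitfield)
 * destination round-trips the result, so silent truncation becomes a
 * BUG(). A hypothetical example, kept out of the build:
 */
#if 0
static void example_checked_add(void)
{
        u16 dirty_sectors = 65000;

        checked_add(dirty_sectors, 1000);       /* 66000 doesn't fit in a u16: BUG() */
}
#endif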
489
490 static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
491                                         size_t b, enum bch_data_type type,
492                                         unsigned sectors, bool gc)
493 {
494         struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
495         struct bucket *g = __bucket(ca, b, gc);
496         struct bucket_mark new;
497
498         BUG_ON(type != BCH_DATA_SB &&
499                type != BCH_DATA_JOURNAL);
500
501         bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
502                 new.data_type   = type;
503                 checked_add(new.dirty_sectors, sectors);
504         }));
505
506         if (type == BCH_DATA_BTREE ||
507             type == BCH_DATA_USER)
508                 fs_usage->s.data                += sectors;
509         fs_usage->replicas[0].data[type]        += sectors;
510 }
511
512 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
513                                size_t b, enum bch_data_type type,
514                                unsigned sectors, struct gc_pos pos,
515                                unsigned flags)
516 {
517         BUG_ON(type != BCH_DATA_SB &&
518                type != BCH_DATA_JOURNAL);
519
520         if (likely(c)) {
521                 percpu_rwsem_assert_held(&c->mark_lock);
522
523                 if (!(flags & BCH_BUCKET_MARK_GC))
524                         __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
525                                                     false);
526                 if ((flags & BCH_BUCKET_MARK_GC) ||
527                     gc_visited(c, pos))
528                         __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
529                                                     true);
530         } else {
531                 struct bucket *g;
532                 struct bucket_mark old, new;
533
534                 rcu_read_lock();
535
536                 g = bucket(ca, b);
537                 old = bucket_cmpxchg(g, new, ({
538                         new.data_type = type;
539                         checked_add(new.dirty_sectors, sectors);
540                 }));
541
542                 rcu_read_unlock();
543         }
544 }
545
546 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
547                                   s64 delta)
548 {
549         if (delta > 0) {
550                 /*
551                  * marking a new extent, which _will have size_ @delta
552                  *
553                  * in the bch2_mark_update -> BCH_EXTENT_OVERLAP_MIDDLE
554                  * case, we haven't actually created the key we'll be inserting
555                  * yet (for the split) - so we don't want to be using
556                  * k->size/crc.live_size here:
557                  */
558                 return __ptr_disk_sectors(p, delta);
559         } else {
560                 BUG_ON(-delta > p.crc.live_size);
561
562                 return (s64) __ptr_disk_sectors(p, p.crc.live_size + delta) -
563                         (s64) ptr_disk_sectors(p);
564         }
565 }
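/*
 * Worked example (hypothetical numbers): for a pointer whose extent
 * currently has crc.live_size == 8 and is being trimmed by 2 sectors
 * (delta == -2), this returns the on-disk size of a 6-sector extent
 * minus the on-disk size of the current 8-sector extent - i.e. the
 * (negative) change in disk usage, not an absolute size.
 */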
566
567 /*
568  * Checking against gc's position has to be done here, inside the cmpxchg()
569  * loop, to avoid racing with the start of gc clearing all the marks - GC does
570  * that with the gc pos seqlock held.
571  */
572 static void bch2_mark_pointer(struct bch_fs *c,
573                               struct extent_ptr_decoded p,
574                               s64 sectors, enum bch_data_type data_type,
575                               struct bch_fs_usage *fs_usage,
576                               unsigned journal_seq, unsigned flags,
577                               bool gc)
578 {
579         struct bucket_mark old, new;
580         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
581         size_t b = PTR_BUCKET_NR(ca, &p.ptr);
582         struct bucket *g = __bucket(ca, b, gc);
583         u64 v;
584
585         v = atomic64_read(&g->_mark.v);
586         do {
587                 new.v.counter = old.v.counter = v;
588
589                 /*
590                  * Check this after reading bucket mark to guard against
591                  * the allocator invalidating a bucket after we've already
592                  * checked the gen
593                  */
594                 if (gen_after(new.gen, p.ptr.gen)) {
595                         BUG_ON(!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags));
596                         EBUG_ON(!p.ptr.cached &&
597                                 test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
598                         return;
599                 }
600
601                 if (!p.ptr.cached)
602                         checked_add(new.dirty_sectors, sectors);
603                 else
604                         checked_add(new.cached_sectors, sectors);
605
606                 if (!new.dirty_sectors &&
607                     !new.cached_sectors) {
608                         new.data_type   = 0;
609
610                         if (journal_seq) {
611                                 new.journal_seq_valid = 1;
612                                 new.journal_seq = journal_seq;
613                         }
614                 } else {
615                         new.data_type = data_type;
616                 }
617
618                 if (flags & BCH_BUCKET_MARK_NOATOMIC) {
619                         g->_mark = new;
620                         break;
621                 }
622         } while ((v = atomic64_cmpxchg(&g->_mark.v,
623                               old.v.counter,
624                               new.v.counter)) != old.v.counter);
625
626         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
627
628         BUG_ON(!gc && bucket_became_unavailable(old, new));
629 }
630
631 static int bch2_mark_stripe_ptr(struct bch_fs *c,
632                                 struct bch_extent_stripe_ptr p,
633                                 s64 sectors, unsigned flags,
634                                 s64 *adjusted_disk_sectors,
635                                 unsigned *redundancy,
636                                 bool gc)
637 {
638         struct stripe *m;
639         unsigned old, new, nr_data;
640         int blocks_nonempty_delta;
641         s64 parity_sectors;
642
643         m = genradix_ptr(&c->stripes[gc], p.idx);
644
645         if (!m || !m->alive) {
646                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
647                                     (u64) p.idx);
648                 return -1;
649         }
650
651         nr_data = m->nr_blocks - m->nr_redundant;
652
653         parity_sectors = DIV_ROUND_UP(abs(sectors) * m->nr_redundant, nr_data);
654
655         if (sectors < 0)
656                 parity_sectors = -parity_sectors;
657
658         *adjusted_disk_sectors += parity_sectors;
659
660         *redundancy = max_t(unsigned, *redundancy, m->nr_redundant + 1);
661
662         new = atomic_add_return(sectors, &m->block_sectors[p.block]);
663         old = new - sectors;
664
665         blocks_nonempty_delta = (int) !!new - (int) !!old;
666         if (!blocks_nonempty_delta)
667                 return 0;
668
669         atomic_add(blocks_nonempty_delta, &m->blocks_nonempty);
670
671         BUG_ON(atomic_read(&m->blocks_nonempty) < 0);
672
673         if (!gc)
674                 bch2_stripes_heap_update(c, m, p.idx);
675
676         return 0;
677 }
678
679 static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
680                             s64 sectors, enum bch_data_type data_type,
681                             struct bch_fs_usage *fs_usage,
682                             unsigned journal_seq, unsigned flags,
683                             bool gc)
684 {
685         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
686         const union bch_extent_entry *entry;
687         struct extent_ptr_decoded p;
688         s64 cached_sectors      = 0;
689         s64 dirty_sectors       = 0;
690         s64 ec_sectors          = 0;
691         unsigned replicas       = 0;
692         unsigned ec_redundancy  = 0;
693         unsigned i;
694         int ret;
695
696         BUG_ON(!sectors);
697
698         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
699                 s64 disk_sectors = data_type == BCH_DATA_BTREE
700                         ? sectors
701                         : ptr_disk_sectors_delta(p, sectors);
702                 s64 adjusted_disk_sectors = disk_sectors;
703
704                 bch2_mark_pointer(c, p, disk_sectors, data_type,
705                                   fs_usage, journal_seq, flags, gc);
706
707                 if (!p.ptr.cached)
708                         for (i = 0; i < p.ec_nr; i++) {
709                                 ret = bch2_mark_stripe_ptr(c, p.ec[i],
710                                                 disk_sectors, flags,
711                                                 &adjusted_disk_sectors,
712                                                 &ec_redundancy, gc);
713                                 if (ret)
714                                         return ret;
715                         }
716                 if (!p.ptr.cached)
717                         replicas++;
718
719                 if (p.ptr.cached)
720                         cached_sectors  += adjusted_disk_sectors;
721                 else if (!p.ec_nr)
722                         dirty_sectors   += adjusted_disk_sectors;
723                 else
724                         ec_sectors      += adjusted_disk_sectors;
725         }
726
727         replicas        = clamp_t(unsigned,     replicas,
728                                   1, ARRAY_SIZE(fs_usage->replicas));
729         ec_redundancy   = clamp_t(unsigned,     ec_redundancy,
730                                   1, ARRAY_SIZE(fs_usage->replicas));
731
732         fs_usage->s.cached                                      += cached_sectors;
733         fs_usage->replicas[0].data[BCH_DATA_CACHED]             += cached_sectors;
734
735         fs_usage->s.data                                        += dirty_sectors;
736         fs_usage->replicas[replicas - 1].data[data_type]        += dirty_sectors;
737
738         fs_usage->s.data                                        += ec_sectors;
739         fs_usage->replicas[ec_redundancy - 1].ec_data           += ec_sectors;
740
741         return 0;
742 }
743
744 static void bucket_set_stripe(struct bch_fs *c,
745                               const struct bch_stripe *v,
746                               bool enabled,
747                               struct bch_fs_usage *fs_usage,
748                               u64 journal_seq,
749                               bool gc)
750 {
751         unsigned i;
752
753         for (i = 0; i < v->nr_blocks; i++) {
754                 const struct bch_extent_ptr *ptr = v->ptrs + i;
755                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
756                 size_t b = PTR_BUCKET_NR(ca, ptr);
757                 struct bucket *g = __bucket(ca, b, gc);
758                 struct bucket_mark new, old;
759
760                 BUG_ON(ptr_stale(ca, ptr));
761
762                 old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
763                         new.stripe                      = enabled;
764                         if (journal_seq) {
765                                 new.journal_seq_valid   = 1;
766                                 new.journal_seq         = journal_seq;
767                         }
768                 }));
769
770                 BUG_ON(old.stripe == enabled);
771         }
772 }
773
774 static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
775                             bool inserting,
776                             struct bch_fs_usage *fs_usage,
777                             u64 journal_seq, unsigned flags,
778                             bool gc)
779 {
780         struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
781         size_t idx = s.k->p.offset;
782         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
783         unsigned i;
784
785         if (!m || (!inserting && !m->alive)) {
786                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
787                                     idx);
788                 return -1;
789         }
790
791         if (inserting && m->alive) {
792                 bch_err_ratelimited(c, "error marking stripe %zu: already exists",
793                                     idx);
794                 return -1;
795         }
796
797         BUG_ON(atomic_read(&m->blocks_nonempty));
798
799         for (i = 0; i < EC_STRIPE_MAX; i++)
800                 BUG_ON(atomic_read(&m->block_sectors[i]));
801
802         if (inserting) {
803                 m->sectors      = le16_to_cpu(s.v->sectors);
804                 m->algorithm    = s.v->algorithm;
805                 m->nr_blocks    = s.v->nr_blocks;
806                 m->nr_redundant = s.v->nr_redundant;
807         }
808
809         if (!gc) {
810                 if (inserting)
811                         bch2_stripes_heap_insert(c, m, idx);
812                 else
813                         bch2_stripes_heap_del(c, m, idx);
814         } else {
815                 m->alive = inserting;
816         }
817
818         bucket_set_stripe(c, s.v, inserting, fs_usage, 0, gc);
819         return 0;
820 }
821
822 static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
823                            bool inserting, s64 sectors,
824                            struct bch_fs_usage *fs_usage,
825                            unsigned journal_seq, unsigned flags,
826                            bool gc)
827 {
828         int ret = 0;
829
830         switch (k.k->type) {
831         case KEY_TYPE_btree_ptr:
832                 ret = bch2_mark_extent(c, k, inserting
833                                        ?  c->opts.btree_node_size
834                                        : -c->opts.btree_node_size,
835                                        BCH_DATA_BTREE,
836                                        fs_usage, journal_seq, flags, gc);
837                 break;
838         case KEY_TYPE_extent:
839                 ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
840                                        fs_usage, journal_seq, flags, gc);
841                 break;
842         case KEY_TYPE_stripe:
843                 ret = bch2_mark_stripe(c, k, inserting,
844                                        fs_usage, journal_seq, flags, gc);
845                 break;
846         case KEY_TYPE_alloc:
847                 if (inserting)
848                         fs_usage->s.nr_inodes++;
849                 else
850                         fs_usage->s.nr_inodes--;
851                 break;
852         case KEY_TYPE_reservation: {
853                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
854
855                 sectors *= replicas;
856                 replicas = clamp_t(unsigned, replicas,
857                                    1, ARRAY_SIZE(fs_usage->replicas));
858
859                 fs_usage->s.reserved                                    += sectors;
860                 fs_usage->replicas[replicas - 1].persistent_reserved    += sectors;
861                 break;
862         }
863         default:
864                 break;
865         }
866
867         return ret;
868 }
869
870 int bch2_mark_key_locked(struct bch_fs *c,
871                    struct bkey_s_c k,
872                    bool inserting, s64 sectors,
873                    struct gc_pos pos,
874                    struct bch_fs_usage *fs_usage,
875                    u64 journal_seq, unsigned flags)
876 {
877         int ret;
878
879         if (!(flags & BCH_BUCKET_MARK_GC)) {
880                 ret = __bch2_mark_key(c, k, inserting, sectors,
881                                       fs_usage ?: this_cpu_ptr(c->usage[0]),
882                                       journal_seq, flags, false);
883                 if (ret)
884                         return ret;
885         }
886
887         if ((flags & BCH_BUCKET_MARK_GC) ||
888             gc_visited(c, pos)) {
889                 ret = __bch2_mark_key(c, k, inserting, sectors,
890                                       this_cpu_ptr(c->usage[1]),
891                                       journal_seq, flags, true);
892                 if (ret)
893                         return ret;
894         }
895
896         return 0;
897 }
898
899 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
900                   bool inserting, s64 sectors,
901                   struct gc_pos pos,
902                   struct bch_fs_usage *fs_usage,
903                   u64 journal_seq, unsigned flags)
904 {
905         int ret;
906
907         percpu_down_read_preempt_disable(&c->mark_lock);
908         ret = bch2_mark_key_locked(c, k, inserting, sectors,
909                                    pos, fs_usage, journal_seq, flags);
910         percpu_up_read_preempt_enable(&c->mark_lock);
911
912         return ret;
913 }
914
915 void bch2_mark_update(struct btree_insert *trans,
916                       struct btree_insert_entry *insert)
917 {
918         struct bch_fs           *c = trans->c;
919         struct btree_iter       *iter = insert->iter;
920         struct btree            *b = iter->l[0].b;
921         struct btree_node_iter  node_iter = iter->l[0].iter;
922         struct bch_fs_usage     fs_usage = { 0 };
923         struct gc_pos           pos = gc_pos_btree_node(b);
924         struct bkey_packed      *_k;
925
926         if (!btree_node_type_needs_gc(iter->btree_id))
927                 return;
928
929         percpu_down_read_preempt_disable(&c->mark_lock);
930
931         if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
932                 bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
933                         bpos_min(insert->k->k.p, b->key.k.p).offset -
934                         bkey_start_offset(&insert->k->k),
935                         pos, &fs_usage, trans->journal_res.seq, 0);
936
937         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
938                                                       KEY_TYPE_discard))) {
939                 struct bkey             unpacked;
940                 struct bkey_s_c         k;
941                 s64                     sectors = 0;
942
943                 k = bkey_disassemble(b, _k, &unpacked);
944
945                 if (btree_node_is_extents(b)
946                     ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
947                     : bkey_cmp(insert->k->k.p, k.k->p))
948                         break;
949
950                 if (btree_node_is_extents(b)) {
951                         switch (bch2_extent_overlap(&insert->k->k, k.k)) {
952                         case BCH_EXTENT_OVERLAP_ALL:
953                                 sectors = -((s64) k.k->size);
954                                 break;
955                         case BCH_EXTENT_OVERLAP_BACK:
956                                 sectors = bkey_start_offset(&insert->k->k) -
957                                         k.k->p.offset;
958                                 break;
959                         case BCH_EXTENT_OVERLAP_FRONT:
960                                 sectors = bkey_start_offset(k.k) -
961                                         insert->k->k.p.offset;
962                                 break;
963                         case BCH_EXTENT_OVERLAP_MIDDLE:
964                                 sectors = k.k->p.offset - insert->k->k.p.offset;
965                                 BUG_ON(sectors <= 0);
966
967                                 bch2_mark_key_locked(c, k, true, sectors,
968                                         pos, &fs_usage, trans->journal_res.seq, 0);
969
970                                 sectors = bkey_start_offset(&insert->k->k) -
971                                         k.k->p.offset;
972                                 break;
973                         }
974
975                         BUG_ON(sectors >= 0);
976                 }
977
978                 bch2_mark_key_locked(c, k, false, sectors,
979                         pos, &fs_usage, trans->journal_res.seq, 0);
980
981                 bch2_btree_node_iter_advance(&node_iter, b);
982         }
983
984         bch2_fs_usage_apply(c, &fs_usage, trans->disk_res, pos);
985
986         percpu_up_read_preempt_enable(&c->mark_lock);
987 }
988
989 /* Disk reservations: */
990
991 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
992 {
993         int cpu;
994
995         for_each_possible_cpu(cpu)
996                 per_cpu_ptr(c->pcpu, cpu)->sectors_available = 0;
997
998         return avail_factor(bch2_fs_sectors_free(c));
999 }
1000
1001 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
1002 {
1003         percpu_down_read_preempt_disable(&c->mark_lock);
1004         this_cpu_sub(c->usage[0]->s.online_reserved,
1005                      res->sectors);
1006
1007         bch2_fs_stats_verify(c);
1008         percpu_up_read_preempt_enable(&c->mark_lock);
1009
1010         res->sectors = 0;
1011 }
1012
1013 #define SECTORS_CACHE   1024
1014
1015 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1016                               unsigned sectors, int flags)
1017 {
1018         struct bch_fs_pcpu *pcpu;
1019         u64 old, v, get;
1020         s64 sectors_available;
1021         int ret;
1022
1023         percpu_down_read_preempt_disable(&c->mark_lock);
1024         pcpu = this_cpu_ptr(c->pcpu);
1025
1026         if (sectors <= pcpu->sectors_available)
1027                 goto out;
1028
1029         v = atomic64_read(&c->sectors_available);
1030         do {
1031                 old = v;
1032                 get = min((u64) sectors + SECTORS_CACHE, old);
1033
1034                 if (get < sectors) {
1035                         percpu_up_read_preempt_enable(&c->mark_lock);
1036                         goto recalculate;
1037                 }
1038         } while ((v = atomic64_cmpxchg(&c->sectors_available,
1039                                        old, old - get)) != old);
1040
1041         pcpu->sectors_available         += get;
1042
1043 out:
1044         pcpu->sectors_available         -= sectors;
1045         this_cpu_add(c->usage[0]->s.online_reserved, sectors);
1046         res->sectors                    += sectors;
1047
1048         bch2_disk_reservations_verify(c, flags);
1049         bch2_fs_stats_verify(c);
1050         percpu_up_read_preempt_enable(&c->mark_lock);
1051         return 0;
1052
1053 recalculate:
1054         /*
1055          * GC recalculates sectors_available when it starts, so that hopefully
1056          * we don't normally end up blocking here:
1057          */
1058
1059         /*
1060          * Unfortunately, we can be called from extent_insert_fixup() with btree
1061          * locks held:
1062          */
1063
1064         if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) {
1065                 if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD))
1066                         down_read(&c->gc_lock);
1067                 else if (!down_read_trylock(&c->gc_lock))
1068                         return -EINTR;
1069         }
1070
1071         percpu_down_write(&c->mark_lock);
1072         sectors_available = bch2_recalc_sectors_available(c);
1073
1074         if (sectors <= sectors_available ||
1075             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1076                 atomic64_set(&c->sectors_available,
1077                              max_t(s64, 0, sectors_available - sectors));
1078                 this_cpu_add(c->usage[0]->s.online_reserved, sectors);
1079                 res->sectors                    += sectors;
1080                 ret = 0;
1081
1082                 bch2_disk_reservations_verify(c, flags);
1083         } else {
1084                 atomic64_set(&c->sectors_available, sectors_available);
1085                 ret = -ENOSPC;
1086         }
1087
1088         bch2_fs_stats_verify(c);
1089         percpu_up_write(&c->mark_lock);
1090
1091         if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
1092                 up_read(&c->gc_lock);
1093
1094         return ret;
1095 }
1096
1097 /* Startup/shutdown: */
1098
1099 static void buckets_free_rcu(struct rcu_head *rcu)
1100 {
1101         struct bucket_array *buckets =
1102                 container_of(rcu, struct bucket_array, rcu);
1103
1104         kvpfree(buckets,
1105                 sizeof(struct bucket_array) +
1106                 buckets->nbuckets * sizeof(struct bucket));
1107 }
1108
1109 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1110 {
1111         struct bucket_array *buckets = NULL, *old_buckets = NULL;
1112         unsigned long *buckets_nouse = NULL;
1113         unsigned long *buckets_written = NULL;
1114         u8 *oldest_gens = NULL;
1115         alloc_fifo      free[RESERVE_NR];
1116         alloc_fifo      free_inc;
1117         alloc_heap      alloc_heap;
1118         copygc_heap     copygc_heap;
1119
1120         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1121                              ca->mi.bucket_size / c->opts.btree_node_size);
1122         /* XXX: these should be tunable */
1123         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
1124         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
1125         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
1126                                       btree_reserve);
1127         bool resize = ca->buckets[0] != NULL,
1128              start_copygc = ca->copygc_thread != NULL;
1129         int ret = -ENOMEM;
1130         unsigned i;
1131
1132         memset(&free,           0, sizeof(free));
1133         memset(&free_inc,       0, sizeof(free_inc));
1134         memset(&alloc_heap,     0, sizeof(alloc_heap));
1135         memset(&copygc_heap,    0, sizeof(copygc_heap));
1136
1137         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
1138                                             nbuckets * sizeof(struct bucket),
1139                                             GFP_KERNEL|__GFP_ZERO)) ||
1140             !(oldest_gens       = kvpmalloc(nbuckets * sizeof(u8),
1141                                             GFP_KERNEL|__GFP_ZERO)) ||
1142             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1143                                             sizeof(unsigned long),
1144                                             GFP_KERNEL|__GFP_ZERO)) ||
1145             !(buckets_written   = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1146                                             sizeof(unsigned long),
1147                                             GFP_KERNEL|__GFP_ZERO)) ||
1148             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
1149             !init_fifo(&free[RESERVE_MOVINGGC],
1150                        copygc_reserve, GFP_KERNEL) ||
1151             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
1152             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
1153             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
1154             !init_heap(&copygc_heap,    copygc_reserve, GFP_KERNEL))
1155                 goto err;
1156
1157         buckets->first_bucket   = ca->mi.first_bucket;
1158         buckets->nbuckets       = nbuckets;
1159
1160         bch2_copygc_stop(ca);
1161
1162         if (resize) {
1163                 down_write(&c->gc_lock);
1164                 down_write(&ca->bucket_lock);
1165                 percpu_down_write(&c->mark_lock);
1166         }
1167
1168         old_buckets = bucket_array(ca);
1169
1170         if (resize) {
1171                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
1172
1173                 memcpy(buckets->b,
1174                        old_buckets->b,
1175                        n * sizeof(struct bucket));
1176                 memcpy(oldest_gens,
1177                        ca->oldest_gens,
1178                        n * sizeof(u8));
1179                 memcpy(buckets_nouse,
1180                        ca->buckets_nouse,
1181                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1182                 memcpy(buckets_written,
1183                        ca->buckets_written,
1184                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1185         }
1186
1187         rcu_assign_pointer(ca->buckets[0], buckets);
1188         buckets = old_buckets;
1189
1190         swap(ca->oldest_gens, oldest_gens);
1191         swap(ca->buckets_nouse, buckets_nouse);
1192         swap(ca->buckets_written, buckets_written);
1193
1194         if (resize)
1195                 percpu_up_write(&c->mark_lock);
1196
1197         spin_lock(&c->freelist_lock);
1198         for (i = 0; i < RESERVE_NR; i++) {
1199                 fifo_move(&free[i], &ca->free[i]);
1200                 swap(ca->free[i], free[i]);
1201         }
1202         fifo_move(&free_inc, &ca->free_inc);
1203         swap(ca->free_inc, free_inc);
1204         spin_unlock(&c->freelist_lock);
1205
1206         /* with gc lock held, alloc_heap can't be in use: */
1207         swap(ca->alloc_heap, alloc_heap);
1208
1209         /* and we shut down copygc: */
1210         swap(ca->copygc_heap, copygc_heap);
1211
1212         nbuckets = ca->mi.nbuckets;
1213
1214         if (resize) {
1215                 up_write(&ca->bucket_lock);
1216                 up_write(&c->gc_lock);
1217         }
1218
1219         if (start_copygc &&
1220             bch2_copygc_start(c, ca))
1221                 bch_err(ca, "error restarting copygc thread");
1222
1223         ret = 0;
1224 err:
1225         free_heap(&copygc_heap);
1226         free_heap(&alloc_heap);
1227         free_fifo(&free_inc);
1228         for (i = 0; i < RESERVE_NR; i++)
1229                 free_fifo(&free[i]);
1230         kvpfree(buckets_nouse,
1231                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1232         kvpfree(buckets_written,
1233                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1234         kvpfree(oldest_gens,
1235                 nbuckets * sizeof(u8));
1236         if (buckets)
1237                 call_rcu(&buckets->rcu, buckets_free_rcu);
1238
1239         return ret;
1240 }
1241
1242 void bch2_dev_buckets_free(struct bch_dev *ca)
1243 {
1244         unsigned i;
1245
1246         free_heap(&ca->copygc_heap);
1247         free_heap(&ca->alloc_heap);
1248         free_fifo(&ca->free_inc);
1249         for (i = 0; i < RESERVE_NR; i++)
1250                 free_fifo(&ca->free[i]);
1251         kvpfree(ca->buckets_written,
1252                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
1253         kvpfree(ca->buckets_nouse,
1254                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
1255         kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
1256         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
1257                 sizeof(struct bucket_array) +
1258                 ca->mi.nbuckets * sizeof(struct bucket));
1259
1260         free_percpu(ca->usage[0]);
1261 }
1262
1263 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
1264 {
1265         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
1266                 return -ENOMEM;
1267
1268         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
1269 }