1 /*
2  * Code for manipulating bucket marks for garbage collection.
3  *
4  * Copyright 2014 Datera, Inc.
5  *
6  * Bucket states:
7  * - free bucket: mark == 0
8  *   The bucket contains no data and will not be read
9  *
10  * - allocator bucket: owned_by_allocator == 1
11  *   The bucket is on a free list, or it is an open bucket
12  *
13  * - cached bucket: owned_by_allocator == 0 &&
14  *                  dirty_sectors == 0 &&
15  *                  cached_sectors > 0
16  *   The bucket contains data but may be safely discarded as there are
17  *   enough replicas of the data on other cache devices, or it has been
18  *   written back to the backing device
19  *
20  * - dirty bucket: owned_by_allocator == 0 &&
21  *                 dirty_sectors > 0
22  *   The bucket contains data that we must not discard (either the only
23  *   copy, or one of the 'main copies' of data requiring multiple replicas)
24  *
25  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
26  *   This is a btree node, journal or gen/prio bucket
27  *
28  * Lifecycle:
29  *
30  * bucket invalidated => bucket on freelist => open bucket =>
31  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
32  *
33  * Note that cache promotion can skip the dirty bucket step, as data
34  * is copied from a deeper tier to a shallower tier, onto a cached
35  * bucket.
36  * Note also that a cached bucket can spontaneously become dirty --
37  * see below.
38  *
39  * Only a traversal of the key space can determine whether a bucket is
40  * truly dirty or cached.
41  *
42  * Transitions:
43  *
44  * - free => allocator: bucket was invalidated
45  * - cached => allocator: bucket was invalidated
46  *
47  * - allocator => dirty: open bucket was filled up
48  * - allocator => cached: open bucket was filled up
49  * - allocator => metadata: metadata was allocated
50  *
51  * - dirty => cached: dirty sectors were copied to a deeper tier
52  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
53  * - cached => free: cached sectors were overwritten
54  *
55  * - metadata => free: metadata was freed
56  *
57  * Oddities:
58  * - cached => dirty: a device was removed so formerly replicated data
59  *                    is no longer sufficiently replicated
60  * - free => cached: cannot happen
61  * - free => dirty: cannot happen
62  * - free => metadata: cannot happen
63  */
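/*
 * Rough order of checks for classifying a bucket from its mark, as described
 * above (illustrative only - the authoritative helpers are below and in
 * buckets.h):
 *
 *	mark == 0		-> free
 *	owned_by_allocator	-> allocator bucket
 *	is_metadata		-> metadata bucket
 *	dirty_sectors		-> dirty bucket
 *	cached_sectors		-> cached bucket
 */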
64
65 #include "bcachefs.h"
66 #include "alloc_background.h"
67 #include "bset.h"
68 #include "btree_gc.h"
69 #include "btree_update.h"
70 #include "buckets.h"
71 #include "ec.h"
72 #include "error.h"
73 #include "movinggc.h"
74 #include "replicas.h"
75
76 #include <linux/preempt.h>
77 #include <trace/events/bcachefs.h>
78
79 /*
80  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
81  * wraparound:
82  */
83 void bch2_bucket_seq_cleanup(struct bch_fs *c)
84 {
85         u64 journal_seq = atomic64_read(&c->journal.seq);
86         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
87         struct bch_dev *ca;
88         struct bucket_array *buckets;
89         struct bucket *g;
90         struct bucket_mark m;
91         unsigned i;
92
93         if (journal_seq - c->last_bucket_seq_cleanup <
94             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
95                 return;
96
97         c->last_bucket_seq_cleanup = journal_seq;
98
99         for_each_member_device(ca, c, i) {
100                 down_read(&ca->bucket_lock);
101                 buckets = bucket_array(ca);
102
103                 for_each_bucket(g, buckets) {
104                         bucket_cmpxchg(g, m, ({
105                                 if (!m.journal_seq_valid ||
106                                     bucket_needs_journal_commit(m, last_seq_ondisk))
107                                         break;
108
109                                 m.journal_seq_valid = 0;
110                         }));
111                 }
112                 up_read(&ca->bucket_lock);
113         }
114 }
115
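/*
 * Sum a set of percpu usage counters into a single flat copy: the result
 * struct is treated as an array of u64s and every cpu's counters are added in.
 */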
116 #define bch2_usage_read_raw(_stats)                                     \
117 ({                                                                      \
118         typeof(*this_cpu_ptr(_stats)) _acc;                             \
119                                                                         \
120         memset(&_acc, 0, sizeof(_acc));                                 \
121         acc_u64s_percpu((u64 *) &_acc,                                  \
122                         (u64 __percpu *) _stats,                        \
123                         sizeof(_acc) / sizeof(u64));                    \
124                                                                         \
125         _acc;                                                           \
126 })
127
128 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
129 {
130         return bch2_usage_read_raw(ca->usage[0]);
131 }
132
133 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
134 {
135         struct bch_fs_usage *ret;
136         unsigned nr = READ_ONCE(c->replicas.nr);
137 retry:
138         ret = kzalloc(sizeof(*ret) + nr * sizeof(u64), GFP_NOFS);
139         if (unlikely(!ret))
140                 return NULL;
141
142         percpu_down_read_preempt_disable(&c->mark_lock);
143
144         if (unlikely(nr < c->replicas.nr)) {
145                 nr = c->replicas.nr;
146                 percpu_up_read_preempt_enable(&c->mark_lock);
147                 kfree(ret);
148                 goto retry;
149         }
150
151         acc_u64s_percpu((u64 *) ret,
152                         (u64 __percpu *) c->usage[0],
153                         sizeof(*ret) / sizeof(u64) + nr);
154
155         return ret;
156 }
157
158 #define RESERVE_FACTOR  6
159
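/*
 * With RESERVE_FACTOR == 6, reserve_factor() pads a sector count by roughly
 * 1/64th and avail_factor() is (approximately) its inverse, e.g.:
 *
 *	reserve_factor(6400) == 6400 + 6400/64 == 6500
 *	avail_factor(6500)   == 6500 * 64/65   == 6400
 */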
160 static u64 reserve_factor(u64 r)
161 {
162         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
163 }
164
165 static u64 avail_factor(u64 r)
166 {
167         return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
168 }
169
170 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
171 {
172         return min(fs_usage.s.hidden +
173                    fs_usage.s.data +
174                    reserve_factor(fs_usage.s.reserved +
175                                   fs_usage.s.online_reserved),
176                    c->capacity);
177 }
178
179 struct bch_fs_usage_short
180 bch2_fs_usage_read_short(struct bch_fs *c)
181 {
182         struct bch_fs_usage_summarized usage =
183                 bch2_usage_read_raw(&c->usage[0]->s);
184         struct bch_fs_usage_short ret;
185
186         ret.capacity    = READ_ONCE(c->capacity) - usage.hidden;
187         ret.used        = min(ret.capacity, usage.data +
188                               reserve_factor(usage.reserved +
189                                              usage.online_reserved));
190         ret.nr_inodes   = usage.nr_inodes;
191
192         return ret;
193 }
194
195 static inline int is_unavailable_bucket(struct bucket_mark m)
196 {
197         return !is_available_bucket(m);
198 }
199
200 static inline int is_fragmented_bucket(struct bucket_mark m,
201                                        struct bch_dev *ca)
202 {
203         if (!m.owned_by_allocator &&
204             m.data_type == BCH_DATA_USER &&
205             bucket_sectors_used(m))
206                 return max_t(int, 0, (int) ca->mi.bucket_size -
207                              bucket_sectors_used(m));
208         return 0;
209 }
210
211 static inline enum bch_data_type bucket_type(struct bucket_mark m)
212 {
213         return m.cached_sectors && !m.dirty_sectors
214                 ? BCH_DATA_CACHED
215                 : m.data_type;
216 }
217
218 static bool bucket_became_unavailable(struct bucket_mark old,
219                                       struct bucket_mark new)
220 {
221         return is_available_bucket(old) &&
222                !is_available_bucket(new);
223 }
224
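/*
 * Fold a filesystem usage delta into the percpu counters (and into the gc
 * copy, if gc has already visited this position), charging newly allocated
 * sectors against the caller's disk reservation.
 */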
225 int bch2_fs_usage_apply(struct bch_fs *c,
226                         struct bch_fs_usage *fs_usage,
227                         struct disk_reservation *disk_res,
228                         struct gc_pos gc_pos)
229 {
230         s64 added = fs_usage->s.data + fs_usage->s.reserved;
231         s64 should_not_have_added;
232         int ret = 0;
233
234         percpu_rwsem_assert_held(&c->mark_lock);
235
236         /*
237          * Not allowed to reduce sectors_available except by getting a
238          * reservation:
239          */
240         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
241         if (WARN_ONCE(should_not_have_added > 0,
242                       "disk usage increased without a reservation")) {
243                 atomic64_sub(should_not_have_added, &c->sectors_available);
244                 added -= should_not_have_added;
245                 ret = -1;
246         }
247
248         if (added > 0) {
249                 disk_res->sectors               -= added;
250                 fs_usage->s.online_reserved     -= added;
251         }
252
253         acc_u64s((u64 *) this_cpu_ptr(c->usage[0]),
254                  (u64 *) fs_usage,
255                  sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
256
257         if (gc_visited(c, gc_pos)) {
258                 BUG_ON(!c->usage[1]);
259                 acc_u64s((u64 *) this_cpu_ptr(c->usage[1]),
260                          (u64 *) fs_usage,
261                          sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
262         }
263
264         return ret;
265 }
266
267 static inline void account_bucket(struct bch_fs_usage *fs_usage,
268                                   struct bch_dev_usage *dev_usage,
269                                   enum bch_data_type type,
270                                   int nr, s64 size)
271 {
272         if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
273                 fs_usage->s.hidden      += size;
274
275         dev_usage->buckets[type]        += nr;
276 }
277
278 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
279                                   struct bch_fs_usage *fs_usage,
280                                   struct bucket_mark old, struct bucket_mark new,
281                                   bool gc)
282 {
283         struct bch_dev_usage *dev_usage;
284
285         percpu_rwsem_assert_held(&c->mark_lock);
286
287         bch2_fs_inconsistent_on(old.data_type && new.data_type &&
288                                 old.data_type != new.data_type, c,
289                 "different types of data in same bucket: %s, %s",
290                 bch2_data_types[old.data_type],
291                 bch2_data_types[new.data_type]);
292
293         dev_usage = this_cpu_ptr(ca->usage[gc]);
294
295         if (bucket_type(old))
296                 account_bucket(fs_usage, dev_usage, bucket_type(old),
297                                -1, -ca->mi.bucket_size);
298
299         if (bucket_type(new))
300                 account_bucket(fs_usage, dev_usage, bucket_type(new),
301                                1, ca->mi.bucket_size);
302
303         dev_usage->buckets_alloc +=
304                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
305         dev_usage->buckets_ec +=
306                 (int) new.stripe - (int) old.stripe;
307         dev_usage->buckets_unavailable +=
308                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
309
310         dev_usage->sectors[old.data_type] -= old.dirty_sectors;
311         dev_usage->sectors[new.data_type] += new.dirty_sectors;
312         dev_usage->sectors[BCH_DATA_CACHED] +=
313                 (int) new.cached_sectors - (int) old.cached_sectors;
314         dev_usage->sectors_fragmented +=
315                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
316
317         if (!is_available_bucket(old) && is_available_bucket(new))
318                 bch2_wake_allocator(ca);
319 }
320
321 void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
322 {
323         struct bucket_mark old = { .v.counter = 0 };
324         struct bch_fs_usage *fs_usage;
325         struct bucket_array *buckets;
326         struct bucket *g;
327
328         percpu_down_read_preempt_disable(&c->mark_lock);
329         fs_usage = this_cpu_ptr(c->usage[0]);
330         buckets = bucket_array(ca);
331
332         for_each_bucket(g, buckets)
333                 if (g->mark.data_type)
334                         bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
335         percpu_up_read_preempt_enable(&c->mark_lock);
336 }
337
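/*
 * Like bucket_cmpxchg(), but also folds the old -> new mark transition into
 * the device and filesystem usage counters; note that it picks up the local
 * variable 'gc' from the calling context.
 */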
338 #define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr)      \
339 ({                                                              \
340         struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
341                                                                 \
342         bch2_dev_usage_update(c, ca, fs_usage, _old, new, gc);  \
343         _old;                                                   \
344 })
345
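/*
 * Charge @sectors to the replicas entry @r (which must already exist in
 * c->replicas) and to the cached/data summary totals.
 */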
346 static inline void update_replicas(struct bch_fs *c,
347                                    struct bch_fs_usage *fs_usage,
348                                    struct bch_replicas_entry *r,
349                                    s64 sectors)
350 {
351         int idx = bch2_replicas_entry_idx(c, r);
352
353         BUG_ON(idx < 0);
354         BUG_ON(!sectors);
355
356         if (r->data_type == BCH_DATA_CACHED)
357                 fs_usage->s.cached      += sectors;
358         else
359                 fs_usage->s.data        += sectors;
360         fs_usage->data[idx]             += sectors;
361 }
362
363 static inline void update_cached_sectors(struct bch_fs *c,
364                                          struct bch_fs_usage *fs_usage,
365                                          unsigned dev, s64 sectors)
366 {
367         struct bch_replicas_padded r;
368
369         bch2_replicas_entry_cached(&r.e, dev);
370
371         update_replicas(c, fs_usage, &r.e, sectors);
372 }
373
374 static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
375                                      size_t b, struct bucket_mark *old,
376                                      bool gc)
377 {
378         struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
379         struct bucket *g = __bucket(ca, b, gc);
380         struct bucket_mark new;
381
382         *old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
383                 BUG_ON(!is_available_bucket(new));
384
385                 new.owned_by_allocator  = true;
386                 new.dirty               = true;
387                 new.data_type           = 0;
388                 new.cached_sectors      = 0;
389                 new.dirty_sectors       = 0;
390                 new.gen++;
391         }));
392
393         if (old->cached_sectors)
394                 update_cached_sectors(c, fs_usage, ca->dev_idx,
395                                       -old->cached_sectors);
396 }
397
398 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
399                             size_t b, struct bucket_mark *old)
400 {
401         percpu_rwsem_assert_held(&c->mark_lock);
402
403         __bch2_invalidate_bucket(c, ca, b, old, false);
404
405         if (!old->owned_by_allocator && old->cached_sectors)
406                 trace_invalidate(ca, bucket_to_sector(ca, b),
407                                  old->cached_sectors);
408 }
409
410 static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
411                                      size_t b, bool owned_by_allocator,
412                                      bool gc)
413 {
414         struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
415         struct bucket *g = __bucket(ca, b, gc);
416         struct bucket_mark old, new;
417
418         old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
419                 new.owned_by_allocator  = owned_by_allocator;
420         }));
421
422         BUG_ON(!gc &&
423                !owned_by_allocator && !old.owned_by_allocator);
424 }
425
426 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
427                             size_t b, bool owned_by_allocator,
428                             struct gc_pos pos, unsigned flags)
429 {
430         percpu_rwsem_assert_held(&c->mark_lock);
431
432         if (!(flags & BCH_BUCKET_MARK_GC))
433                 __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
434
435         if ((flags & BCH_BUCKET_MARK_GC) ||
436             gc_visited(c, pos))
437                 __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, true);
438 }
439
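/*
 * Add @b to @a, where @a may be a narrow bitfield: the sum is computed at full
 * unsigned width, and the BUG_ON fires if assigning it back truncated the
 * result (e.g. a 16 bit sector count overflowing).
 */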
440 #define checked_add(a, b)                                       \
441 do {                                                            \
442         unsigned _res = (unsigned) (a) + (b);                   \
443         (a) = _res;                                             \
444         BUG_ON((a) != _res);                                    \
445 } while (0)
446
447 static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
448                                         size_t b, enum bch_data_type type,
449                                         unsigned sectors, bool gc)
450 {
451         struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
452         struct bucket *g = __bucket(ca, b, gc);
453         struct bucket_mark new;
454
455         BUG_ON(type != BCH_DATA_SB &&
456                type != BCH_DATA_JOURNAL);
457
458         bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
459                 new.dirty       = true;
460                 new.data_type   = type;
461                 checked_add(new.dirty_sectors, sectors);
462         }));
463 }
464
465 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
466                                size_t b, enum bch_data_type type,
467                                unsigned sectors, struct gc_pos pos,
468                                unsigned flags)
469 {
470         BUG_ON(type != BCH_DATA_SB &&
471                type != BCH_DATA_JOURNAL);
472
473         if (likely(c)) {
474                 percpu_rwsem_assert_held(&c->mark_lock);
475
476                 if (!(flags & BCH_BUCKET_MARK_GC))
477                         __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
478                                                     false);
479                 if ((flags & BCH_BUCKET_MARK_GC) ||
480                     gc_visited(c, pos))
481                         __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
482                                                     true);
483         } else {
484                 struct bucket *g;
485                 struct bucket_mark new;
486
487                 rcu_read_lock();
488
489                 g = bucket(ca, b);
490                 bucket_cmpxchg(g, new, ({
491                         new.dirty       = true;
492                         new.data_type   = type;
493                         checked_add(new.dirty_sectors, sectors);
494                 }));
495
496                 rcu_read_unlock();
497         }
498 }
499
500 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
501                                   s64 delta)
502 {
503         if (delta > 0) {
504                 /*
505                  * marking a new extent, which _will have size_ @delta
506                  *
507                  * in the bch2_mark_update -> BCH_EXTENT_OVERLAP_MIDDLE
508                  * case, we haven't actually created the key we'll be inserting
509                  * yet (for the split) - so we don't want to be using
510                  * k->size/crc.live_size here:
511                  */
512                 return __ptr_disk_sectors(p, delta);
513         } else {
514                 BUG_ON(-delta > p.crc.live_size);
515
516                 return (s64) __ptr_disk_sectors(p, p.crc.live_size + delta) -
517                         (s64) ptr_disk_sectors(p);
518         }
519 }
520
521 /*
522  * Checking against gc's position has to be done here, inside the cmpxchg()
523  * loop, to avoid racing with the start of gc clearing all the marks - GC does
524  * that with the gc pos seqlock held.
525  */
526 static void bch2_mark_pointer(struct bch_fs *c,
527                               struct extent_ptr_decoded p,
528                               s64 sectors, enum bch_data_type data_type,
529                               struct bch_fs_usage *fs_usage,
530                               unsigned journal_seq, unsigned flags,
531                               bool gc)
532 {
533         struct bucket_mark old, new;
534         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
535         size_t b = PTR_BUCKET_NR(ca, &p.ptr);
536         struct bucket *g = __bucket(ca, b, gc);
537         u64 v;
538
539         v = atomic64_read(&g->_mark.v);
540         do {
541                 new.v.counter = old.v.counter = v;
542
543                 new.dirty = true;
544
545                 /*
546                  * Check this after reading bucket mark to guard against
547                  * the allocator invalidating a bucket after we've already
548                  * checked the gen
549                  */
550                 if (gen_after(new.gen, p.ptr.gen)) {
551                         BUG_ON(!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags));
552                         EBUG_ON(!p.ptr.cached &&
553                                 test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
554                         return;
555                 }
556
557                 if (!p.ptr.cached)
558                         checked_add(new.dirty_sectors, sectors);
559                 else
560                         checked_add(new.cached_sectors, sectors);
561
562                 if (!new.dirty_sectors &&
563                     !new.cached_sectors) {
564                         new.data_type   = 0;
565
566                         if (journal_seq) {
567                                 new.journal_seq_valid = 1;
568                                 new.journal_seq = journal_seq;
569                         }
570                 } else {
571                         new.data_type = data_type;
572                 }
573
574                 if (flags & BCH_BUCKET_MARK_NOATOMIC) {
575                         g->_mark = new;
576                         break;
577                 }
578         } while ((v = atomic64_cmpxchg(&g->_mark.v,
579                               old.v.counter,
580                               new.v.counter)) != old.v.counter);
581
582         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
583
584         BUG_ON(!gc && bucket_became_unavailable(old, new));
585 }
586
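/*
 * Account sectors written to an erasure coded stripe: the charge includes a
 * proportional share of parity, e.g. on a stripe with 4 data and 2 parity
 * blocks, 8 data sectors are accounted as 8 + DIV_ROUND_UP(8 * 2, 4) = 12
 * sectors.
 */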
587 static int bch2_mark_stripe_ptr(struct bch_fs *c,
588                                 struct bch_extent_stripe_ptr p,
589                                 enum bch_data_type data_type,
590                                 struct bch_fs_usage *fs_usage,
591                                 s64 sectors, unsigned flags,
592                                 bool gc)
593 {
594         struct stripe *m;
595         unsigned old, new, nr_data;
596         int blocks_nonempty_delta;
597         s64 parity_sectors;
598
599         BUG_ON(!sectors);
600
601         m = genradix_ptr(&c->stripes[gc], p.idx);
602
603         spin_lock(&c->ec_stripes_heap_lock);
604
605         if (!m || !m->alive) {
606                 spin_unlock(&c->ec_stripes_heap_lock);
607                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
608                                     (u64) p.idx);
609                 return -1;
610         }
611
612         BUG_ON(m->r.e.data_type != data_type);
613
614         nr_data = m->nr_blocks - m->nr_redundant;
615
616         parity_sectors = DIV_ROUND_UP(abs(sectors) * m->nr_redundant, nr_data);
617
618         if (sectors < 0)
619                 parity_sectors = -parity_sectors;
620         sectors += parity_sectors;
621
622         old = m->block_sectors[p.block];
623         m->block_sectors[p.block] += sectors;
624         new = m->block_sectors[p.block];
625
626         blocks_nonempty_delta = (int) !!new - (int) !!old;
627         if (blocks_nonempty_delta) {
628                 m->blocks_nonempty += blocks_nonempty_delta;
629
630                 if (!gc)
631                         bch2_stripes_heap_update(c, m, p.idx);
632         }
633
634         m->dirty = true;
635
636         spin_unlock(&c->ec_stripes_heap_lock);
637
638         update_replicas(c, fs_usage, &m->r.e, sectors);
639
640         return 0;
641 }
642
643 static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
644                             s64 sectors, enum bch_data_type data_type,
645                             struct bch_fs_usage *fs_usage,
646                             unsigned journal_seq, unsigned flags,
647                             bool gc)
648 {
649         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
650         const union bch_extent_entry *entry;
651         struct extent_ptr_decoded p;
652         struct bch_replicas_padded r;
653         s64 dirty_sectors = 0;
654         unsigned i;
655         int ret;
656
657         r.e.data_type   = data_type;
658         r.e.nr_devs     = 0;
659         r.e.nr_required = 1;
660
661         BUG_ON(!sectors);
662
663         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
664                 s64 disk_sectors = data_type == BCH_DATA_BTREE
665                         ? sectors
666                         : ptr_disk_sectors_delta(p, sectors);
667
668                 bch2_mark_pointer(c, p, disk_sectors, data_type,
669                                   fs_usage, journal_seq, flags, gc);
670
671                 if (p.ptr.cached) {
672                         update_cached_sectors(c, fs_usage, p.ptr.dev,
673                                               disk_sectors);
674                 } else if (!p.ec_nr) {
675                         dirty_sectors          += disk_sectors;
676                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
677                 } else {
678                         for (i = 0; i < p.ec_nr; i++) {
679                                 ret = bch2_mark_stripe_ptr(c, p.ec[i],
680                                                 data_type, fs_usage,
681                                                 disk_sectors, flags, gc);
682                                 if (ret)
683                                         return ret;
684                         }
685
686                         r.e.nr_required = 0;
687                 }
688         }
689
690         if (dirty_sectors)
691                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
692
693         return 0;
694 }
695
696 static void bucket_set_stripe(struct bch_fs *c,
697                               const struct bch_stripe *v,
698                               bool enabled,
699                               struct bch_fs_usage *fs_usage,
700                               u64 journal_seq,
701                               bool gc)
702 {
703         unsigned i;
704
705         for (i = 0; i < v->nr_blocks; i++) {
706                 const struct bch_extent_ptr *ptr = v->ptrs + i;
707                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
708                 size_t b = PTR_BUCKET_NR(ca, ptr);
709                 struct bucket *g = __bucket(ca, b, gc);
710                 struct bucket_mark new, old;
711
712                 BUG_ON(ptr_stale(ca, ptr));
713
714                 old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
715                         new.dirty                       = true;
716                         new.stripe                      = enabled;
717                         if (journal_seq) {
718                                 new.journal_seq_valid   = 1;
719                                 new.journal_seq         = journal_seq;
720                         }
721                 }));
722         }
723 }
724
725 static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
726                             bool inserting,
727                             struct bch_fs_usage *fs_usage,
728                             u64 journal_seq, unsigned flags,
729                             bool gc)
730 {
731         struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
732         size_t idx = s.k->p.offset;
733         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
734         unsigned i;
735
736         spin_lock(&c->ec_stripes_heap_lock);
737
738         if (!m || (!inserting && !m->alive)) {
739                 spin_unlock(&c->ec_stripes_heap_lock);
740                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
741                                     idx);
742                 return -1;
743         }
744
745         if (m->alive)
746                 bch2_stripes_heap_del(c, m, idx);
747
748         memset(m, 0, sizeof(*m));
749
750         if (inserting) {
751                 m->sectors      = le16_to_cpu(s.v->sectors);
752                 m->algorithm    = s.v->algorithm;
753                 m->nr_blocks    = s.v->nr_blocks;
754                 m->nr_redundant = s.v->nr_redundant;
755
756                 memset(&m->r, 0, sizeof(m->r));
757
758                 m->r.e.data_type        = BCH_DATA_USER;
759                 m->r.e.nr_devs          = s.v->nr_blocks;
760                 m->r.e.nr_required      = s.v->nr_blocks - s.v->nr_redundant;
761
762                 for (i = 0; i < s.v->nr_blocks; i++)
763                         m->r.e.devs[i] = s.v->ptrs[i].dev;
764
765         /*
766          * XXX: account for stripes somehow here
767          */
768 #if 0
769         update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
770 #endif
771
772                 /* gc recalculates these fields: */
773                 if (!(flags & BCH_BUCKET_MARK_GC)) {
774                         for (i = 0; i < s.v->nr_blocks; i++) {
775                                 m->block_sectors[i] =
776                                         stripe_blockcount_get(s.v, i);
777                                 m->blocks_nonempty += !!m->block_sectors[i];
778                         }
779                 }
780
781                 if (!gc)
782                         bch2_stripes_heap_insert(c, m, idx);
783                 else
784                         m->alive = true;
785         }
786
787         spin_unlock(&c->ec_stripes_heap_lock);
788
789         bucket_set_stripe(c, s.v, inserting, fs_usage, 0, gc);
790         return 0;
791 }
792
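/*
 * Dispatch on key type: extents and btree pointers update bucket marks and
 * replicas counters, stripes update the stripes radix tree, and inodes and
 * reservations only touch the summary counters.
 */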
793 static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
794                            bool inserting, s64 sectors,
795                            struct bch_fs_usage *fs_usage,
796                            unsigned journal_seq, unsigned flags,
797                            bool gc)
798 {
799         int ret = 0;
800
801         switch (k.k->type) {
802         case KEY_TYPE_btree_ptr:
803                 ret = bch2_mark_extent(c, k, inserting
804                                        ?  c->opts.btree_node_size
805                                        : -c->opts.btree_node_size,
806                                        BCH_DATA_BTREE,
807                                        fs_usage, journal_seq, flags, gc);
808                 break;
809         case KEY_TYPE_extent:
810                 ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
811                                        fs_usage, journal_seq, flags, gc);
812                 break;
813         case KEY_TYPE_stripe:
814                 ret = bch2_mark_stripe(c, k, inserting,
815                                        fs_usage, journal_seq, flags, gc);
816                 break;
817         case KEY_TYPE_inode:
818                 if (inserting)
819                         fs_usage->s.nr_inodes++;
820                 else
821                         fs_usage->s.nr_inodes--;
822                 break;
823         case KEY_TYPE_reservation: {
824                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
825
826                 sectors *= replicas;
827                 replicas = clamp_t(unsigned, replicas, 1,
828                                    ARRAY_SIZE(fs_usage->persistent_reserved));
829
830                 fs_usage->s.reserved                            += sectors;
831                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
832                 break;
833         }
834         default:
835                 break;
836         }
837
838         return ret;
839 }
840
841 int bch2_mark_key_locked(struct bch_fs *c,
842                    struct bkey_s_c k,
843                    bool inserting, s64 sectors,
844                    struct gc_pos pos,
845                    struct bch_fs_usage *fs_usage,
846                    u64 journal_seq, unsigned flags)
847 {
848         int ret;
849
850         if (!(flags & BCH_BUCKET_MARK_GC)) {
851                 ret = __bch2_mark_key(c, k, inserting, sectors,
852                                       fs_usage ?: this_cpu_ptr(c->usage[0]),
853                                       journal_seq, flags, false);
854                 if (ret)
855                         return ret;
856         }
857
858         if ((flags & BCH_BUCKET_MARK_GC) ||
859             gc_visited(c, pos)) {
860                 ret = __bch2_mark_key(c, k, inserting, sectors,
861                                       this_cpu_ptr(c->usage[1]),
862                                       journal_seq, flags, true);
863                 if (ret)
864                         return ret;
865         }
866
867         return 0;
868 }
869
870 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
871                   bool inserting, s64 sectors,
872                   struct gc_pos pos,
873                   struct bch_fs_usage *fs_usage,
874                   u64 journal_seq, unsigned flags)
875 {
876         int ret;
877
878         percpu_down_read_preempt_disable(&c->mark_lock);
879         ret = bch2_mark_key_locked(c, k, inserting, sectors,
880                                    pos, fs_usage, journal_seq, flags);
881         percpu_up_read_preempt_enable(&c->mark_lock);
882
883         return ret;
884 }
885
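/*
 * Mark a key being inserted and adjust the marks of existing keys it
 * overlaps. For extents, the @sectors passed for an overlapped key is
 * negative: e.g. inserting [0, 100) on top of an existing extent [50, 150) is
 * the BCH_EXTENT_OVERLAP_FRONT case, and 50 - 100 = -50 releases the 50
 * overwritten sectors.
 */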
886 void bch2_mark_update(struct btree_insert *trans,
887                       struct btree_insert_entry *insert)
888 {
889         struct bch_fs           *c = trans->c;
890         struct btree_iter       *iter = insert->iter;
891         struct btree            *b = iter->l[0].b;
892         struct btree_node_iter  node_iter = iter->l[0].iter;
893         struct bch_fs_usage     *fs_usage;
894         struct gc_pos           pos = gc_pos_btree_node(b);
895         struct bkey_packed      *_k;
896         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
897         static int warned_disk_usage = 0;
898
899         if (!btree_node_type_needs_gc(iter->btree_id))
900                 return;
901
902         percpu_down_read_preempt_disable(&c->mark_lock);
903         fs_usage = bch2_fs_usage_get_scratch(c);
904
905         if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
906                 bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
907                         bpos_min(insert->k->k.p, b->key.k.p).offset -
908                         bkey_start_offset(&insert->k->k),
909                         pos, fs_usage, trans->journal_res.seq, 0);
910
911         while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
912                                                       KEY_TYPE_discard))) {
913                 struct bkey             unpacked;
914                 struct bkey_s_c         k;
915                 s64                     sectors = 0;
916
917                 k = bkey_disassemble(b, _k, &unpacked);
918
919                 if (btree_node_is_extents(b)
920                     ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
921                     : bkey_cmp(insert->k->k.p, k.k->p))
922                         break;
923
924                 if (btree_node_is_extents(b)) {
925                         switch (bch2_extent_overlap(&insert->k->k, k.k)) {
926                         case BCH_EXTENT_OVERLAP_ALL:
927                                 sectors = -((s64) k.k->size);
928                                 break;
929                         case BCH_EXTENT_OVERLAP_BACK:
930                                 sectors = bkey_start_offset(&insert->k->k) -
931                                         k.k->p.offset;
932                                 break;
933                         case BCH_EXTENT_OVERLAP_FRONT:
934                                 sectors = bkey_start_offset(k.k) -
935                                         insert->k->k.p.offset;
936                                 break;
937                         case BCH_EXTENT_OVERLAP_MIDDLE:
938                                 sectors = k.k->p.offset - insert->k->k.p.offset;
939                                 BUG_ON(sectors <= 0);
940
941                                 bch2_mark_key_locked(c, k, true, sectors,
942                                         pos, fs_usage, trans->journal_res.seq, 0);
943
944                                 sectors = bkey_start_offset(&insert->k->k) -
945                                         k.k->p.offset;
946                                 break;
947                         }
948
949                         BUG_ON(sectors >= 0);
950                 }
951
952                 bch2_mark_key_locked(c, k, false, sectors,
953                         pos, fs_usage, trans->journal_res.seq, 0);
954
955                 bch2_btree_node_iter_advance(&node_iter, b);
956         }
957
958         if (bch2_fs_usage_apply(c, fs_usage, trans->disk_res, pos) &&
959             !warned_disk_usage &&
960             !xchg(&warned_disk_usage, 1)) {
961                 char buf[200];
962
963                 pr_err("disk usage increased more than %llu sectors reserved", disk_res_sectors);
964
965                 pr_err("while inserting");
966                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert->k));
967                 pr_err("%s", buf);
968                 pr_err("overlapping with");
969
970                 node_iter = iter->l[0].iter;
971                 while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
972                                                               KEY_TYPE_discard))) {
973                         struct bkey             unpacked;
974                         struct bkey_s_c         k;
975
976                         k = bkey_disassemble(b, _k, &unpacked);
977
978                         if (btree_node_is_extents(b)
979                             ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
980                             : bkey_cmp(insert->k->k.p, k.k->p))
981                                 break;
982
983                         bch2_bkey_val_to_text(&PBUF(buf), c, k);
984                         pr_err("%s", buf);
985
986                         bch2_btree_node_iter_advance(&node_iter, b);
987                 }
988         }
989
990         percpu_up_read_preempt_enable(&c->mark_lock);
991 }
992
993 /* Disk reservations: */
994
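/*
 * Invalidate every cpu's cached slice of sectors_available and recompute the
 * global pool from current free space, scaled down by avail_factor() (the
 * counterpart of reserve_factor() above).
 */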
995 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
996 {
997         int cpu;
998
999         for_each_possible_cpu(cpu)
1000                 per_cpu_ptr(c->pcpu, cpu)->sectors_available = 0;
1001
1002         return avail_factor(bch2_fs_sectors_free(c));
1003 }
1004
1005 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
1006 {
1007         percpu_down_read_preempt_disable(&c->mark_lock);
1008         this_cpu_sub(c->usage[0]->s.online_reserved,
1009                      res->sectors);
1010         percpu_up_read_preempt_enable(&c->mark_lock);
1011
1012         res->sectors = 0;
1013 }
1014
1015 #define SECTORS_CACHE   1024
1016
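/*
 * Reserve @sectors for a future write. Fast path: take from this cpu's cached
 * slice, refilled from c->sectors_available in batches of SECTORS_CACHE.
 * Slow path: take the gc lock and mark_lock and recalculate the pool; fails
 * with -ENOSPC unless BCH_DISK_RESERVATION_NOFAIL is set. A rough usage
 * sketch:
 *
 *	struct disk_reservation res = { 0 };
 *
 *	if (!bch2_disk_reservation_add(c, &res, sectors, 0)) {
 *		... do the write ...
 *		__bch2_disk_reservation_put(c, &res);
 *	}
 */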
1017 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
1018                               unsigned sectors, int flags)
1019 {
1020         struct bch_fs_pcpu *pcpu;
1021         u64 old, v, get;
1022         s64 sectors_available;
1023         int ret;
1024
1025         percpu_down_read_preempt_disable(&c->mark_lock);
1026         pcpu = this_cpu_ptr(c->pcpu);
1027
1028         if (sectors <= pcpu->sectors_available)
1029                 goto out;
1030
1031         v = atomic64_read(&c->sectors_available);
1032         do {
1033                 old = v;
1034                 get = min((u64) sectors + SECTORS_CACHE, old);
1035
1036                 if (get < sectors) {
1037                         percpu_up_read_preempt_enable(&c->mark_lock);
1038                         goto recalculate;
1039                 }
1040         } while ((v = atomic64_cmpxchg(&c->sectors_available,
1041                                        old, old - get)) != old);
1042
1043         pcpu->sectors_available         += get;
1044
1045 out:
1046         pcpu->sectors_available         -= sectors;
1047         this_cpu_add(c->usage[0]->s.online_reserved, sectors);
1048         res->sectors                    += sectors;
1049
1050         percpu_up_read_preempt_enable(&c->mark_lock);
1051         return 0;
1052
1053 recalculate:
1054         /*
1055          * GC recalculates sectors_available when it starts, so that hopefully
1056          * we don't normally end up blocking here:
1057          */
1058
1059         /*
1060          * Unfortunately, we can be called from extent_insert_fixup() with
1061          * btree locks held:
1062          */
1063
1064         if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) {
1065                 if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD))
1066                         down_read(&c->gc_lock);
1067                 else if (!down_read_trylock(&c->gc_lock))
1068                         return -EINTR;
1069         }
1070
1071         percpu_down_write(&c->mark_lock);
1072         sectors_available = bch2_recalc_sectors_available(c);
1073
1074         if (sectors <= sectors_available ||
1075             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
1076                 atomic64_set(&c->sectors_available,
1077                              max_t(s64, 0, sectors_available - sectors));
1078                 this_cpu_add(c->usage[0]->s.online_reserved, sectors);
1079                 res->sectors                    += sectors;
1080                 ret = 0;
1081         } else {
1082                 atomic64_set(&c->sectors_available, sectors_available);
1083                 ret = -ENOSPC;
1084         }
1085
1086         percpu_up_write(&c->mark_lock);
1087
1088         if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
1089                 up_read(&c->gc_lock);
1090
1091         return ret;
1092 }
1093
1094 /* Startup/shutdown: */
1095
1096 static void buckets_free_rcu(struct rcu_head *rcu)
1097 {
1098         struct bucket_array *buckets =
1099                 container_of(rcu, struct bucket_array, rcu);
1100
1101         kvpfree(buckets,
1102                 sizeof(struct bucket_array) +
1103                 buckets->nbuckets * sizeof(struct bucket));
1104 }
1105
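/*
 * Allocate bucket arrays, freelists and heaps for @nbuckets buckets. When
 * resizing an existing device the old contents are copied over under gc_lock,
 * bucket_lock and mark_lock before the new arrays are swapped in, and the old
 * bucket array is freed via RCU, since readers access it under
 * rcu_read_lock().
 */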
1106 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1107 {
1108         struct bucket_array *buckets = NULL, *old_buckets = NULL;
1109         unsigned long *buckets_nouse = NULL;
1110         unsigned long *buckets_written = NULL;
1111         u8 *oldest_gens = NULL;
1112         alloc_fifo      free[RESERVE_NR];
1113         alloc_fifo      free_inc;
1114         alloc_heap      alloc_heap;
1115         copygc_heap     copygc_heap;
1116
1117         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
1118                              ca->mi.bucket_size / c->opts.btree_node_size);
1119         /* XXX: these should be tunable */
1120         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
1121         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
1122         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
1123                                       btree_reserve * 2);
1124         bool resize = ca->buckets[0] != NULL,
1125              start_copygc = ca->copygc_thread != NULL;
1126         int ret = -ENOMEM;
1127         unsigned i;
1128
1129         memset(&free,           0, sizeof(free));
1130         memset(&free_inc,       0, sizeof(free_inc));
1131         memset(&alloc_heap,     0, sizeof(alloc_heap));
1132         memset(&copygc_heap,    0, sizeof(copygc_heap));
1133
1134         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
1135                                             nbuckets * sizeof(struct bucket),
1136                                             GFP_KERNEL|__GFP_ZERO)) ||
1137             !(oldest_gens       = kvpmalloc(nbuckets * sizeof(u8),
1138                                             GFP_KERNEL|__GFP_ZERO)) ||
1139             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1140                                             sizeof(unsigned long),
1141                                             GFP_KERNEL|__GFP_ZERO)) ||
1142             !(buckets_written   = kvpmalloc(BITS_TO_LONGS(nbuckets) *
1143                                             sizeof(unsigned long),
1144                                             GFP_KERNEL|__GFP_ZERO)) ||
1145             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
1146             !init_fifo(&free[RESERVE_MOVINGGC],
1147                        copygc_reserve, GFP_KERNEL) ||
1148             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
1149             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
1150             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
1151             !init_heap(&copygc_heap,    copygc_reserve, GFP_KERNEL))
1152                 goto err;
1153
1154         buckets->first_bucket   = ca->mi.first_bucket;
1155         buckets->nbuckets       = nbuckets;
1156
1157         bch2_copygc_stop(ca);
1158
1159         if (resize) {
1160                 down_write(&c->gc_lock);
1161                 down_write(&ca->bucket_lock);
1162                 percpu_down_write(&c->mark_lock);
1163         }
1164
1165         old_buckets = bucket_array(ca);
1166
1167         if (resize) {
1168                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
1169
1170                 memcpy(buckets->b,
1171                        old_buckets->b,
1172                        n * sizeof(struct bucket));
1173                 memcpy(oldest_gens,
1174                        ca->oldest_gens,
1175                        n * sizeof(u8));
1176                 memcpy(buckets_nouse,
1177                        ca->buckets_nouse,
1178                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1179                 memcpy(buckets_written,
1180                        ca->buckets_written,
1181                        BITS_TO_LONGS(n) * sizeof(unsigned long));
1182         }
1183
1184         rcu_assign_pointer(ca->buckets[0], buckets);
1185         buckets = old_buckets;
1186
1187         swap(ca->oldest_gens, oldest_gens);
1188         swap(ca->buckets_nouse, buckets_nouse);
1189         swap(ca->buckets_written, buckets_written);
1190
1191         if (resize)
1192                 percpu_up_write(&c->mark_lock);
1193
1194         spin_lock(&c->freelist_lock);
1195         for (i = 0; i < RESERVE_NR; i++) {
1196                 fifo_move(&free[i], &ca->free[i]);
1197                 swap(ca->free[i], free[i]);
1198         }
1199         fifo_move(&free_inc, &ca->free_inc);
1200         swap(ca->free_inc, free_inc);
1201         spin_unlock(&c->freelist_lock);
1202
1203         /* with gc lock held, alloc_heap can't be in use: */
1204         swap(ca->alloc_heap, alloc_heap);
1205
1206         /* and we shut down copygc: */
1207         swap(ca->copygc_heap, copygc_heap);
1208
1209         nbuckets = ca->mi.nbuckets;
1210
1211         if (resize) {
1212                 up_write(&ca->bucket_lock);
1213                 up_write(&c->gc_lock);
1214         }
1215
1216         if (start_copygc &&
1217             bch2_copygc_start(c, ca))
1218                 bch_err(ca, "error restarting copygc thread");
1219
1220         ret = 0;
1221 err:
1222         free_heap(&copygc_heap);
1223         free_heap(&alloc_heap);
1224         free_fifo(&free_inc);
1225         for (i = 0; i < RESERVE_NR; i++)
1226                 free_fifo(&free[i]);
1227         kvpfree(buckets_nouse,
1228                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1229         kvpfree(buckets_written,
1230                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
1231         kvpfree(oldest_gens,
1232                 nbuckets * sizeof(u8));
1233         if (buckets)
1234                 call_rcu(&buckets->rcu, buckets_free_rcu);
1235
1236         return ret;
1237 }
1238
1239 void bch2_dev_buckets_free(struct bch_dev *ca)
1240 {
1241         unsigned i;
1242
1243         free_heap(&ca->copygc_heap);
1244         free_heap(&ca->alloc_heap);
1245         free_fifo(&ca->free_inc);
1246         for (i = 0; i < RESERVE_NR; i++)
1247                 free_fifo(&ca->free[i]);
1248         kvpfree(ca->buckets_written,
1249                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
1250         kvpfree(ca->buckets_nouse,
1251                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
1252         kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
1253         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
1254                 sizeof(struct bucket_array) +
1255                 ca->mi.nbuckets * sizeof(struct bucket));
1256
1257         free_percpu(ca->usage[0]);
1258 }
1259
1260 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
1261 {
1262         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
1263                 return -ENOMEM;
1264
1265         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
1266 }