1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  * The bucket contains data that we must not discard (either the only copy,
24  *   or one of the 'main copies' for data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
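/*
 * (In the current struct bucket_mark the is_metadata flag above is expressed
 * via data_type: metadata buckets are those with data_type BCH_DATA_btree,
 * BCH_DATA_journal or BCH_DATA_sb, while the free, allocator, dirty and
 * cached states map onto mark == 0, owned_by_allocator, dirty_sectors and
 * cached_sectors as described.)
 */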
65
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
79
80 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
81                                               enum bch_data_type data_type,
82                                               s64 sectors)
83 {
84         switch (data_type) {
85         case BCH_DATA_btree:
86                 fs_usage->btree         += sectors;
87                 break;
88         case BCH_DATA_user:
89         case BCH_DATA_parity:
90                 fs_usage->data          += sectors;
91                 break;
92         case BCH_DATA_cached:
93                 fs_usage->cached        += sectors;
94                 break;
95         default:
96                 break;
97         }
98 }
99
100 /*
101  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
102  * wraparound:
103  */
104 void bch2_bucket_seq_cleanup(struct bch_fs *c)
105 {
106         u64 journal_seq = atomic64_read(&c->journal.seq);
107         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
108         struct bch_dev *ca;
109         struct bucket_array *buckets;
110         struct bucket *g;
111         struct bucket_mark m;
112         unsigned i;
113
114         if (journal_seq - c->last_bucket_seq_cleanup <
115             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
116                 return;
117
118         c->last_bucket_seq_cleanup = journal_seq;
119
120         for_each_member_device(ca, c, i) {
121                 down_read(&ca->bucket_lock);
122                 buckets = bucket_array(ca);
123
124                 for_each_bucket(g, buckets) {
125                         bucket_cmpxchg(g, m, ({
126                                 if (!m.journal_seq_valid ||
127                                     bucket_needs_journal_commit(m, last_seq_ondisk))
128                                         break;
129
130                                 m.journal_seq_valid = 0;
131                         }));
132                 }
133                 up_read(&ca->bucket_lock);
134         }
135 }
136
137 void bch2_fs_usage_initialize(struct bch_fs *c)
138 {
139         struct bch_fs_usage *usage;
140         unsigned i;
141
142         percpu_down_write(&c->mark_lock);
143         usage = c->usage_base;
144
145         for (i = 0; i < ARRAY_SIZE(c->usage); i++)
146                 bch2_fs_usage_acc_to_base(c, i);
147
148         for (i = 0; i < BCH_REPLICAS_MAX; i++)
149                 usage->reserved += usage->persistent_reserved[i];
150
151         for (i = 0; i < c->replicas.nr; i++) {
152                 struct bch_replicas_entry *e =
153                         cpu_replicas_entry(&c->replicas, i);
154
155                 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
156         }
157
158         percpu_up_write(&c->mark_lock);
159 }
160
161 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
162 {
163         if (fs_usage == c->usage_scratch)
164                 mutex_unlock(&c->usage_scratch_lock);
165         else
166                 kfree(fs_usage);
167 }
168
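/*
 * Get a zeroed struct bch_fs_usage to accumulate deltas into: try an atomic
 * allocation first, then fall back to the preallocated c->usage_scratch
 * buffer if its lock is uncontended, then a sleeping allocation, and only
 * block on the scratch lock as a last resort.  Freed (or unlocked) by
 * bch2_fs_usage_scratch_put() above.
 */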
169 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
170 {
171         struct bch_fs_usage *ret;
172         unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
173
174         ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
175         if (ret)
176                 return ret;
177
178         if (mutex_trylock(&c->usage_scratch_lock))
179                 goto out_pool;
180
181         ret = kzalloc(bytes, GFP_NOFS);
182         if (ret)
183                 return ret;
184
185         mutex_lock(&c->usage_scratch_lock);
186 out_pool:
187         ret = c->usage_scratch;
188         memset(ret, 0, bytes);
189         return ret;
190 }
191
192 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
193 {
194         struct bch_dev_usage ret;
195
196         memset(&ret, 0, sizeof(ret));
197         acc_u64s_percpu((u64 *) &ret,
198                         (u64 __percpu *) ca->usage[0],
199                         sizeof(ret) / sizeof(u64));
200
201         return ret;
202 }
203
204 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
205                                                 unsigned journal_seq,
206                                                 bool gc)
207 {
208         return this_cpu_ptr(gc
209                             ? c->usage_gc
210                             : c->usage[journal_seq & JOURNAL_BUF_MASK]);
211 }
212
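/*
 * Read a single fs_usage counter: the base value plus whatever has
 * accumulated in the per-journal-buffer percpu counters but not yet been
 * folded into usage_base, retrying if bch2_fs_usage_acc_to_base() runs
 * concurrently (usage_lock seqcount).
 */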
213 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
214 {
215         ssize_t offset = v - (u64 *) c->usage_base;
216         unsigned i, seq;
217         u64 ret;
218
219         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
220         percpu_rwsem_assert_held(&c->mark_lock);
221
222         do {
223                 seq = read_seqcount_begin(&c->usage_lock);
224                 ret = *v;
225
226                 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
227                         ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
228         } while (read_seqcount_retry(&c->usage_lock, seq));
229
230         return ret;
231 }
232
233 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
234 {
235         struct bch_fs_usage *ret;
236         unsigned seq, i, v, u64s = fs_usage_u64s(c);
237 retry:
238         ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
239         if (unlikely(!ret))
240                 return NULL;
241
242         percpu_down_read(&c->mark_lock);
243
244         v = fs_usage_u64s(c);
245         if (unlikely(u64s != v)) {
246                 u64s = v;
247                 percpu_up_read(&c->mark_lock);
248                 kfree(ret);
249                 goto retry;
250         }
251
252         do {
253                 seq = read_seqcount_begin(&c->usage_lock);
254                 memcpy(ret, c->usage_base, u64s * sizeof(u64));
255                 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
256                         acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[i], u64s);
257         } while (read_seqcount_retry(&c->usage_lock, seq));
258
259         return ret;
260 }
261
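/*
 * Fold the percpu accumulator for journal buffer @idx into usage_base and
 * zero it, under the usage_lock write seqcount so that concurrent readers
 * retry instead of seeing the delta twice or not at all.
 */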
262 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
263 {
264         unsigned u64s = fs_usage_u64s(c);
265
266         BUG_ON(idx >= ARRAY_SIZE(c->usage));
267
268         preempt_disable();
269         write_seqcount_begin(&c->usage_lock);
270
271         acc_u64s_percpu((u64 *) c->usage_base,
272                         (u64 __percpu *) c->usage[idx], u64s);
273         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
274
275         write_seqcount_end(&c->usage_lock);
276         preempt_enable();
277 }
278
279 void bch2_fs_usage_to_text(struct printbuf *out,
280                            struct bch_fs *c,
281                            struct bch_fs_usage *fs_usage)
282 {
283         unsigned i;
284
285         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
286
287         pr_buf(out, "hidden:\t\t\t\t%llu\n",
288                fs_usage->hidden);
289         pr_buf(out, "data:\t\t\t\t%llu\n",
290                fs_usage->data);
291         pr_buf(out, "cached:\t\t\t\t%llu\n",
292                fs_usage->cached);
293         pr_buf(out, "reserved:\t\t\t%llu\n",
294                fs_usage->reserved);
295         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
296                fs_usage->nr_inodes);
297         pr_buf(out, "online reserved:\t\t%llu\n",
298                fs_usage->online_reserved);
299
300         for (i = 0;
301              i < ARRAY_SIZE(fs_usage->persistent_reserved);
302              i++) {
303                 pr_buf(out, "%u replicas:\n", i + 1);
304                 pr_buf(out, "\treserved:\t\t%llu\n",
305                        fs_usage->persistent_reserved[i]);
306         }
307
308         for (i = 0; i < c->replicas.nr; i++) {
309                 struct bch_replicas_entry *e =
310                         cpu_replicas_entry(&c->replicas, i);
311
312                 pr_buf(out, "\t");
313                 bch2_replicas_entry_to_text(out, e);
314                 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
315         }
316 }
317
318 #define RESERVE_FACTOR  6
319
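/*
 * reserve_factor() pads a reservation by roughly 1/2^RESERVE_FACTOR (1/64
 * here); avail_factor() is approximately its inverse, scaling by 64/65.
 * E.g. for a multiple of 64: reserve_factor(6400) = 6400 + 100 = 6500,
 * and avail_factor(6500) = 6500 * 64 / 65 = 6400.
 */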
320 static u64 reserve_factor(u64 r)
321 {
322         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
323 }
324
325 static u64 avail_factor(u64 r)
326 {
327         return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
328 }
329
330 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
331 {
332         return min(fs_usage->hidden +
333                    fs_usage->btree +
334                    fs_usage->data +
335                    reserve_factor(fs_usage->reserved +
336                                   fs_usage->online_reserved),
337                    c->capacity);
338 }
339
340 static struct bch_fs_usage_short
341 __bch2_fs_usage_read_short(struct bch_fs *c)
342 {
343         struct bch_fs_usage_short ret;
344         u64 data, reserved;
345
346         ret.capacity = c->capacity -
347                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
348
349         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
350                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
351         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
352                 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
353
354         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
355         ret.free        = ret.capacity - ret.used;
356
357         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
358
359         return ret;
360 }
361
362 struct bch_fs_usage_short
363 bch2_fs_usage_read_short(struct bch_fs *c)
364 {
365         struct bch_fs_usage_short ret;
366
367         percpu_down_read(&c->mark_lock);
368         ret = __bch2_fs_usage_read_short(c);
369         percpu_up_read(&c->mark_lock);
370
371         return ret;
372 }
373
374 static inline int is_unavailable_bucket(struct bucket_mark m)
375 {
376         return !is_available_bucket(m);
377 }
378
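/*
 * A bucket counts as fragmented when it holds user data, isn't owned by the
 * allocator and isn't full: its unused sectors feed
 * dev_usage->sectors_fragmented in bch2_dev_usage_update().
 */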
379 static inline int is_fragmented_bucket(struct bucket_mark m,
380                                        struct bch_dev *ca)
381 {
382         if (!m.owned_by_allocator &&
383             m.data_type == BCH_DATA_user &&
384             bucket_sectors_used(m))
385                 return max_t(int, 0, (int) ca->mi.bucket_size -
386                              bucket_sectors_used(m));
387         return 0;
388 }
389
390 static inline int is_stripe_data_bucket(struct bucket_mark m)
391 {
392         return m.stripe && m.data_type != BCH_DATA_parity;
393 }
394
395 static inline int bucket_stripe_sectors(struct bucket_mark m)
396 {
397         return is_stripe_data_bucket(m) ? m.dirty_sectors : 0;
398 }
399
400 static inline enum bch_data_type bucket_type(struct bucket_mark m)
401 {
402         return m.cached_sectors && !m.dirty_sectors
403                 ? BCH_DATA_cached
404                 : m.data_type;
405 }
406
407 static bool bucket_became_unavailable(struct bucket_mark old,
408                                       struct bucket_mark new)
409 {
410         return is_available_bucket(old) &&
411                !is_available_bucket(new);
412 }
413
414 int bch2_fs_usage_apply(struct bch_fs *c,
415                         struct bch_fs_usage *fs_usage,
416                         struct disk_reservation *disk_res,
417                         unsigned journal_seq)
418 {
419         s64 added = fs_usage->data + fs_usage->reserved;
420         s64 should_not_have_added;
421         int ret = 0;
422
423         percpu_rwsem_assert_held(&c->mark_lock);
424
425         /*
426          * Not allowed to reduce sectors_available except by getting a
427          * reservation:
428          */
429         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
430         if (WARN_ONCE(should_not_have_added > 0,
431                       "disk usage increased by %lli more than reservation of %llu",
432                       added, disk_res ? disk_res->sectors : 0)) {
433                 atomic64_sub(should_not_have_added, &c->sectors_available);
434                 added -= should_not_have_added;
435                 ret = -1;
436         }
437
438         if (added > 0) {
439                 disk_res->sectors               -= added;
440                 fs_usage->online_reserved       -= added;
441         }
442
443         preempt_disable();
444         acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
445                  (u64 *) fs_usage, fs_usage_u64s(c));
446         preempt_enable();
447
448         return ret;
449 }
450
451 static inline void account_bucket(struct bch_fs_usage *fs_usage,
452                                   struct bch_dev_usage *dev_usage,
453                                   enum bch_data_type type,
454                                   int nr, s64 size)
455 {
456         if (type == BCH_DATA_sb || type == BCH_DATA_journal)
457                 fs_usage->hidden        += size;
458
459         dev_usage->buckets[type]        += nr;
460 }
461
462 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
463                                   struct bch_fs_usage *fs_usage,
464                                   struct bucket_mark old, struct bucket_mark new,
465                                   bool gc)
466 {
467         struct bch_dev_usage *u;
468
469         percpu_rwsem_assert_held(&c->mark_lock);
470
471         preempt_disable();
472         u = this_cpu_ptr(ca->usage[gc]);
473
474         if (bucket_type(old))
475                 account_bucket(fs_usage, u, bucket_type(old),
476                                -1, -ca->mi.bucket_size);
477
478         if (bucket_type(new))
479                 account_bucket(fs_usage, u, bucket_type(new),
480                                1, ca->mi.bucket_size);
481
482         u->buckets_alloc +=
483                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
484         u->buckets_unavailable +=
485                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
486
487         u->buckets_ec += (int) new.stripe - (int) old.stripe;
488         u->sectors_ec += bucket_stripe_sectors(new) -
489                          bucket_stripe_sectors(old);
490
491         u->sectors[old.data_type] -= old.dirty_sectors;
492         u->sectors[new.data_type] += new.dirty_sectors;
493         u->sectors[BCH_DATA_cached] +=
494                 (int) new.cached_sectors - (int) old.cached_sectors;
495         u->sectors_fragmented +=
496                 is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
497         preempt_enable();
498
499         if (!is_available_bucket(old) && is_available_bucket(new))
500                 bch2_wake_allocator(ca);
501 }
502
503 __flatten
504 void bch2_dev_usage_from_buckets(struct bch_fs *c)
505 {
506         struct bch_dev *ca;
507         struct bucket_mark old = { .v.counter = 0 };
508         struct bucket_array *buckets;
509         struct bucket *g;
510         unsigned i;
511         int cpu;
512
513         c->usage_base->hidden = 0;
514
515         for_each_member_device(ca, c, i) {
516                 for_each_possible_cpu(cpu)
517                         memset(per_cpu_ptr(ca->usage[0], cpu), 0,
518                                sizeof(*ca->usage[0]));
519
520                 buckets = bucket_array(ca);
521
522                 for_each_bucket(g, buckets)
523                         bch2_dev_usage_update(c, ca, c->usage_base,
524                                               old, g->mark, false);
525         }
526 }
527
528 static inline int update_replicas(struct bch_fs *c,
529                                   struct bch_fs_usage *fs_usage,
530                                   struct bch_replicas_entry *r,
531                                   s64 sectors)
532 {
533         int idx = bch2_replicas_entry_idx(c, r);
534
535         if (idx < 0)
536                 return -1;
537
538         if (!fs_usage)
539                 return 0;
540
541         fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
542         fs_usage->replicas[idx]         += sectors;
543         return 0;
544 }
545
546 static inline void update_cached_sectors(struct bch_fs *c,
547                                          struct bch_fs_usage *fs_usage,
548                                          unsigned dev, s64 sectors)
549 {
550         struct bch_replicas_padded r;
551
552         bch2_replicas_entry_cached(&r.e, dev);
553
554         update_replicas(c, fs_usage, &r.e, sectors);
555 }
556
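/*
 * The transactional path doesn't update fs_usage directly: per
 * replicas-entry sector deltas are buffered in trans->fs_usage_deltas and
 * applied at commit time via bch2_replicas_delta_list_apply().
 */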
557 static struct replicas_delta_list *
558 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
559 {
560         struct replicas_delta_list *d = trans->fs_usage_deltas;
561         unsigned new_size = d ? (d->size + more) * 2 : 128;
562
563         if (!d || d->used + more > d->size) {
564                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
565                 BUG_ON(!d);
566
567                 d->size = new_size;
568                 trans->fs_usage_deltas = d;
569         }
570         return d;
571 }
572
573 static inline void update_replicas_list(struct btree_trans *trans,
574                                         struct bch_replicas_entry *r,
575                                         s64 sectors)
576 {
577         struct replicas_delta_list *d;
578         struct replicas_delta *n;
579         unsigned b;
580
581         if (!sectors)
582                 return;
583
584         b = replicas_entry_bytes(r) + 8;
585         d = replicas_deltas_realloc(trans, b);
586
587         n = (void *) d->d + d->used;
588         n->delta = sectors;
589         memcpy(&n->r, r, replicas_entry_bytes(r));
590         d->used += b;
591 }
592
593 static inline void update_cached_sectors_list(struct btree_trans *trans,
594                                               unsigned dev, s64 sectors)
595 {
596         struct bch_replicas_padded r;
597
598         bch2_replicas_entry_cached(&r.e, dev);
599
600         update_replicas_list(trans, &r.e, sectors);
601 }
602
603 static inline struct replicas_delta *
604 replicas_delta_next(struct replicas_delta *d)
605 {
606         return (void *) d + replicas_entry_bytes(&d->r) + 8;
607 }
608
609 int bch2_replicas_delta_list_apply(struct bch_fs *c,
610                                    struct bch_fs_usage *fs_usage,
611                                    struct replicas_delta_list *r)
612 {
613         struct replicas_delta *d = r->d;
614         struct replicas_delta *top = (void *) r->d + r->used;
615         unsigned i;
616
617         for (d = r->d; d != top; d = replicas_delta_next(d))
618                 if (update_replicas(c, fs_usage, &d->r, d->delta)) {
619                         top = d;
620                         goto unwind;
621                 }
622
623         if (!fs_usage)
624                 return 0;
625
626         fs_usage->nr_inodes += r->nr_inodes;
627
628         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
629                 fs_usage->reserved += r->persistent_reserved[i];
630                 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
631         }
632
633         return 0;
634 unwind:
635         for (d = r->d; d != top; d = replicas_delta_next(d))
636                 update_replicas(c, fs_usage, &d->r, -d->delta);
637         return -1;
638 }
639
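/*
 * Run @fn against the live bucket marks when BTREE_TRIGGER_GC isn't set
 * (and also against the gc copy if mark and sweep gc has already visited
 * @pos), or against just the gc copy when it is set.
 */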
640 #define do_mark_fn(fn, c, pos, flags, ...)                              \
641 ({                                                                      \
642         int gc, ret = 0;                                                \
643                                                                         \
644         percpu_rwsem_assert_held(&c->mark_lock);                        \
645                                                                         \
646         for (gc = 0; gc < 2 && !ret; gc++)                              \
647                 if (!gc == !(flags & BTREE_TRIGGER_GC) ||               \
648                     (gc && gc_visited(c, pos)))                         \
649                         ret = fn(c, __VA_ARGS__, gc);                   \
650         ret;                                                            \
651 })
652
653 static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
654                                     size_t b, struct bucket_mark *ret,
655                                     bool gc)
656 {
657         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
658         struct bucket *g = __bucket(ca, b, gc);
659         struct bucket_mark old, new;
660
661         old = bucket_cmpxchg(g, new, ({
662                 BUG_ON(!is_available_bucket(new));
663
664                 new.owned_by_allocator  = true;
665                 new.data_type           = 0;
666                 new.cached_sectors      = 0;
667                 new.dirty_sectors       = 0;
668                 new.gen++;
669         }));
670
671         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
672
673         if (old.cached_sectors)
674                 update_cached_sectors(c, fs_usage, ca->dev_idx,
675                                       -((s64) old.cached_sectors));
676
677         if (!gc)
678                 *ret = old;
679         return 0;
680 }
681
682 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
683                             size_t b, struct bucket_mark *old)
684 {
685         do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
686                    ca, b, old);
687
688         if (!old->owned_by_allocator && old->cached_sectors)
689                 trace_invalidate(ca, bucket_to_sector(ca, b),
690                                  old->cached_sectors);
691 }
692
693 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
694                                     size_t b, bool owned_by_allocator,
695                                     bool gc)
696 {
697         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
698         struct bucket *g = __bucket(ca, b, gc);
699         struct bucket_mark old, new;
700
701         old = bucket_cmpxchg(g, new, ({
702                 new.owned_by_allocator  = owned_by_allocator;
703         }));
704
705         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
706
707         BUG_ON(!gc &&
708                !owned_by_allocator && !old.owned_by_allocator);
709
710         return 0;
711 }
712
713 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
714                             size_t b, bool owned_by_allocator,
715                             struct gc_pos pos, unsigned flags)
716 {
717         preempt_disable();
718
719         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
720                    ca, b, owned_by_allocator);
721
722         preempt_enable();
723 }
724
725 static int bch2_mark_alloc(struct bch_fs *c,
726                            struct bkey_s_c old, struct bkey_s_c new,
727                            struct bch_fs_usage *fs_usage,
728                            u64 journal_seq, unsigned flags)
729 {
730         bool gc = flags & BTREE_TRIGGER_GC;
731         struct bkey_alloc_unpacked u;
732         struct bch_dev *ca;
733         struct bucket *g;
734         struct bucket_mark old_m, m;
735
736         /* We don't do anything for deletions - do we?: */
737         if (new.k->type != KEY_TYPE_alloc)
738                 return 0;
739
740         /*
741          * alloc btree is read in by bch2_alloc_read, not gc:
742          */
743         if ((flags & BTREE_TRIGGER_GC) &&
744             !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
745                 return 0;
746
747         ca = bch_dev_bkey_exists(c, new.k->p.inode);
748
749         if (new.k->p.offset >= ca->mi.nbuckets)
750                 return 0;
751
752         g = __bucket(ca, new.k->p.offset, gc);
753         u = bch2_alloc_unpack(new);
754
755         old_m = bucket_cmpxchg(g, m, ({
756                 m.gen                   = u.gen;
757                 m.data_type             = u.data_type;
758                 m.dirty_sectors         = u.dirty_sectors;
759                 m.cached_sectors        = u.cached_sectors;
760
761                 if (journal_seq) {
762                         m.journal_seq_valid     = 1;
763                         m.journal_seq           = journal_seq;
764                 }
765         }));
766
767         bch2_dev_usage_update(c, ca, fs_usage, old_m, m, gc);
768
769         g->io_time[READ]        = u.read_time;
770         g->io_time[WRITE]       = u.write_time;
771         g->oldest_gen           = u.oldest_gen;
772         g->gen_valid            = 1;
773
774         /*
775          * need to know if we're getting called from the invalidate path or
776          * not:
777          */
778
779         if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
780             old_m.cached_sectors) {
781                 update_cached_sectors(c, fs_usage, ca->dev_idx,
782                                       -old_m.cached_sectors);
783                 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
784                                  old_m.cached_sectors);
785         }
786
787         return 0;
788 }
789
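/*
 * Saturating add for 16 bit sector counts: clamps the result to U16_MAX and
 * evaluates to true if the addition overflowed.
 */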
790 #define checked_add(a, b)                                       \
791 ({                                                              \
792         unsigned _res = (unsigned) (a) + (b);                   \
793         bool overflow = _res > U16_MAX;                         \
794         if (overflow)                                           \
795                 _res = U16_MAX;                                 \
796         (a) = _res;                                             \
797         overflow;                                               \
798 })
799
800 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
801                                        size_t b, enum bch_data_type data_type,
802                                        unsigned sectors, bool gc)
803 {
804         struct bucket *g = __bucket(ca, b, gc);
805         struct bucket_mark old, new;
806         bool overflow;
807
808         BUG_ON(data_type != BCH_DATA_sb &&
809                data_type != BCH_DATA_journal);
810
811         old = bucket_cmpxchg(g, new, ({
812                 new.data_type   = data_type;
813                 overflow = checked_add(new.dirty_sectors, sectors);
814         }));
815
816         bch2_fs_inconsistent_on(old.data_type &&
817                                 old.data_type != data_type, c,
818                 "different types of data in same bucket: %s, %s",
819                 bch2_data_types[old.data_type],
820                 bch2_data_types[data_type]);
821
822         bch2_fs_inconsistent_on(overflow, c,
823                 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
824                 ca->dev_idx, b, new.gen,
825                 bch2_data_types[old.data_type ?: data_type],
826                 old.dirty_sectors, sectors);
827
828         if (c)
829                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
830                                       old, new, gc);
831
832         return 0;
833 }
834
835 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
836                                size_t b, enum bch_data_type type,
837                                unsigned sectors, struct gc_pos pos,
838                                unsigned flags)
839 {
840         BUG_ON(type != BCH_DATA_sb &&
841                type != BCH_DATA_journal);
842
843         preempt_disable();
844
845         if (likely(c)) {
846                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
847                            ca, b, type, sectors);
848         } else {
849                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
850         }
851
852         preempt_enable();
853 }
854
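/*
 * Extents may be compressed, so the disk sectors a pointer consumes are its
 * live sectors scaled by compressed_size/uncompressed_size (n/d below),
 * rounded up.  The _delta helpers compute the change in disk sectors when an
 * existing extent of @old_size is partially overwritten (delta is negative
 * in the overwrite cases).
 */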
855 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
856 {
857         return DIV_ROUND_UP(sectors * n, d);
858 }
859
860 static s64 __ptr_disk_sectors_delta(unsigned old_size,
861                                     unsigned offset, s64 delta,
862                                     unsigned flags,
863                                     unsigned n, unsigned d)
864 {
865         BUG_ON(!n || !d);
866
867         if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
868                 BUG_ON(offset + -delta > old_size);
869
870                 return -disk_sectors_scaled(n, d, old_size) +
871                         disk_sectors_scaled(n, d, offset) +
872                         disk_sectors_scaled(n, d, old_size - offset + delta);
873         } else if (flags & BTREE_TRIGGER_OVERWRITE) {
874                 BUG_ON(offset + -delta > old_size);
875
876                 return -disk_sectors_scaled(n, d, old_size) +
877                         disk_sectors_scaled(n, d, old_size + delta);
878         } else {
879                 return  disk_sectors_scaled(n, d, delta);
880         }
881 }
882
883 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
884                                   unsigned offset, s64 delta,
885                                   unsigned flags)
886 {
887         return __ptr_disk_sectors_delta(p.crc.live_size,
888                                         offset, delta, flags,
889                                         p.crc.compressed_size,
890                                         p.crc.uncompressed_size);
891 }
892
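/*
 * Common validation for marking a pointer into a bucket: the pointer's gen
 * must not be newer than, or too stale relative to, the bucket gen; dirty
 * pointers must not be stale; data types within a bucket must not conflict;
 * and the sector count must not overflow a u16.  Returns -EIO (after
 * fsck_err) on inconsistency, 1 for a stale cached pointer, 0 otherwise.
 */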
893 static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
894                             const struct bch_extent_ptr *ptr,
895                             s64 sectors, enum bch_data_type ptr_data_type,
896                             u8 bucket_gen, u8 bucket_data_type,
897                             u16 dirty_sectors, u16 cached_sectors)
898 {
899         size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
900         u16 bucket_sectors = !ptr->cached
901                 ? dirty_sectors
902                 : cached_sectors;
903         char buf[200];
904
905         if (gen_after(ptr->gen, bucket_gen)) {
906                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
907                         "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
908                         "while marking %s",
909                         ptr->dev, bucket_nr, bucket_gen,
910                         bch2_data_types[bucket_data_type ?: ptr_data_type],
911                         ptr->gen,
912                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
913                 return -EIO;
914         }
915
916         if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
917                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
918                         "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
919                         "while marking %s",
920                         ptr->dev, bucket_nr, bucket_gen,
921                         bch2_data_types[bucket_data_type ?: ptr_data_type],
922                         ptr->gen,
923                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
924                 return -EIO;
925         }
926
927         if (bucket_gen != ptr->gen && !ptr->cached) {
928                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
929                         "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
930                         "while marking %s",
931                         ptr->dev, bucket_nr, bucket_gen,
932                         bch2_data_types[bucket_data_type ?: ptr_data_type],
933                         ptr->gen,
934                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
935                 return -EIO;
936         }
937
938         if (bucket_gen != ptr->gen)
939                 return 1;
940
941         if (bucket_data_type && ptr_data_type &&
942             bucket_data_type != ptr_data_type) {
943                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
944                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
945                         "while marking %s",
946                         ptr->dev, bucket_nr, bucket_gen,
947                         bch2_data_types[bucket_data_type],
948                         bch2_data_types[ptr_data_type],
949                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
950                 return -EIO;
951         }
952
953         if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
954                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
955                         "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
956                         "while marking %s",
957                         ptr->dev, bucket_nr, bucket_gen,
958                         bch2_data_types[bucket_data_type ?: ptr_data_type],
959                         bucket_sectors, sectors,
960                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
961                 return -EIO;
962         }
963
964         return 0;
965 }
966
967 static int bucket_set_stripe(struct bch_fs *c, struct bkey_s_c k,
968                              unsigned ptr_idx,
969                              struct bch_fs_usage *fs_usage,
970                              u64 journal_seq, unsigned flags,
971                              bool enabled)
972 {
973         const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
974         unsigned nr_data = s->nr_blocks - s->nr_redundant;
975         bool parity = ptr_idx >= nr_data;
976         const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
977         bool gc = flags & BTREE_TRIGGER_GC;
978         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
979         struct bucket *g = PTR_BUCKET(ca, ptr, gc);
980         struct bucket_mark new, old;
981         char buf[200];
982         int ret;
983
984         if (enabled)
985                 g->ec_redundancy = s->nr_redundant;
986
987         old = bucket_cmpxchg(g, new, ({
988                 ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
989                                        new.dirty_sectors, new.cached_sectors);
990                 if (ret)
991                         return ret;
992
993                 if (new.stripe && enabled)
994                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
995                                       "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
996                                       ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
997                                       (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
998
999                 if (!new.stripe && !enabled)
1000                         bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
1001                                       "bucket %u:%zu gen %u: deleting stripe but not marked\n%s",
1002                                       ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
1003                                       (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
1004
1005                 new.stripe                      = enabled;
1006
1007                 if ((flags & BTREE_TRIGGER_GC) && parity) {
1008                         new.data_type = enabled ? BCH_DATA_parity : 0;
 1009                         new.dirty_sectors = enabled ? le16_to_cpu(s->sectors) : 0;
1010                 }
1011
1012                 if (journal_seq) {
1013                         new.journal_seq_valid   = 1;
1014                         new.journal_seq         = journal_seq;
1015                 }
1016         }));
1017
1018         if (!enabled)
1019                 g->ec_redundancy = 0;
1020
1021         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
1022         return 0;
1023 }
1024
1025 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1026                           const struct bch_extent_ptr *ptr,
1027                           s64 sectors, enum bch_data_type ptr_data_type,
1028                           u8 bucket_gen, u8 *bucket_data_type,
1029                           u16 *dirty_sectors, u16 *cached_sectors)
1030 {
1031         u16 *dst_sectors = !ptr->cached
1032                 ? dirty_sectors
1033                 : cached_sectors;
1034         int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
1035                                    bucket_gen, *bucket_data_type,
1036                                    *dirty_sectors, *cached_sectors);
1037
1038         if (ret)
1039                 return ret;
1040
1041         *dst_sectors += sectors;
1042         *bucket_data_type = *dirty_sectors || *cached_sectors
1043                 ? ptr_data_type : 0;
1044         return 0;
1045 }
1046
1047 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1048                              struct extent_ptr_decoded p,
1049                              s64 sectors, enum bch_data_type data_type,
1050                              struct bch_fs_usage *fs_usage,
1051                              u64 journal_seq, unsigned flags)
1052 {
1053         bool gc = flags & BTREE_TRIGGER_GC;
1054         struct bucket_mark old, new;
1055         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1056         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
1057         u8 bucket_data_type;
1058         u64 v;
1059         int ret;
1060
1061         v = atomic64_read(&g->_mark.v);
1062         do {
1063                 new.v.counter = old.v.counter = v;
1064                 bucket_data_type = new.data_type;
1065
1066                 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
1067                                      &bucket_data_type,
1068                                      &new.dirty_sectors,
1069                                      &new.cached_sectors);
1070                 if (ret)
1071                         return ret;
1072
1073                 new.data_type = bucket_data_type;
1074
1075                 if (journal_seq) {
1076                         new.journal_seq_valid = 1;
1077                         new.journal_seq = journal_seq;
1078                 }
1079
1080                 if (flags & BTREE_TRIGGER_NOATOMIC) {
1081                         g->_mark = new;
1082                         break;
1083                 }
1084         } while ((v = atomic64_cmpxchg(&g->_mark.v,
1085                               old.v.counter,
1086                               new.v.counter)) != old.v.counter);
1087
1088         bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
1089
1090         BUG_ON(!gc && bucket_became_unavailable(old, new));
1091
1092         return 0;
1093 }
1094
1095 static int bch2_mark_stripe_ptr(struct bch_fs *c,
1096                                 struct bch_extent_stripe_ptr p,
1097                                 enum bch_data_type data_type,
1098                                 struct bch_fs_usage *fs_usage,
1099                                 s64 sectors, unsigned flags)
1100 {
1101         bool gc = flags & BTREE_TRIGGER_GC;
1102         struct bch_replicas_padded r;
1103         struct stripe *m;
1104         unsigned i, blocks_nonempty = 0;
1105
1106         m = genradix_ptr(&c->stripes[gc], p.idx);
1107
1108         spin_lock(&c->ec_stripes_heap_lock);
1109
1110         if (!m || !m->alive) {
1111                 spin_unlock(&c->ec_stripes_heap_lock);
1112                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1113                                     (u64) p.idx);
1114                 return -EIO;
1115         }
1116
1117         m->block_sectors[p.block] += sectors;
1118
1119         r = m->r;
1120
1121         for (i = 0; i < m->nr_blocks; i++)
1122                 blocks_nonempty += m->block_sectors[i] != 0;
1123
1124         if (m->blocks_nonempty != blocks_nonempty) {
1125                 m->blocks_nonempty = blocks_nonempty;
1126                 if (!gc)
1127                         bch2_stripes_heap_update(c, m, p.idx);
1128         }
1129
1130         spin_unlock(&c->ec_stripes_heap_lock);
1131
1132         r.e.data_type = data_type;
1133         update_replicas(c, fs_usage, &r.e, sectors);
1134
1135         return 0;
1136 }
1137
1138 static int bch2_mark_extent(struct bch_fs *c,
1139                             struct bkey_s_c old, struct bkey_s_c new,
1140                             unsigned offset, s64 sectors,
1141                             enum bch_data_type data_type,
1142                             struct bch_fs_usage *fs_usage,
1143                             unsigned journal_seq, unsigned flags)
1144 {
1145         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1146         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1147         const union bch_extent_entry *entry;
1148         struct extent_ptr_decoded p;
1149         struct bch_replicas_padded r;
1150         s64 dirty_sectors = 0;
1151         bool stale;
1152         int ret;
1153
1154         r.e.data_type   = data_type;
1155         r.e.nr_devs     = 0;
1156         r.e.nr_required = 1;
1157
1158         BUG_ON(!sectors);
1159
1160         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1161                 s64 disk_sectors = data_type == BCH_DATA_btree
1162                         ? sectors
1163                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1164
1165                 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
1166                                         fs_usage, journal_seq, flags);
1167                 if (ret < 0)
1168                         return ret;
1169
1170                 stale = ret > 0;
1171
1172                 if (p.ptr.cached) {
1173                         if (!stale)
1174                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1175                                                       disk_sectors);
1176                 } else if (!p.has_ec) {
1177                         dirty_sectors          += disk_sectors;
1178                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1179                 } else {
1180                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1181                                         fs_usage, disk_sectors, flags);
1182                         if (ret)
1183                                 return ret;
1184
1185                         /*
 1186                          * There may be other dirty pointers in this extent, but
 1187                          * if so they're not required for mounting, since this
 1188                          * extent has an erasure coded pointer:
1189                          */
1190                         r.e.nr_required = 0;
1191                 }
1192         }
1193
1194         if (r.e.nr_devs)
1195                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1196
1197         return 0;
1198 }
1199
1200 static int bch2_mark_stripe(struct bch_fs *c,
1201                             struct bkey_s_c old, struct bkey_s_c new,
1202                             struct bch_fs_usage *fs_usage,
1203                             u64 journal_seq, unsigned flags)
1204 {
1205         bool gc = flags & BTREE_TRIGGER_GC;
1206         size_t idx = new.k->p.offset;
1207         const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1208                 ? bkey_s_c_to_stripe(old).v : NULL;
1209         const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1210                 ? bkey_s_c_to_stripe(new).v : NULL;
1211         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1212         unsigned i;
1213         int ret;
1214
1215         if (!m || (old_s && !m->alive)) {
1216                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1217                                     idx);
1218                 return -1;
1219         }
1220
1221         if (!new_s) {
1222                 /* Deleting: */
1223                 for (i = 0; i < old_s->nr_blocks; i++) {
1224                         ret = bucket_set_stripe(c, old, i, fs_usage,
1225                                                 journal_seq, flags, false);
1226                         if (ret)
1227                                 return ret;
1228                 }
1229
1230                 if (!gc && m->on_heap) {
1231                         spin_lock(&c->ec_stripes_heap_lock);
1232                         bch2_stripes_heap_del(c, m, idx);
1233                         spin_unlock(&c->ec_stripes_heap_lock);
1234                 }
1235
1236                 if (gc)
1237                         update_replicas(c, fs_usage, &m->r.e,
1238                                         -((s64) m->sectors * m->nr_redundant));
1239
1240                 memset(m, 0, sizeof(*m));
1241         } else {
1242                 BUG_ON(old_s && new_s->nr_blocks != old_s->nr_blocks);
1243                 BUG_ON(old_s && new_s->nr_redundant != old_s->nr_redundant);
1244
1245                 for (i = 0; i < new_s->nr_blocks; i++) {
1246                         if (!old_s ||
1247                             memcmp(new_s->ptrs + i,
1248                                    old_s->ptrs + i,
1249                                    sizeof(struct bch_extent_ptr))) {
1250
1251                                 if (old_s) {
 1252                         ret = bucket_set_stripe(c, old, i, fs_usage,
 1253                                                 journal_seq, flags, false);
1254                                         if (ret)
1255                                                 return ret;
1256                                 }
1257                                 ret = bucket_set_stripe(c, new, i, fs_usage,
1258                                                         journal_seq, flags, true);
1259                                 if (ret)
1260                                         return ret;
1261                         }
1262                 }
1263
1264                 m->alive        = true;
1265                 m->sectors      = le16_to_cpu(new_s->sectors);
1266                 m->algorithm    = new_s->algorithm;
1267                 m->nr_blocks    = new_s->nr_blocks;
1268                 m->nr_redundant = new_s->nr_redundant;
1269                 m->blocks_nonempty = 0;
1270
1271                 for (i = 0; i < new_s->nr_blocks; i++) {
1272                         m->block_sectors[i] =
1273                                 stripe_blockcount_get(new_s, i);
1274                         m->blocks_nonempty += !!m->block_sectors[i];
1275                 }
1276
1277                 if (gc && old_s)
1278                         update_replicas(c, fs_usage, &m->r.e,
1279                                         -((s64) m->sectors * m->nr_redundant));
1280
1281                 bch2_bkey_to_replicas(&m->r.e, new);
1282
1283                 if (gc)
1284                         update_replicas(c, fs_usage, &m->r.e,
1285                                         ((s64) m->sectors * m->nr_redundant));
1286
1287                 if (!gc) {
1288                         spin_lock(&c->ec_stripes_heap_lock);
1289                         bch2_stripes_heap_update(c, m, idx);
1290                         spin_unlock(&c->ec_stripes_heap_lock);
1291                 }
1292         }
1293
1294         return 0;
1295 }
1296
1297 static int bch2_mark_key_locked(struct bch_fs *c,
1298                    struct bkey_s_c old,
1299                    struct bkey_s_c new,
1300                    unsigned offset, s64 sectors,
1301                    struct bch_fs_usage *fs_usage,
1302                    u64 journal_seq, unsigned flags)
1303 {
1304         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1305         int ret = 0;
1306
1307         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1308
1309         preempt_disable();
1310
1311         if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1312                 fs_usage = fs_usage_ptr(c, journal_seq,
1313                                         flags & BTREE_TRIGGER_GC);
1314
1315         switch (k.k->type) {
1316         case KEY_TYPE_alloc:
1317                 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1318                 break;
1319         case KEY_TYPE_btree_ptr:
1320         case KEY_TYPE_btree_ptr_v2:
1321                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1322                         ?  c->opts.btree_node_size
1323                         : -c->opts.btree_node_size;
1324
1325                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1326                                 BCH_DATA_btree, fs_usage, journal_seq, flags);
1327                 break;
1328         case KEY_TYPE_extent:
1329         case KEY_TYPE_reflink_v:
1330                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1331                                 BCH_DATA_user, fs_usage, journal_seq, flags);
1332                 break;
1333         case KEY_TYPE_stripe:
1334                 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1335                 break;
1336         case KEY_TYPE_inode:
1337                 fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
1338                 fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
1339                 break;
1340         case KEY_TYPE_reservation: {
1341                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1342
1343                 sectors *= replicas;
1344                 replicas = clamp_t(unsigned, replicas, 1,
1345                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1346
1347                 fs_usage->reserved                              += sectors;
1348                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1349                 break;
1350         }
1351         }
1352
1353         preempt_enable();
1354
1355         return ret;
1356 }
1357
1358 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1359                   unsigned offset, s64 sectors,
1360                   struct bch_fs_usage *fs_usage,
1361                   u64 journal_seq, unsigned flags)
1362 {
1363         struct bkey deleted;
1364         struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1365         int ret;
1366
1367         bkey_init(&deleted);
1368
1369         percpu_down_read(&c->mark_lock);
1370         ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1371                                    fs_usage, journal_seq,
1372                                    BTREE_TRIGGER_INSERT|flags);
1373         percpu_up_read(&c->mark_lock);
1374
1375         return ret;
1376 }
1377
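/*
 * Mark trigger for a transactional update: marks the key being inserted and,
 * for extent btrees, walks the node iterator over the existing extents it
 * overlaps, marking each overwritten portion with the appropriate
 * offset/sectors delta and overwrite flags.
 */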
1378 int bch2_mark_update(struct btree_trans *trans,
1379                      struct btree_iter *iter,
1380                      struct bkey_i *new,
1381                      struct bch_fs_usage *fs_usage,
1382                      unsigned flags)
1383 {
1384         struct bch_fs           *c = trans->c;
1385         struct btree            *b = iter_l(iter)->b;
1386         struct btree_node_iter  node_iter = iter_l(iter)->iter;
1387         struct bkey_packed      *_old;
1388         struct bkey_s_c         old;
1389         struct bkey             unpacked;
1390         int ret = 0;
1391
1392         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1393                 return 0;
1394
1395         if (!btree_node_type_needs_gc(iter->btree_id))
1396                 return 0;
1397
1398         bkey_init(&unpacked);
1399         old = (struct bkey_s_c) { &unpacked, NULL };
1400
1401         if (!btree_node_type_is_extents(iter->btree_id)) {
1402                 /* iterators should be uptodate, shouldn't get errors here: */
1403                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1404                         old = bch2_btree_iter_peek_slot(iter);
1405                         BUG_ON(bkey_err(old));
1406                 } else {
1407                         struct bkey_cached *ck = (void *) iter->l[0].b;
1408
1409                         if (ck->valid)
1410                                 old = bkey_i_to_s_c(ck->k);
1411                 }
1412
1413                 if (old.k->type == new->k.type) {
1414                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1415                                 fs_usage, trans->journal_res.seq,
1416                                 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1417
1418                 } else {
1419                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1420                                 fs_usage, trans->journal_res.seq,
1421                                 BTREE_TRIGGER_INSERT|flags);
1422                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1423                                 fs_usage, trans->journal_res.seq,
1424                                 BTREE_TRIGGER_OVERWRITE|flags);
1425                 }
1426         } else {
1427                 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1428                 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1429                         0, new->k.size,
1430                         fs_usage, trans->journal_res.seq,
1431                         BTREE_TRIGGER_INSERT|flags);
1432
1433                 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
1434                         unsigned offset = 0;
1435                         s64 sectors;
1436
1437                         old = bkey_disassemble(b, _old, &unpacked);
1438                         sectors = -((s64) old.k->size);
1439
1440                         flags |= BTREE_TRIGGER_OVERWRITE;
1441
1442                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1443                                 return 0;
1444
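                        /*
                         * offset = where within the old extent the overwrite
                         * begins; sectors = minus the number of sectors of
                         * the old extent being overwritten:
                         */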
1445                         switch (bch2_extent_overlap(&new->k, old.k)) {
1446                         case BCH_EXTENT_OVERLAP_ALL:
1447                                 offset = 0;
1448                                 sectors = -((s64) old.k->size);
1449                                 break;
1450                         case BCH_EXTENT_OVERLAP_BACK:
1451                                 offset = bkey_start_offset(&new->k) -
1452                                         bkey_start_offset(old.k);
1453                                 sectors = bkey_start_offset(&new->k) -
1454                                         old.k->p.offset;
1455                                 break;
1456                         case BCH_EXTENT_OVERLAP_FRONT:
1457                                 offset = 0;
1458                                 sectors = bkey_start_offset(old.k) -
1459                                         new->k.p.offset;
1460                                 break;
1461                         case BCH_EXTENT_OVERLAP_MIDDLE:
1462                                 offset = bkey_start_offset(&new->k) -
1463                                         bkey_start_offset(old.k);
1464                                 sectors = -((s64) new->k.size);
1465                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1466                                 break;
1467                         }
1468
1469                         BUG_ON(sectors >= 0);
1470
1471                         ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1472                                         offset, sectors, fs_usage,
1473                                         trans->journal_res.seq, flags) ?: 1;
1474                         if (ret <= 0)
1475                                 break;
1476
1477                         bch2_btree_node_iter_advance(&node_iter, b);
1478                 }
1479         }
1480
1481         return ret;
1482 }
1483
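/*
 * Apply the bch_fs_usage deltas accumulated for a transaction.  If disk
 * usage went up by more than the transaction's disk reservation, print a
 * one-time warning along with the keys being inserted and the existing keys
 * they overlap.
 */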
1484 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1485                                struct bch_fs_usage *fs_usage)
1486 {
1487         struct bch_fs *c = trans->c;
1488         struct btree_insert_entry *i;
1489         static int warned_disk_usage = 0;
1490         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1491         char buf[200];
1492
1493         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1494                                  trans->journal_res.seq) ||
1495             warned_disk_usage ||
1496             xchg(&warned_disk_usage, 1))
1497                 return;
1498
1499         bch_err(c, "disk usage increased more than %llu sectors reserved",
1500                 disk_res_sectors);
1501
1502         trans_for_each_update(trans, i) {
1503                 pr_err("while inserting");
1504                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1505                 pr_err("%s", buf);
1506                 pr_err("overlapping with");
1507
1508                 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1509                         struct btree            *b = iter_l(i->iter)->b;
1510                         struct btree_node_iter  node_iter = iter_l(i->iter)->iter;
1511                         struct bkey_packed      *_k;
1512
1513                         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1514                                 struct bkey             unpacked;
1515                                 struct bkey_s_c         k;
1516
1517                                 pr_info("_k %px format %u", _k, _k->format);
1518                                 k = bkey_disassemble(b, _k, &unpacked);
1519
1520                                 if (btree_node_is_extents(b)
1521                                     ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1522                                     : bkey_cmp(i->k->k.p, k.k->p))
1523                                         break;
1524
1525                                 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1526                                 pr_err("%s", buf);
1527
1528                                 bch2_btree_node_iter_advance(&node_iter, b);
1529                         }
1530                 } else {
1531                         struct bkey_cached *ck = (void *) i->iter->l[0].b;
1532
1533                         if (ck->valid) {
1534                                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
1535                                 pr_err("%s", buf);
1536                         }
1537                 }
1538         }
1539 }
1540
1541 /* trans_mark: */
1542
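/*
 * Look for a pending update in this transaction that already covers @pos in
 * @btree_id; if found, return its iterator and point *k at the updated key.
 */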
1543 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1544                             enum btree_id btree_id, struct bpos pos,
1545                             struct bkey_s_c *k)
1546 {
1547         struct btree_insert_entry *i;
1548
1549         trans_for_each_update(trans, i)
1550                 if (i->iter->btree_id == btree_id &&
1551                     (btree_node_type_is_extents(btree_id)
1552                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1553                        bkey_cmp(pos, i->k->k.p) < 0
1554                      : !bkey_cmp(pos, i->iter->pos))) {
1555                         *k = bkey_i_to_s_c(i->k);
1556                         return i->iter;
1557                 }
1558
1559         return NULL;
1560 }
1561
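/*
 * Get an iterator and key for @pos: a key from a pending update in this
 * transaction if there is one (returns 1), otherwise read from the btree
 * (returns 0, or a negative error).
 */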
1562 static int trans_get_key(struct btree_trans *trans,
1563                          enum btree_id btree_id, struct bpos pos,
1564                          struct btree_iter **iter,
1565                          struct bkey_s_c *k)
1566 {
1567         unsigned flags = btree_id != BTREE_ID_ALLOC
1568                 ? BTREE_ITER_SLOTS
1569                 : BTREE_ITER_CACHED;
1570         int ret;
1571
1572         *iter = trans_get_update(trans, btree_id, pos, k);
1573         if (*iter)
1574                 return 1;
1575
1576         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1577                                     flags|BTREE_ITER_INTENT);
1578         *k = __bch2_btree_iter_peek(*iter, flags);
1579         ret = bkey_err(*k);
1580         if (ret)
1581                 bch2_trans_iter_put(trans, *iter);
1582         return ret;
1583 }
1584
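/*
 * Get an iterator positioned at the alloc key for the bucket @ptr points
 * into, with its current contents unpacked into @u - taken from a pending
 * update if this transaction already has one, otherwise from the in-memory
 * bucket mark.
 */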
1585 static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
1586                                          const struct bch_extent_ptr *ptr,
1587                                          struct bkey_alloc_unpacked *u)
1588 {
1589         struct bch_fs *c = trans->c;
1590         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1591         struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1592         struct bucket *g;
1593         struct btree_iter *iter;
1594         struct bkey_s_c k;
1595         int ret;
1596
1597         iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k);
1598         if (iter) {
1599                 *u = bch2_alloc_unpack(k);
1600         } else {
1601                 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
1602                                            BTREE_ITER_CACHED|
1603                                            BTREE_ITER_CACHED_NOFILL|
1604                                            BTREE_ITER_INTENT);
1605                 ret = bch2_btree_iter_traverse(iter);
1606                 if (ret) {
1607                         bch2_trans_iter_put(trans, iter);
1608                         return ret;
1609                 }
1610
1611                 percpu_down_read(&c->mark_lock);
1612                 g = bucket(ca, pos.offset);
1613                 *u = alloc_mem_to_key(g, READ_ONCE(g->mark));
1614                 percpu_up_read(&c->mark_lock);
1615         }
1616
1617         *_iter = iter;
1618         return 0;
1619 }
1620
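/*
 * Transactionally mark one pointer of an extent: apply the sector delta to
 * the bucket's alloc information via __mark_pointer() and queue the repacked
 * alloc key as an update in this transaction.
 */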
1621 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1622                         struct bkey_s_c k, struct extent_ptr_decoded p,
1623                         s64 sectors, enum bch_data_type data_type)
1624 {
1625         struct bch_fs *c = trans->c;
1626         struct btree_iter *iter;
1627         struct bkey_alloc_unpacked u;
1628         struct bkey_i_alloc *a;
1629         int ret;
1630
1631         ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1632         if (ret)
1633                 return ret;
1634
1635         ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
1636                              &u.dirty_sectors, &u.cached_sectors);
1637         if (ret)
1638                 goto out;
1639
1640         a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
1641         ret = PTR_ERR_OR_ZERO(a);
1642         if (ret)
1643                 goto out;
1644
1645         bkey_alloc_init(&a->k_i);
1646         a->k.p = iter->pos;
1647         bch2_alloc_pack(a, u);
1648         bch2_trans_update(trans, iter, &a->k_i, 0);
1649 out:
1650         bch2_trans_iter_put(trans, iter);
1651         return ret;
1652 }
1653
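/*
 * Transactionally account sectors referenced through an erasure coded
 * pointer: bump the block's sector count in the stripe key and credit the
 * stripe's replicas entry.
 */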
1654 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1655                         struct bch_extent_stripe_ptr p,
1656                         s64 sectors, enum bch_data_type data_type)
1657 {
1658         struct bch_fs *c = trans->c;
1659         struct btree_iter *iter;
1660         struct bkey_s_c k;
1661         struct bkey_i_stripe *s;
1662         struct bch_replicas_padded r;
1663         int ret = 0;
1664
1665         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
1666         if (ret < 0)
1667                 return ret;
1668
1669         if (k.k->type != KEY_TYPE_stripe) {
1670                 bch2_fs_inconsistent(c,
1671                         "pointer to nonexistent stripe %llu",
1672                         (u64) p.idx);
1673                 ret = -EIO;
1674                 goto out;
1675         }
1676
1677         s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1678         ret = PTR_ERR_OR_ZERO(s);
1679         if (ret)
1680                 goto out;
1681
1682         bkey_reassemble(&s->k_i, k);
1683         stripe_blockcount_set(&s->v, p.block,
1684                 stripe_blockcount_get(&s->v, p.block) +
1685                 sectors);
1686         bch2_trans_update(trans, iter, &s->k_i, 0);
1687
1688         bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1689         r.e.data_type = data_type;
1690         update_replicas_list(trans, &r.e, sectors);
1691 out:
1692         bch2_trans_iter_put(trans, iter);
1693         return ret;
1694 }
1695
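/*
 * Transactional version of extent marking: for each pointer, update the
 * bucket's alloc info, cached sector counts, stripe block counts and the
 * replicas list as appropriate.
 */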
1696 static int bch2_trans_mark_extent(struct btree_trans *trans,
1697                         struct bkey_s_c k, unsigned offset,
1698                         s64 sectors, unsigned flags,
1699                         enum bch_data_type data_type)
1700 {
1701         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1702         const union bch_extent_entry *entry;
1703         struct extent_ptr_decoded p;
1704         struct bch_replicas_padded r;
1705         s64 dirty_sectors = 0;
1706         bool stale;
1707         int ret;
1708
1709         r.e.data_type   = data_type;
1710         r.e.nr_devs     = 0;
1711         r.e.nr_required = 1;
1712
1713         BUG_ON(!sectors);
1714
1715         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1716                 s64 disk_sectors = data_type == BCH_DATA_btree
1717                         ? sectors
1718                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1719
1720                 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1721                                               data_type);
1722                 if (ret < 0)
1723                         return ret;
1724
1725                 stale = ret > 0;
1726
1727                 if (p.ptr.cached) {
1728                         if (!stale)
1729                                 update_cached_sectors_list(trans, p.ptr.dev,
1730                                                            disk_sectors);
1731                 } else if (!p.has_ec) {
1732                         dirty_sectors          += disk_sectors;
1733                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1734                 } else {
1735                         ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
1736                                         disk_sectors, data_type);
1737                         if (ret)
1738                                 return ret;
1739
1740                         r.e.nr_required = 0;
1741                 }
1742         }
1743
1744         if (r.e.nr_devs)
1745                 update_replicas_list(trans, &r.e, dirty_sectors);
1746
1747         return 0;
1748 }
1749
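/*
 * Update the alloc key for one block of a stripe.  Only parity blocks have
 * their dirty sectors and data type adjusted here; for data blocks the alloc
 * key is rewritten unchanged.
 */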
1750 static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
1751                                             const struct bch_extent_ptr *ptr,
1752                                             s64 sectors, bool parity)
1753 {
1754         struct bkey_i_alloc *a;
1755         struct btree_iter *iter;
1756         struct bkey_alloc_unpacked u;
1757         int ret;
1758
1759         ret = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1760         if (ret)
1761                 return ret;
1762
1763         if (parity) {
1764                 u.dirty_sectors += sectors;
1765                 u.data_type = u.dirty_sectors
1766                         ? BCH_DATA_parity
1767                         : 0;
1768         }
1769
1770         a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
1771         ret = PTR_ERR_OR_ZERO(a);
1772         if (ret)
1773                 goto err;
1774
1775         bkey_alloc_init(&a->k_i);
1776         a->k.p = iter->pos;
1777         bch2_alloc_pack(a, u);
1778         bch2_trans_update(trans, iter, &a->k_i, 0);
1779 err:
1780         bch2_trans_iter_put(trans, iter);
1781         return ret;
1782 }
1783
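/*
 * Transactional trigger for stripe keys: when the set of block pointers
 * changes, adjust the replicas accounting for the old and new stripes and
 * update the alloc keys for each of their blocks.
 */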
1784 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1785                                   struct bkey_s_c old, struct bkey_s_c new,
1786                                   unsigned flags)
1787 {
1788         const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1789                 ? bkey_s_c_to_stripe(old).v : NULL;
1790         const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1791                 ? bkey_s_c_to_stripe(new).v : NULL;
1792         struct bch_replicas_padded r;
1793         unsigned i;
1794         int ret = 0;
1795
1796         /*
1797          * If the pointers aren't changing, we don't need to do anything:
1798          */
1799         if (new_s && old_s &&
1800             !memcmp(old_s->ptrs, new_s->ptrs,
1801                     new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
1802                 return 0;
1803
1804         if (new_s) {
1805                 unsigned nr_data = new_s->nr_blocks - new_s->nr_redundant;
1806                 s64 sectors = le16_to_cpu(new_s->sectors);
1807
1808                 bch2_bkey_to_replicas(&r.e, new);
1809                 update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
1810
1811                 for (i = 0; i < new_s->nr_blocks; i++) {
1812                         bool parity = i >= nr_data;
1813
1814                         ret = bch2_trans_mark_stripe_alloc_ref(trans,
1815                                         &new_s->ptrs[i], sectors, parity);
1816                         if (ret)
1817                                 return ret;
1818                 }
1819         }
1820
1821         if (old_s) {
1822                 unsigned nr_data = old_s->nr_blocks - old_s->nr_redundant;
1823                 s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
1824
1825                 bch2_bkey_to_replicas(&r.e, old);
1826                 update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
1827
1828                 for (i = 0; i < old_s->nr_blocks; i++) {
1829                         bool parity = i >= nr_data;
1830
1831                         ret = bch2_trans_mark_stripe_alloc_ref(trans,
1832                                         &old_s->ptrs[i], sectors, parity);
1833                         if (ret)
1834                                 return ret;
1835                 }
1836         }
1837
1838         return ret;
1839 }
1840
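/* Return a pointer to the refcount field for key types that have one: */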
1841 static __le64 *bkey_refcount(struct bkey_i *k)
1842 {
1843         switch (k->k.type) {
1844         case KEY_TYPE_reflink_v:
1845                 return &bkey_i_to_reflink_v(k)->v.refcount;
1846         case KEY_TYPE_indirect_inline_data:
1847                 return &bkey_i_to_indirect_inline_data(k)->v.refcount;
1848         default:
1849                 return NULL;
1850         }
1851 }
1852
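/*
 * Adjust the refcount of the indirect extent at @idx referenced by @p:
 * incremented on insert, decremented on overwrite, and the key is deleted
 * when the refcount reaches zero.  Returns the number of sectors handled
 * (up to the end of the indirect extent that was found), so the caller can
 * loop when the range spans multiple keys.
 */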
1853 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1854                         struct bkey_s_c_reflink_p p,
1855                         u64 idx, unsigned sectors,
1856                         unsigned flags)
1857 {
1858         struct bch_fs *c = trans->c;
1859         struct btree_iter *iter;
1860         struct bkey_s_c k;
1861         struct bkey_i *n;
1862         __le64 *refcount;
1863         s64 ret;
1864
1865         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1866                             POS(0, idx), &iter, &k);
1867         if (ret < 0)
1868                 return ret;
1869
1870         if ((flags & BTREE_TRIGGER_OVERWRITE) &&
1871             (bkey_start_offset(k.k) < idx ||
1872              k.k->p.offset > idx + sectors))
1873                 goto out;
1874
1875         sectors = k.k->p.offset - idx;
1876
1877         n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1878         ret = PTR_ERR_OR_ZERO(n);
1879         if (ret)
1880                 goto err;
1881
1882         bkey_reassemble(n, k);
1883
1884         refcount = bkey_refcount(n);
1885         if (!refcount) {
1886                 bch2_fs_inconsistent(c,
1887                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1888                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1889                 ret = -EIO;
1890                 goto err;
1891         }
1892
1893         le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
1894
1895         if (!*refcount) {
1896                 n->k.type = KEY_TYPE_deleted;
1897                 set_bkey_val_u64s(&n->k, 0);
1898         }
1899
1900         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1901         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1902
1903         bch2_trans_update(trans, iter, n, 0);
1904 out:
1905         ret = sectors;
1906 err:
1907         bch2_trans_iter_put(trans, iter);
1908         return ret;
1909 }
1910
1911 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1912                         struct bkey_s_c_reflink_p p, unsigned offset,
1913                         s64 sectors, unsigned flags)
1914 {
1915         u64 idx = le64_to_cpu(p.v->idx) + offset;
1916         s64 ret = 0;
1917
1918         sectors = abs(sectors);
1919         BUG_ON(offset + sectors > p.k->size);
1920
1921         while (sectors) {
1922                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1923                 if (ret < 0)
1924                         break;
1925
1926                 idx += ret;
1927                 sectors = max_t(s64, 0LL, sectors - ret);
1928                 ret = 0;
1929         }
1930
1931         return ret;
1932 }
1933
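/*
 * Transactional mark trigger: dispatch on key type and queue the resulting
 * alloc, stripe and reflink updates and replicas deltas on the transaction
 * instead of applying them directly.
 */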
1934 int bch2_trans_mark_key(struct btree_trans *trans,
1935                         struct bkey_s_c old,
1936                         struct bkey_s_c new,
1937                         unsigned offset, s64 sectors, unsigned flags)
1938 {
1939         struct bch_fs *c = trans->c;
1940         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1941         struct replicas_delta_list *d;
1942
1943         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1944
1945         switch (k.k->type) {
1946         case KEY_TYPE_btree_ptr:
1947         case KEY_TYPE_btree_ptr_v2:
1948                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1949                         ?  c->opts.btree_node_size
1950                         : -c->opts.btree_node_size;
1951
1952                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1953                                               flags, BCH_DATA_btree);
1954         case KEY_TYPE_extent:
1955         case KEY_TYPE_reflink_v:
1956                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1957                                               flags, BCH_DATA_user);
1958         case KEY_TYPE_stripe:
1959                 return bch2_trans_mark_stripe(trans, old, new, flags);
1960         case KEY_TYPE_inode: {
1961                 int nr = (new.k->type == KEY_TYPE_inode) -
1962                          (old.k->type == KEY_TYPE_inode);
1963
1964                 if (nr) {
1965                         d = replicas_deltas_realloc(trans, 0);
1966                         d->nr_inodes += nr;
1967                 }
1968
1969                 return 0;
1970         }
1971         case KEY_TYPE_reservation: {
1972                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1973
1974                 d = replicas_deltas_realloc(trans, 0);
1975
1976                 sectors *= replicas;
1977                 replicas = clamp_t(unsigned, replicas, 1,
1978                                    ARRAY_SIZE(d->persistent_reserved));
1979
1980                 d->persistent_reserved[replicas - 1] += sectors;
1981                 return 0;
1982         }
1983         case KEY_TYPE_reflink_p:
1984                 return bch2_trans_mark_reflink_p(trans,
1985                                         bkey_s_c_to_reflink_p(k),
1986                                         offset, sectors, flags);
1987         default:
1988                 return 0;
1989         }
1990 }
1991
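/*
 * Like bch2_mark_update(), but runs the transactional triggers: the new key
 * is marked as an insert and, for extent btrees, each overlapping existing
 * extent is marked as overwritten.
 */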
1992 int bch2_trans_mark_update(struct btree_trans *trans,
1993                            struct btree_iter *iter,
1994                            struct bkey_i *new,
1995                            unsigned flags)
1996 {
1997         struct bkey_s_c old;
1998         int ret;
1999
2000         if (unlikely(flags & BTREE_TRIGGER_NORUN))
2001                 return 0;
2002
2003         if (!btree_node_type_needs_gc(iter->btree_id))
2004                 return 0;
2005
2006         if (!btree_node_type_is_extents(iter->btree_id)) {
2007                 /* iterators should be uptodate, shouldn't get errors here: */
2008                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
2009                         old = bch2_btree_iter_peek_slot(iter);
2010                         BUG_ON(bkey_err(old));
2011                 } else {
2012                         struct bkey_cached *ck = (void *) iter->l[0].b;
2013
2014                         BUG_ON(!ck->valid);
2015                         old = bkey_i_to_s_c(ck->k);
2016                 }
2017
2018                 if (old.k->type == new->k.type) {
2019                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2020                                         BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
2021                 } else {
2022                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2023                                         BTREE_TRIGGER_INSERT|flags) ?:
2024                                 bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
2025                                         BTREE_TRIGGER_OVERWRITE|flags);
2026                 }
2027         } else {
2028                 struct btree            *b = iter_l(iter)->b;
2029                 struct btree_node_iter  node_iter = iter_l(iter)->iter;
2030                 struct bkey_packed      *_old;
2031                 struct bkey             unpacked;
2032
2033                 EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
2034
2035                 bkey_init(&unpacked);
2036                 old = (struct bkey_s_c) { &unpacked, NULL };
2037
2038                 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2039                                           0, new->k.size,
2040                                           BTREE_TRIGGER_INSERT);
2041                 if (ret)
2042                         return ret;
2043
2044                 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
2045                         unsigned flags = BTREE_TRIGGER_OVERWRITE;
2046                         unsigned offset = 0;
2047                         s64 sectors;
2048
2049                         old = bkey_disassemble(b, _old, &unpacked);
2050                         sectors = -((s64) old.k->size);
2051
2054                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
2055                                 return 0;
2056
2057                         switch (bch2_extent_overlap(&new->k, old.k)) {
2058                         case BCH_EXTENT_OVERLAP_ALL:
2059                                 offset = 0;
2060                                 sectors = -((s64) old.k->size);
2061                                 break;
2062                         case BCH_EXTENT_OVERLAP_BACK:
2063                                 offset = bkey_start_offset(&new->k) -
2064                                         bkey_start_offset(old.k);
2065                                 sectors = bkey_start_offset(&new->k) -
2066                                         old.k->p.offset;
2067                                 break;
2068                         case BCH_EXTENT_OVERLAP_FRONT:
2069                                 offset = 0;
2070                                 sectors = bkey_start_offset(old.k) -
2071                                         new->k.p.offset;
2072                                 break;
2073                         case BCH_EXTENT_OVERLAP_MIDDLE:
2074                                 offset = bkey_start_offset(&new->k) -
2075                                         bkey_start_offset(old.k);
2076                                 sectors = -((s64) new->k.size);
2077                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
2078                                 break;
2079                         }
2080
2081                         BUG_ON(sectors >= 0);
2082
2083                         ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2084                                         offset, sectors, flags);
2085                         if (ret)
2086                                 return ret;
2087
2088                         bch2_btree_node_iter_advance(&node_iter, b);
2089                 }
2090         }
2091
2092         return ret;
2093 }
2094
2095 /* Disk reservations: */
2096
2097 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
2098 {
2099         percpu_down_read(&c->mark_lock);
2100         this_cpu_sub(c->usage[0]->online_reserved,
2101                      res->sectors);
2102         percpu_up_read(&c->mark_lock);
2103
2104         res->sectors = 0;
2105 }
2106
2107 #define SECTORS_CACHE   1024
2108
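/*
 * Reserve @sectors for future writes.  The fast path takes sectors from a
 * per-cpu cache, refilled from c->sectors_available with up to SECTORS_CACHE
 * sectors of slack; if the global pool runs dry, free space is recalculated
 * under sectors_available_lock and the request fails with -ENOSPC unless
 * BCH_DISK_RESERVATION_NOFAIL is set.
 *
 * A minimal usage sketch (illustrative only; real callers typically go
 * through the reservation helpers in buckets.h):
 *
 *	struct disk_reservation res = { 0 };
 *	int ret = bch2_disk_reservation_add(c, &res, sectors, 0);
 *
 *	if (ret)
 *		return ret;
 *
 *	(do the write accounted against the reservation)
 *
 *	__bch2_disk_reservation_put(c, &res);
 */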
2109 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2110                               unsigned sectors, int flags)
2111 {
2112         struct bch_fs_pcpu *pcpu;
2113         u64 old, v, get;
2114         s64 sectors_available;
2115         int ret;
2116
2117         percpu_down_read(&c->mark_lock);
2118         preempt_disable();
2119         pcpu = this_cpu_ptr(c->pcpu);
2120
2121         if (sectors <= pcpu->sectors_available)
2122                 goto out;
2123
2124         v = atomic64_read(&c->sectors_available);
2125         do {
2126                 old = v;
2127                 get = min((u64) sectors + SECTORS_CACHE, old);
2128
2129                 if (get < sectors) {
2130                         preempt_enable();
2131                         goto recalculate;
2132                 }
2133         } while ((v = atomic64_cmpxchg(&c->sectors_available,
2134                                        old, old - get)) != old);
2135
2136         pcpu->sectors_available         += get;
2137
2138 out:
2139         pcpu->sectors_available         -= sectors;
2140         this_cpu_add(c->usage[0]->online_reserved, sectors);
2141         res->sectors                    += sectors;
2142
2143         preempt_enable();
2144         percpu_up_read(&c->mark_lock);
2145         return 0;
2146
2147 recalculate:
2148         mutex_lock(&c->sectors_available_lock);
2149
2150         percpu_u64_set(&c->pcpu->sectors_available, 0);
2151         sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2152
2153         if (sectors <= sectors_available ||
2154             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2155                 atomic64_set(&c->sectors_available,
2156                              max_t(s64, 0, sectors_available - sectors));
2157                 this_cpu_add(c->usage[0]->online_reserved, sectors);
2158                 res->sectors                    += sectors;
2159                 ret = 0;
2160         } else {
2161                 atomic64_set(&c->sectors_available, sectors_available);
2162                 ret = -ENOSPC;
2163         }
2164
2165         mutex_unlock(&c->sectors_available_lock);
2166         percpu_up_read(&c->mark_lock);
2167
2168         return ret;
2169 }
2170
2171 /* Startup/shutdown: */
2172
2173 static void buckets_free_rcu(struct rcu_head *rcu)
2174 {
2175         struct bucket_array *buckets =
2176                 container_of(rcu, struct bucket_array, rcu);
2177
2178         kvpfree(buckets,
2179                 sizeof(struct bucket_array) +
2180                 buckets->nbuckets * sizeof(struct bucket));
2181 }
2182
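/*
 * Allocate (or, when resizing, reallocate and copy) a device's bucket array,
 * nouse bitmap, freelists and allocator heap.  A resize is done with
 * gc_lock, the device's bucket_lock and mark_lock held for write, and the
 * old bucket array is freed via RCU once the new one is published.
 */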
2183 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2184 {
2185         struct bucket_array *buckets = NULL, *old_buckets = NULL;
2186         unsigned long *buckets_nouse = NULL;
2187         alloc_fifo      free[RESERVE_NR];
2188         alloc_fifo      free_inc;
2189         alloc_heap      alloc_heap;
2190
2191         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2192                              ca->mi.bucket_size / c->opts.btree_node_size);
2193         /* XXX: these should be tunable */
2194         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
2195         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 7);
2196         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
2197                                       btree_reserve * 2);
2198         bool resize = ca->buckets[0] != NULL;
2199         int ret = -ENOMEM;
2200         unsigned i;
2201
2202         memset(&free,           0, sizeof(free));
2203         memset(&free_inc,       0, sizeof(free_inc));
2204         memset(&alloc_heap,     0, sizeof(alloc_heap));
2205
2206         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
2207                                             nbuckets * sizeof(struct bucket),
2208                                             GFP_KERNEL|__GFP_ZERO)) ||
2209             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2210                                             sizeof(unsigned long),
2211                                             GFP_KERNEL|__GFP_ZERO)) ||
2212             !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
2213             !init_fifo(&free[RESERVE_MOVINGGC],
2214                        copygc_reserve, GFP_KERNEL) ||
2215             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2216             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
2217             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2218                 goto err;
2219
2220         buckets->first_bucket   = ca->mi.first_bucket;
2221         buckets->nbuckets       = nbuckets;
2222
2223         bch2_copygc_stop(c);
2224
2225         if (resize) {
2226                 down_write(&c->gc_lock);
2227                 down_write(&ca->bucket_lock);
2228                 percpu_down_write(&c->mark_lock);
2229         }
2230
2231         old_buckets = bucket_array(ca);
2232
2233         if (resize) {
2234                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2235
2236                 memcpy(buckets->b,
2237                        old_buckets->b,
2238                        n * sizeof(struct bucket));
2239                 memcpy(buckets_nouse,
2240                        ca->buckets_nouse,
2241                        BITS_TO_LONGS(n) * sizeof(unsigned long));
2242         }
2243
2244         rcu_assign_pointer(ca->buckets[0], buckets);
2245         buckets = old_buckets;
2246
2247         swap(ca->buckets_nouse, buckets_nouse);
2248
2249         if (resize) {
2250                 percpu_up_write(&c->mark_lock);
2251                 up_write(&c->gc_lock);
2252         }
2253
2254         spin_lock(&c->freelist_lock);
2255         for (i = 0; i < RESERVE_NR; i++) {
2256                 fifo_move(&free[i], &ca->free[i]);
2257                 swap(ca->free[i], free[i]);
2258         }
2259         fifo_move(&free_inc, &ca->free_inc);
2260         swap(ca->free_inc, free_inc);
2261         spin_unlock(&c->freelist_lock);
2262
2263         /* with gc lock held, alloc_heap can't be in use: */
2264         swap(ca->alloc_heap, alloc_heap);
2265
2266         nbuckets = ca->mi.nbuckets;
2267
2268         if (resize)
2269                 up_write(&ca->bucket_lock);
2270
2271         ret = 0;
2272 err:
2273         free_heap(&alloc_heap);
2274         free_fifo(&free_inc);
2275         for (i = 0; i < RESERVE_NR; i++)
2276                 free_fifo(&free[i]);
2277         kvpfree(buckets_nouse,
2278                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2279         if (buckets)
2280                 call_rcu(&buckets->rcu, buckets_free_rcu);
2281
2282         return ret;
2283 }
2284
2285 void bch2_dev_buckets_free(struct bch_dev *ca)
2286 {
2287         unsigned i;
2288
2289         free_heap(&ca->alloc_heap);
2290         free_fifo(&ca->free_inc);
2291         for (i = 0; i < RESERVE_NR; i++)
2292                 free_fifo(&ca->free[i]);
2293         kvpfree(ca->buckets_nouse,
2294                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2295         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2296                 sizeof(struct bucket_array) +
2297                 ca->mi.nbuckets * sizeof(struct bucket));
2298
2299         free_percpu(ca->usage[0]);
2300 }
2301
2302 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2303 {
2304         if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
2305                 return -ENOMEM;
2306
2307         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2308 }