1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  *
7  * Bucket states:
8  * - free bucket: mark == 0
9  *   The bucket contains no data and will not be read
10  *
11  * - allocator bucket: owned_by_allocator == 1
12  *   The bucket is on a free list, or it is an open bucket
13  *
14  * - cached bucket: owned_by_allocator == 0 &&
15  *                  dirty_sectors == 0 &&
16  *                  cached_sectors > 0
17  *   The bucket contains data but may be safely discarded as there are
18  *   enough replicas of the data on other cache devices, or it has been
19  *   written back to the backing device
20  *
21  * - dirty bucket: owned_by_allocator == 0 &&
22  *                 dirty_sectors > 0
23  * The bucket contains data that we must not discard (either the only copy,
24  * or one of the 'main copies' of data requiring multiple replicas)
25  *
26  * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
27  *   This is a btree node, journal or gen/prio bucket
28  *
29  * Lifecycle:
30  *
31  * bucket invalidated => bucket on freelist => open bucket =>
32  *     [dirty bucket =>] cached bucket => bucket invalidated => ...
33  *
34  * Note that cache promotion can skip the dirty bucket step, as data
35  * is copied from a deeper tier to a shallower tier, onto a cached
36  * bucket.
37  * Note also that a cached bucket can spontaneously become dirty --
38  * see below.
39  *
40  * Only a traversal of the key space can determine whether a bucket is
41  * truly dirty or cached.
42  *
43  * Transitions:
44  *
45  * - free => allocator: bucket was invalidated
46  * - cached => allocator: bucket was invalidated
47  *
48  * - allocator => dirty: open bucket was filled up
49  * - allocator => cached: open bucket was filled up
50  * - allocator => metadata: metadata was allocated
51  *
52  * - dirty => cached: dirty sectors were copied to a deeper tier
53  * - dirty => free: dirty sectors were overwritten or moved (copy gc)
54  * - cached => free: cached sectors were overwritten
55  *
56  * - metadata => free: metadata was freed
57  *
58  * Oddities:
59  * - cached => dirty: a device was removed so formerly replicated data
60  *                    is no longer sufficiently replicated
61  * - free => cached: cannot happen
62  * - free => dirty: cannot happen
63  * - free => metadata: cannot happen
64  */
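/*
 * In the code below this classification shows up in bucket_type(): a bucket
 * with cached_sectors but no dirty_sectors is reported as BCH_DATA_cached,
 * otherwise its data_type is used.  is_available_bucket()/
 * is_unavailable_bucket() feed the buckets_unavailable counter and the
 * allocator wakeup in bch2_dev_usage_update(); per the table above, cached
 * and free buckets are the ones that may be invalidated and handed back to
 * the allocator.
 */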
65
66 #include "bcachefs.h"
67 #include "alloc_background.h"
68 #include "bset.h"
69 #include "btree_gc.h"
70 #include "btree_update.h"
71 #include "buckets.h"
72 #include "ec.h"
73 #include "error.h"
74 #include "movinggc.h"
75 #include "replicas.h"
76
77 #include <linux/preempt.h>
78 #include <trace/events/bcachefs.h>
79
80 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
81                                               enum bch_data_type data_type,
82                                               s64 sectors)
83 {
84         switch (data_type) {
85         case BCH_DATA_btree:
86                 fs_usage->btree         += sectors;
87                 break;
88         case BCH_DATA_user:
89         case BCH_DATA_parity:
90                 fs_usage->data          += sectors;
91                 break;
92         case BCH_DATA_cached:
93                 fs_usage->cached        += sectors;
94                 break;
95         default:
96                 break;
97         }
98 }
99
100 /*
101  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
102  * wraparound:
103  */
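/*
 * Only do the sweep once the journal sequence number has advanced by a
 * quarter of the range representable in BUCKET_JOURNAL_SEQ_BITS bits since
 * the last cleanup (that is what the threshold below checks), so the
 * truncated per-bucket journal_seq fields are cleared well before they
 * could wrap.
 */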
104 void bch2_bucket_seq_cleanup(struct bch_fs *c)
105 {
106         u64 journal_seq = atomic64_read(&c->journal.seq);
107         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
108         struct bch_dev *ca;
109         struct bucket_array *buckets;
110         struct bucket *g;
111         struct bucket_mark m;
112         unsigned i;
113
114         if (journal_seq - c->last_bucket_seq_cleanup <
115             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
116                 return;
117
118         c->last_bucket_seq_cleanup = journal_seq;
119
120         for_each_member_device(ca, c, i) {
121                 down_read(&ca->bucket_lock);
122                 buckets = bucket_array(ca);
123
124                 for_each_bucket(g, buckets) {
125                         bucket_cmpxchg(g, m, ({
126                                 if (!m.journal_seq_valid ||
127                                     bucket_needs_journal_commit(m, last_seq_ondisk))
128                                         break;
129
130                                 m.journal_seq_valid = 0;
131                         }));
132                 }
133                 up_read(&ca->bucket_lock);
134         }
135 }
136
137 void bch2_fs_usage_initialize(struct bch_fs *c)
138 {
139         struct bch_fs_usage *usage;
140         struct bch_dev *ca;
141         unsigned i;
142
143         percpu_down_write(&c->mark_lock);
144         usage = c->usage_base;
145
146         for (i = 0; i < ARRAY_SIZE(c->usage); i++)
147                 bch2_fs_usage_acc_to_base(c, i);
148
149         for (i = 0; i < BCH_REPLICAS_MAX; i++)
150                 usage->reserved += usage->persistent_reserved[i];
151
152         for (i = 0; i < c->replicas.nr; i++) {
153                 struct bch_replicas_entry *e =
154                         cpu_replicas_entry(&c->replicas, i);
155
156                 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
157         }
158
159         for_each_member_device(ca, c, i) {
160                 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
161
162                 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
163                                   dev.d[BCH_DATA_journal].buckets) *
164                         ca->mi.bucket_size;
165         }
166
167         percpu_up_write(&c->mark_lock);
168 }
169
170 void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
171 {
172         if (fs_usage == c->usage_scratch)
173                 mutex_unlock(&c->usage_scratch_lock);
174         else
175                 kfree(fs_usage);
176 }
177
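/*
 * Allocation strategy for the scratch buffer: try a cheap atomic allocation
 * first; failing that, opportunistically use the preallocated
 * c->usage_scratch buffer if its lock is uncontended; failing that, try a
 * blocking GFP_NOFS allocation; and only as a last resort block on
 * usage_scratch_lock.  bch2_fs_usage_scratch_put() above undoes whichever
 * case was used.
 */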
178 struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
179 {
180         struct bch_fs_usage *ret;
181         unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
182
183         ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
184         if (ret)
185                 return ret;
186
187         if (mutex_trylock(&c->usage_scratch_lock))
188                 goto out_pool;
189
190         ret = kzalloc(bytes, GFP_NOFS);
191         if (ret)
192                 return ret;
193
194         mutex_lock(&c->usage_scratch_lock);
195 out_pool:
196         ret = c->usage_scratch;
197         memset(ret, 0, bytes);
198         return ret;
199 }
200
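/*
 * Usage counters are kept as a base copy plus percpu deltas: one set of
 * deltas per journal buffer (indexed by journal_seq & JOURNAL_BUF_MASK) and
 * a separate set used by GC.  Readers such as bch2_dev_usage_read() below
 * sum the base copy and all the per-buffer deltas under c->usage_lock.
 */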
201 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
202                                                   unsigned journal_seq,
203                                                   bool gc)
204 {
205         return this_cpu_ptr(gc
206                             ? ca->usage_gc
207                             : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
208 }
209
210 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
211 {
212         struct bch_fs *c = ca->fs;
213         struct bch_dev_usage ret;
214         unsigned seq, i, u64s = dev_usage_u64s();
215
216         do {
217                 seq = read_seqcount_begin(&c->usage_lock);
218                 memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
219                 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
220                         acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
221         } while (read_seqcount_retry(&c->usage_lock, seq));
222
223         return ret;
224 }
225
226 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
227                                                 unsigned journal_seq,
228                                                 bool gc)
229 {
230         return this_cpu_ptr(gc
231                             ? c->usage_gc
232                             : c->usage[journal_seq & JOURNAL_BUF_MASK]);
233 }
234
235 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
236 {
237         ssize_t offset = v - (u64 *) c->usage_base;
238         unsigned i, seq;
239         u64 ret;
240
241         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
242         percpu_rwsem_assert_held(&c->mark_lock);
243
244         do {
245                 seq = read_seqcount_begin(&c->usage_lock);
246                 ret = *v;
247
248                 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
249                         ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
250         } while (read_seqcount_retry(&c->usage_lock, seq));
251
252         return ret;
253 }
254
255 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
256 {
257         struct bch_fs_usage *ret;
258         unsigned seq, i, v, u64s = fs_usage_u64s(c);
259 retry:
260         ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
261         if (unlikely(!ret))
262                 return NULL;
263
264         percpu_down_read(&c->mark_lock);
265
266         v = fs_usage_u64s(c);
267         if (unlikely(u64s != v)) {
268                 u64s = v;
269                 percpu_up_read(&c->mark_lock);
270                 kfree(ret);
271                 goto retry;
272         }
273
274         do {
275                 seq = read_seqcount_begin(&c->usage_lock);
276                 memcpy(ret, c->usage_base, u64s * sizeof(u64));
277                 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
278                         acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[i], u64s);
279         } while (read_seqcount_retry(&c->usage_lock, seq));
280
281         return ret;
282 }
283
284 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
285 {
286         struct bch_dev *ca;
287         unsigned i, u64s = fs_usage_u64s(c);
288
289         BUG_ON(idx >= ARRAY_SIZE(c->usage));
290
291         preempt_disable();
292         write_seqcount_begin(&c->usage_lock);
293
294         acc_u64s_percpu((u64 *) c->usage_base,
295                         (u64 __percpu *) c->usage[idx], u64s);
296         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
297
298         rcu_read_lock();
299         for_each_member_device_rcu(ca, c, i, NULL) {
300                 u64s = dev_usage_u64s();
301
302                 acc_u64s_percpu((u64 *) ca->usage_base,
303                                 (u64 __percpu *) ca->usage[idx], u64s);
304                 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
305         }
306         rcu_read_unlock();
307
308         write_seqcount_end(&c->usage_lock);
309         preempt_enable();
310 }
311
312 void bch2_fs_usage_to_text(struct printbuf *out,
313                            struct bch_fs *c,
314                            struct bch_fs_usage *fs_usage)
315 {
316         unsigned i;
317
318         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
319
320         pr_buf(out, "hidden:\t\t\t\t%llu\n",
321                fs_usage->hidden);
322         pr_buf(out, "data:\t\t\t\t%llu\n",
323                fs_usage->data);
324         pr_buf(out, "cached:\t\t\t\t%llu\n",
325                fs_usage->cached);
326         pr_buf(out, "reserved:\t\t\t%llu\n",
327                fs_usage->reserved);
328         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
329                fs_usage->nr_inodes);
330         pr_buf(out, "online reserved:\t\t%llu\n",
331                fs_usage->online_reserved);
332
333         for (i = 0;
334              i < ARRAY_SIZE(fs_usage->persistent_reserved);
335              i++) {
336                 pr_buf(out, "%u replicas:\n", i + 1);
337                 pr_buf(out, "\treserved:\t\t%llu\n",
338                        fs_usage->persistent_reserved[i]);
339         }
340
341         for (i = 0; i < c->replicas.nr; i++) {
342                 struct bch_replicas_entry *e =
343                         cpu_replicas_entry(&c->replicas, i);
344
345                 pr_buf(out, "\t");
346                 bch2_replicas_entry_to_text(out, e);
347                 pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
348         }
349 }
350
351 #define RESERVE_FACTOR  6
352
353 static u64 reserve_factor(u64 r)
354 {
355         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
356 }
357
358 static u64 avail_factor(u64 r)
359 {
360         return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
361 }
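/*
 * e.g. with RESERVE_FACTOR == 6: reserve_factor(6400) = 6400 +
 * round_up(6400, 64) / 64 = 6500, and avail_factor(6500) = 6500 * 64 / 65
 * = 6400, i.e. avail_factor() is approximately the inverse of
 * reserve_factor().
 */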
362
363 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
364 {
365         return min(fs_usage->hidden +
366                    fs_usage->btree +
367                    fs_usage->data +
368                    reserve_factor(fs_usage->reserved +
369                                   fs_usage->online_reserved),
370                    c->capacity);
371 }
372
373 static struct bch_fs_usage_short
374 __bch2_fs_usage_read_short(struct bch_fs *c)
375 {
376         struct bch_fs_usage_short ret;
377         u64 data, reserved;
378
379         ret.capacity = c->capacity -
380                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
381
382         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
383                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
384         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
385                 bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
386
387         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
388         ret.free        = ret.capacity - ret.used;
389
390         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
391
392         return ret;
393 }
394
395 struct bch_fs_usage_short
396 bch2_fs_usage_read_short(struct bch_fs *c)
397 {
398         struct bch_fs_usage_short ret;
399
400         percpu_down_read(&c->mark_lock);
401         ret = __bch2_fs_usage_read_short(c);
402         percpu_up_read(&c->mark_lock);
403
404         return ret;
405 }
406
407 static inline int is_unavailable_bucket(struct bucket_mark m)
408 {
409         return !is_available_bucket(m);
410 }
411
412 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
413                                             struct bucket_mark m)
414 {
415         return bucket_sectors_used(m)
416                 ? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
417                 : 0;
418 }
419
420 static inline int is_stripe_data_bucket(struct bucket_mark m)
421 {
422         return m.stripe && m.data_type != BCH_DATA_parity;
423 }
424
425 static inline enum bch_data_type bucket_type(struct bucket_mark m)
426 {
427         return m.cached_sectors && !m.dirty_sectors
428                 ? BCH_DATA_cached
429                 : m.data_type;
430 }
431
432 static bool bucket_became_unavailable(struct bucket_mark old,
433                                       struct bucket_mark new)
434 {
435         return is_available_bucket(old) &&
436                !is_available_bucket(new);
437 }
438
439 int bch2_fs_usage_apply(struct bch_fs *c,
440                         struct bch_fs_usage *fs_usage,
441                         struct disk_reservation *disk_res,
442                         unsigned journal_seq)
443 {
444         s64 added = fs_usage->data + fs_usage->reserved;
445         s64 should_not_have_added;
446         int ret = 0;
447
448         percpu_rwsem_assert_held(&c->mark_lock);
449
450         /*
451          * Not allowed to reduce sectors_available except by getting a
452          * reservation:
453          */
454         should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
455         if (WARN_ONCE(should_not_have_added > 0,
456                       "disk usage increased by %lli more than reservation of %llu",
457                       added, disk_res ? disk_res->sectors : 0)) {
458                 atomic64_sub(should_not_have_added, &c->sectors_available);
459                 added -= should_not_have_added;
460                 ret = -1;
461         }
462
463         if (added > 0) {
464                 disk_res->sectors               -= added;
465                 fs_usage->online_reserved       -= added;
466         }
467
468         preempt_disable();
469         acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
470                  (u64 *) fs_usage, fs_usage_u64s(c));
471         preempt_enable();
472
473         return ret;
474 }
475
476 static inline void account_bucket(struct bch_fs_usage *fs_usage,
477                                   struct bch_dev_usage *dev_usage,
478                                   enum bch_data_type type,
479                                   int nr, s64 size)
480 {
481         if (type == BCH_DATA_sb || type == BCH_DATA_journal)
482                 fs_usage->hidden        += size;
483
484         dev_usage->d[type].buckets      += nr;
485 }
486
487 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
488                                   struct bch_fs_usage *fs_usage,
489                                   struct bucket_mark old, struct bucket_mark new,
490                                   u64 journal_seq, bool gc)
491 {
492         struct bch_dev_usage *u;
493
494         percpu_rwsem_assert_held(&c->mark_lock);
495
496         preempt_disable();
497         u = dev_usage_ptr(ca, journal_seq, gc);
498
499         if (bucket_type(old))
500                 account_bucket(fs_usage, u, bucket_type(old),
501                                -1, -ca->mi.bucket_size);
502
503         if (bucket_type(new))
504                 account_bucket(fs_usage, u, bucket_type(new),
505                                1, ca->mi.bucket_size);
506
507         u->buckets_alloc +=
508                 (int) new.owned_by_allocator - (int) old.owned_by_allocator;
509         u->buckets_ec += (int) new.stripe - (int) old.stripe;
510         u->buckets_unavailable +=
511                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
512
513         u->d[old.data_type].sectors -= old.dirty_sectors;
514         u->d[new.data_type].sectors += new.dirty_sectors;
515         u->d[BCH_DATA_cached].sectors +=
516                 (int) new.cached_sectors - (int) old.cached_sectors;
517
518         u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
519         u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
520
521         preempt_enable();
522
523         if (!is_available_bucket(old) && is_available_bucket(new))
524                 bch2_wake_allocator(ca);
525 }
526
527 static inline int update_replicas(struct bch_fs *c,
528                                   struct bch_fs_usage *fs_usage,
529                                   struct bch_replicas_entry *r,
530                                   s64 sectors)
531 {
532         int idx = bch2_replicas_entry_idx(c, r);
533
534         if (idx < 0)
535                 return -1;
536
537         if (!fs_usage)
538                 return 0;
539
540         fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
541         fs_usage->replicas[idx]         += sectors;
542         return 0;
543 }
544
545 static inline void update_cached_sectors(struct bch_fs *c,
546                                          struct bch_fs_usage *fs_usage,
547                                          unsigned dev, s64 sectors)
548 {
549         struct bch_replicas_padded r;
550
551         bch2_replicas_entry_cached(&r.e, dev);
552
553         update_replicas(c, fs_usage, &r.e, sectors);
554 }
555
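/*
 * Deltas accumulated by a transaction are kept in a flat, variable-length
 * list: each element is a struct replicas_delta holding an 8-byte delta
 * plus a variable-sized bch_replicas_entry, so update_replicas_list() and
 * replicas_delta_next() both step through the buffer in units of
 * replicas_entry_bytes() + 8.  The buffer is grown geometrically by
 * replicas_deltas_realloc().
 */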
556 static struct replicas_delta_list *
557 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
558 {
559         struct replicas_delta_list *d = trans->fs_usage_deltas;
560         unsigned new_size = d ? (d->size + more) * 2 : 128;
561
562         if (!d || d->used + more > d->size) {
563                 d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
564                 BUG_ON(!d);
565
566                 d->size = new_size;
567                 trans->fs_usage_deltas = d;
568         }
569         return d;
570 }
571
572 static inline void update_replicas_list(struct btree_trans *trans,
573                                         struct bch_replicas_entry *r,
574                                         s64 sectors)
575 {
576         struct replicas_delta_list *d;
577         struct replicas_delta *n;
578         unsigned b;
579
580         if (!sectors)
581                 return;
582
583         b = replicas_entry_bytes(r) + 8;
584         d = replicas_deltas_realloc(trans, b);
585
586         n = (void *) d->d + d->used;
587         n->delta = sectors;
588         memcpy(&n->r, r, replicas_entry_bytes(r));
589         d->used += b;
590 }
591
592 static inline void update_cached_sectors_list(struct btree_trans *trans,
593                                               unsigned dev, s64 sectors)
594 {
595         struct bch_replicas_padded r;
596
597         bch2_replicas_entry_cached(&r.e, dev);
598
599         update_replicas_list(trans, &r.e, sectors);
600 }
601
602 static inline struct replicas_delta *
603 replicas_delta_next(struct replicas_delta *d)
604 {
605         return (void *) d + replicas_entry_bytes(&d->r) + 8;
606 }
607
608 int bch2_replicas_delta_list_apply(struct bch_fs *c,
609                                    struct bch_fs_usage *fs_usage,
610                                    struct replicas_delta_list *r)
611 {
612         struct replicas_delta *d = r->d;
613         struct replicas_delta *top = (void *) r->d + r->used;
614         unsigned i;
615
616         for (d = r->d; d != top; d = replicas_delta_next(d))
617                 if (update_replicas(c, fs_usage, &d->r, d->delta)) {
618                         top = d;
619                         goto unwind;
620                 }
621
622         if (!fs_usage)
623                 return 0;
624
625         fs_usage->nr_inodes += r->nr_inodes;
626
627         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
628                 fs_usage->reserved += r->persistent_reserved[i];
629                 fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
630         }
631
632         return 0;
633 unwind:
634         for (d = r->d; d != top; d = replicas_delta_next(d))
635                 update_replicas(c, fs_usage, &d->r, -d->delta);
636         return -1;
637 }
638
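/*
 * Run a mark function against the in-memory bucket state, the GC copy of
 * that state, or both: the non-GC copy is updated for normal triggers, and
 * the GC copy is updated either for GC's own triggers or, while GC is
 * running, for positions GC has already visited, so the GC copy doesn't
 * miss updates that race with the GC walk.
 */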
639 #define do_mark_fn(fn, c, pos, flags, ...)                              \
640 ({                                                                      \
641         int gc, ret = 0;                                                \
642                                                                         \
643         percpu_rwsem_assert_held(&c->mark_lock);                        \
644                                                                         \
645         for (gc = 0; gc < 2 && !ret; gc++)                              \
646                 if (!gc == !(flags & BTREE_TRIGGER_GC) ||               \
647                     (gc && gc_visited(c, pos)))                         \
648                         ret = fn(c, __VA_ARGS__, gc);                   \
649         ret;                                                            \
650 })
651
652 static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
653                                     size_t b, bool owned_by_allocator,
654                                     bool gc)
655 {
656         struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
657         struct bucket *g = __bucket(ca, b, gc);
658         struct bucket_mark old, new;
659
660         old = bucket_cmpxchg(g, new, ({
661                 new.owned_by_allocator  = owned_by_allocator;
662         }));
663
664         /*
665          * XXX: this is wrong, this means we'll be doing updates to the percpu
666          * buckets_alloc counter that don't have an open journal buffer and
667          * we'll race with the machinery that accumulates that to ca->usage_base
668          */
669         bch2_dev_usage_update(c, ca, fs_usage, old, new, 0, gc);
670
671         BUG_ON(!gc &&
672                !owned_by_allocator && !old.owned_by_allocator);
673
674         return 0;
675 }
676
677 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
678                             size_t b, bool owned_by_allocator,
679                             struct gc_pos pos, unsigned flags)
680 {
681         preempt_disable();
682
683         do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
684                    ca, b, owned_by_allocator);
685
686         preempt_enable();
687 }
688
689 static int bch2_mark_alloc(struct bch_fs *c,
690                            struct bkey_s_c old, struct bkey_s_c new,
691                            struct bch_fs_usage *fs_usage,
692                            u64 journal_seq, unsigned flags)
693 {
694         bool gc = flags & BTREE_TRIGGER_GC;
695         struct bkey_alloc_unpacked u;
696         struct bch_dev *ca;
697         struct bucket *g;
698         struct bucket_mark old_m, m;
699
700         /* We don't do anything for deletions - do we?: */
701         if (new.k->type != KEY_TYPE_alloc &&
702             new.k->type != KEY_TYPE_alloc_v2)
703                 return 0;
704
705         /*
706          * alloc btree is read in by bch2_alloc_read, not gc:
707          */
708         if ((flags & BTREE_TRIGGER_GC) &&
709             !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
710                 return 0;
711
712         ca = bch_dev_bkey_exists(c, new.k->p.inode);
713
714         if (new.k->p.offset >= ca->mi.nbuckets)
715                 return 0;
716
717         g = __bucket(ca, new.k->p.offset, gc);
718         u = bch2_alloc_unpack(new);
719
720         old_m = bucket_cmpxchg(g, m, ({
721                 m.gen                   = u.gen;
722                 m.data_type             = u.data_type;
723                 m.dirty_sectors         = u.dirty_sectors;
724                 m.cached_sectors        = u.cached_sectors;
725                 m.stripe                = u.stripe != 0;
726
727                 if (journal_seq) {
728                         m.journal_seq_valid     = 1;
729                         m.journal_seq           = journal_seq;
730                 }
731         }));
732
733         bch2_dev_usage_update(c, ca, fs_usage, old_m, m, journal_seq, gc);
734
735         g->io_time[READ]        = u.read_time;
736         g->io_time[WRITE]       = u.write_time;
737         g->oldest_gen           = u.oldest_gen;
738         g->gen_valid            = 1;
739         g->stripe               = u.stripe;
740         g->stripe_redundancy    = u.stripe_redundancy;
741
742         /*
743          * need to know if we're getting called from the invalidate path or
744          * not:
745          */
746
747         if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
748             old_m.cached_sectors) {
749                 update_cached_sectors(c, fs_usage, ca->dev_idx,
750                                       -old_m.cached_sectors);
751                 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
752                                  old_m.cached_sectors);
753         }
754
755         return 0;
756 }
757
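/*
 * Saturating add for the 16-bit sector counts in struct bucket_mark:
 * clamps the result to U16_MAX and evaluates to true if the addition
 * overflowed.
 */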
758 #define checked_add(a, b)                                       \
759 ({                                                              \
760         unsigned _res = (unsigned) (a) + (b);                   \
761         bool overflow = _res > U16_MAX;                         \
762         if (overflow)                                           \
763                 _res = U16_MAX;                                 \
764         (a) = _res;                                             \
765         overflow;                                               \
766 })
767
768 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
769                                        size_t b, enum bch_data_type data_type,
770                                        unsigned sectors, bool gc)
771 {
772         struct bucket *g = __bucket(ca, b, gc);
773         struct bucket_mark old, new;
774         bool overflow;
775
776         BUG_ON(data_type != BCH_DATA_sb &&
777                data_type != BCH_DATA_journal);
778
779         old = bucket_cmpxchg(g, new, ({
780                 new.data_type   = data_type;
781                 overflow = checked_add(new.dirty_sectors, sectors);
782         }));
783
784         bch2_fs_inconsistent_on(old.data_type &&
785                                 old.data_type != data_type, c,
786                 "different types of data in same bucket: %s, %s",
787                 bch2_data_types[old.data_type],
788                 bch2_data_types[data_type]);
789
790         bch2_fs_inconsistent_on(overflow, c,
791                 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
792                 ca->dev_idx, b, new.gen,
793                 bch2_data_types[old.data_type ?: data_type],
794                 old.dirty_sectors, sectors);
795
796         if (c)
797                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
798                                       old, new, 0, gc);
799
800         return 0;
801 }
802
803 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
804                                size_t b, enum bch_data_type type,
805                                unsigned sectors, struct gc_pos pos,
806                                unsigned flags)
807 {
808         BUG_ON(type != BCH_DATA_sb &&
809                type != BCH_DATA_journal);
810
811         preempt_disable();
812
813         if (likely(c)) {
814                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
815                            ca, b, type, sectors);
816         } else {
817                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
818         }
819
820         preempt_enable();
821 }
822
823 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
824 {
825         return DIV_ROUND_UP(sectors * n, d);
826 }
827
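/*
 * Convert a change in an extent's live (uncompressed) size into a change in
 * on-disk sectors, scaling by compressed_size/uncompressed_size (n/d).
 * e.g. for a pointer compressed 2:1 (n = 64, d = 128), overwriting the last
 * 32 sectors of a 128-sector extent (offset 96, delta -32,
 * BTREE_TRIGGER_OVERWRITE) gives -scaled(128) + scaled(128 - 32)
 * = -64 + 48 = -16 disk sectors.
 */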
828 static s64 __ptr_disk_sectors_delta(unsigned old_size,
829                                     unsigned offset, s64 delta,
830                                     unsigned flags,
831                                     unsigned n, unsigned d)
832 {
833         BUG_ON(!n || !d);
834
835         if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
836                 BUG_ON(offset + -delta > old_size);
837
838                 return -disk_sectors_scaled(n, d, old_size) +
839                         disk_sectors_scaled(n, d, offset) +
840                         disk_sectors_scaled(n, d, old_size - offset + delta);
841         } else if (flags & BTREE_TRIGGER_OVERWRITE) {
842                 BUG_ON(offset + -delta > old_size);
843
844                 return -disk_sectors_scaled(n, d, old_size) +
845                         disk_sectors_scaled(n, d, old_size + delta);
846         } else {
847                 return  disk_sectors_scaled(n, d, delta);
848         }
849 }
850
851 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
852                                   unsigned offset, s64 delta,
853                                   unsigned flags)
854 {
855         return __ptr_disk_sectors_delta(p.crc.live_size,
856                                         offset, delta, flags,
857                                         p.crc.compressed_size,
858                                         p.crc.uncompressed_size);
859 }
860
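/*
 * Sanity checks for a pointer against the bucket it points into.  Returns
 * -EIO (after emitting an fsck error) if the pointer's gen is newer than
 * the bucket's, if it is too stale, if a dirty pointer's gen doesn't match,
 * if the bucket already holds a different data type, or if the sector count
 * would overflow; returns 1 for a stale cached pointer (callers skip
 * those); returns 0 otherwise.
 */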
861 static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
862                             const struct bch_extent_ptr *ptr,
863                             s64 sectors, enum bch_data_type ptr_data_type,
864                             u8 bucket_gen, u8 bucket_data_type,
865                             u16 dirty_sectors, u16 cached_sectors)
866 {
867         size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
868         u16 bucket_sectors = !ptr->cached
869                 ? dirty_sectors
870                 : cached_sectors;
871         char buf[200];
872
873         if (gen_after(ptr->gen, bucket_gen)) {
874                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
875                         "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
876                         "while marking %s",
877                         ptr->dev, bucket_nr, bucket_gen,
878                         bch2_data_types[bucket_data_type ?: ptr_data_type],
879                         ptr->gen,
880                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
881                 return -EIO;
882         }
883
884         if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
885                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
886                         "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
887                         "while marking %s",
888                         ptr->dev, bucket_nr, bucket_gen,
889                         bch2_data_types[bucket_data_type ?: ptr_data_type],
890                         ptr->gen,
891                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
892                 return -EIO;
893         }
894
895         if (bucket_gen != ptr->gen && !ptr->cached) {
896                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
897                         "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
898                         "while marking %s",
899                         ptr->dev, bucket_nr, bucket_gen,
900                         bch2_data_types[bucket_data_type ?: ptr_data_type],
901                         ptr->gen,
902                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
903                 return -EIO;
904         }
905
906         if (bucket_gen != ptr->gen)
907                 return 1;
908
909         if (bucket_data_type && ptr_data_type &&
910             bucket_data_type != ptr_data_type) {
911                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
912                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
913                         "while marking %s",
914                         ptr->dev, bucket_nr, bucket_gen,
915                         bch2_data_types[bucket_data_type],
916                         bch2_data_types[ptr_data_type],
917                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
918                 return -EIO;
919         }
920
921         if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
922                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
923                         "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
924                         "while marking %s",
925                         ptr->dev, bucket_nr, bucket_gen,
926                         bch2_data_types[bucket_data_type ?: ptr_data_type],
927                         bucket_sectors, sectors,
928                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
929                 return -EIO;
930         }
931
932         return 0;
933 }
934
935 static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
936                              unsigned ptr_idx,
937                              struct bch_fs_usage *fs_usage,
938                              u64 journal_seq, unsigned flags)
939 {
940         const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
941         unsigned nr_data = s->nr_blocks - s->nr_redundant;
942         bool parity = ptr_idx >= nr_data;
943         const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
944         bool gc = flags & BTREE_TRIGGER_GC;
945         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
946         struct bucket *g = PTR_BUCKET(ca, ptr, gc);
947         struct bucket_mark new, old;
948         char buf[200];
949         int ret;
950
951         if (g->stripe && g->stripe != k.k->p.offset) {
952                 bch2_fs_inconsistent(c,
953                               "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
954                               ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
955                               (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
956                 return -EINVAL;
957         }
958
959         old = bucket_cmpxchg(g, new, ({
960                 ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
961                                        new.dirty_sectors, new.cached_sectors);
962                 if (ret)
963                         return ret;
964
965                 if (parity) {
966                         new.data_type           = BCH_DATA_parity;
967                         new.dirty_sectors       = le16_to_cpu(s->sectors);
968                 }
969
970                 if (journal_seq) {
971                         new.journal_seq_valid   = 1;
972                         new.journal_seq         = journal_seq;
973                 }
974         }));
975
976         g->stripe               = k.k->p.offset;
977         g->stripe_redundancy    = s->nr_redundant;
978
979         bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
980         return 0;
981 }
982
983 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
984                           const struct bch_extent_ptr *ptr,
985                           s64 sectors, enum bch_data_type ptr_data_type,
986                           u8 bucket_gen, u8 *bucket_data_type,
987                           u16 *dirty_sectors, u16 *cached_sectors)
988 {
989         u16 *dst_sectors = !ptr->cached
990                 ? dirty_sectors
991                 : cached_sectors;
992         int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
993                                    bucket_gen, *bucket_data_type,
994                                    *dirty_sectors, *cached_sectors);
995
996         if (ret)
997                 return ret;
998
999         *dst_sectors += sectors;
1000         *bucket_data_type = *dirty_sectors || *cached_sectors
1001                 ? ptr_data_type : 0;
1002         return 0;
1003 }
1004
1005 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
1006                              struct extent_ptr_decoded p,
1007                              s64 sectors, enum bch_data_type data_type,
1008                              struct bch_fs_usage *fs_usage,
1009                              u64 journal_seq, unsigned flags)
1010 {
1011         bool gc = flags & BTREE_TRIGGER_GC;
1012         struct bucket_mark old, new;
1013         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
1014         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
1015         u8 bucket_data_type;
1016         u64 v;
1017         int ret;
1018
1019         v = atomic64_read(&g->_mark.v);
1020         do {
1021                 new.v.counter = old.v.counter = v;
1022                 bucket_data_type = new.data_type;
1023
1024                 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
1025                                      &bucket_data_type,
1026                                      &new.dirty_sectors,
1027                                      &new.cached_sectors);
1028                 if (ret)
1029                         return ret;
1030
1031                 new.data_type = bucket_data_type;
1032
1033                 if (journal_seq) {
1034                         new.journal_seq_valid = 1;
1035                         new.journal_seq = journal_seq;
1036                 }
1037
1038                 if (flags & BTREE_TRIGGER_NOATOMIC) {
1039                         g->_mark = new;
1040                         break;
1041                 }
1042         } while ((v = atomic64_cmpxchg(&g->_mark.v,
1043                               old.v.counter,
1044                               new.v.counter)) != old.v.counter);
1045
1046         bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
1047
1048         BUG_ON(!gc && bucket_became_unavailable(old, new));
1049
1050         return 0;
1051 }
1052
1053 static int bch2_mark_stripe_ptr(struct bch_fs *c,
1054                                 struct bch_extent_stripe_ptr p,
1055                                 enum bch_data_type data_type,
1056                                 struct bch_fs_usage *fs_usage,
1057                                 s64 sectors, unsigned flags)
1058 {
1059         bool gc = flags & BTREE_TRIGGER_GC;
1060         struct bch_replicas_padded r;
1061         struct stripe *m;
1062         unsigned i, blocks_nonempty = 0;
1063
1064         m = genradix_ptr(&c->stripes[gc], p.idx);
1065
1066         spin_lock(&c->ec_stripes_heap_lock);
1067
1068         if (!m || !m->alive) {
1069                 spin_unlock(&c->ec_stripes_heap_lock);
1070                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
1071                                     (u64) p.idx);
1072                 return -EIO;
1073         }
1074
1075         m->block_sectors[p.block] += sectors;
1076
1077         r = m->r;
1078
1079         for (i = 0; i < m->nr_blocks; i++)
1080                 blocks_nonempty += m->block_sectors[i] != 0;
1081
1082         if (m->blocks_nonempty != blocks_nonempty) {
1083                 m->blocks_nonempty = blocks_nonempty;
1084                 if (!gc)
1085                         bch2_stripes_heap_update(c, m, p.idx);
1086         }
1087
1088         spin_unlock(&c->ec_stripes_heap_lock);
1089
1090         r.e.data_type = data_type;
1091         update_replicas(c, fs_usage, &r.e, sectors);
1092
1093         return 0;
1094 }
1095
1096 static int bch2_mark_extent(struct bch_fs *c,
1097                             struct bkey_s_c old, struct bkey_s_c new,
1098                             unsigned offset, s64 sectors,
1099                             enum bch_data_type data_type,
1100                             struct bch_fs_usage *fs_usage,
1101                             unsigned journal_seq, unsigned flags)
1102 {
1103         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1104         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1105         const union bch_extent_entry *entry;
1106         struct extent_ptr_decoded p;
1107         struct bch_replicas_padded r;
1108         s64 dirty_sectors = 0;
1109         bool stale;
1110         int ret;
1111
1112         r.e.data_type   = data_type;
1113         r.e.nr_devs     = 0;
1114         r.e.nr_required = 1;
1115
1116         BUG_ON(!sectors);
1117
1118         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1119                 s64 disk_sectors = data_type == BCH_DATA_btree
1120                         ? sectors
1121                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1122
1123                 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
1124                                         fs_usage, journal_seq, flags);
1125                 if (ret < 0)
1126                         return ret;
1127
1128                 stale = ret > 0;
1129
1130                 if (p.ptr.cached) {
1131                         if (!stale)
1132                                 update_cached_sectors(c, fs_usage, p.ptr.dev,
1133                                                       disk_sectors);
1134                 } else if (!p.has_ec) {
1135                         dirty_sectors          += disk_sectors;
1136                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1137                 } else {
1138                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
1139                                         fs_usage, disk_sectors, flags);
1140                         if (ret)
1141                                 return ret;
1142
1143                         /*
1144                          * There may be other dirty pointers in this extent, but
1145                          * if so they're not required for mounting if we have an
1146                          * erasure coded pointer in this extent:
1147                          */
1148                         r.e.nr_required = 0;
1149                 }
1150         }
1151
1152         if (r.e.nr_devs)
1153                 update_replicas(c, fs_usage, &r.e, dirty_sectors);
1154
1155         return 0;
1156 }
1157
1158 static int bch2_mark_stripe(struct bch_fs *c,
1159                             struct bkey_s_c old, struct bkey_s_c new,
1160                             struct bch_fs_usage *fs_usage,
1161                             u64 journal_seq, unsigned flags)
1162 {
1163         bool gc = flags & BTREE_TRIGGER_GC;
1164         size_t idx = new.k->p.offset;
1165         const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1166                 ? bkey_s_c_to_stripe(old).v : NULL;
1167         const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1168                 ? bkey_s_c_to_stripe(new).v : NULL;
1169         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1170         unsigned i;
1171         int ret;
1172
1173         BUG_ON(gc && old_s);
1174
1175         if (!m || (old_s && !m->alive)) {
1176                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1177                                     idx);
1178                 return -1;
1179         }
1180
1181         if (!new_s) {
1182                 spin_lock(&c->ec_stripes_heap_lock);
1183                 bch2_stripes_heap_del(c, m, idx);
1184                 spin_unlock(&c->ec_stripes_heap_lock);
1185
1186                 memset(m, 0, sizeof(*m));
1187         } else {
1188                 m->alive        = true;
1189                 m->sectors      = le16_to_cpu(new_s->sectors);
1190                 m->algorithm    = new_s->algorithm;
1191                 m->nr_blocks    = new_s->nr_blocks;
1192                 m->nr_redundant = new_s->nr_redundant;
1193                 m->blocks_nonempty = 0;
1194
1195                 for (i = 0; i < new_s->nr_blocks; i++) {
1196                         m->block_sectors[i] =
1197                                 stripe_blockcount_get(new_s, i);
1198                         m->blocks_nonempty += !!m->block_sectors[i];
1199                 }
1200
1201                 bch2_bkey_to_replicas(&m->r.e, new);
1202
1203                 if (!gc) {
1204                         spin_lock(&c->ec_stripes_heap_lock);
1205                         bch2_stripes_heap_update(c, m, idx);
1206                         spin_unlock(&c->ec_stripes_heap_lock);
1207                 }
1208         }
1209
1210         if (gc) {
1211                 /*
1212                  * gc recalculates this field from stripe ptr
1213                  * references:
1214                  */
1215                 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1216                 m->blocks_nonempty = 0;
1217
1218                 for (i = 0; i < new_s->nr_blocks; i++) {
1219                         ret = mark_stripe_bucket(c, new, i, fs_usage,
1220                                                  journal_seq, flags);
1221                         if (ret)
1222                                 return ret;
1223                 }
1224
1225                 update_replicas(c, fs_usage, &m->r.e,
1226                                 ((s64) m->sectors * m->nr_redundant));
1227         }
1228
1229         return 0;
1230 }
1231
1232 static int bch2_mark_key_locked(struct bch_fs *c,
1233                    struct bkey_s_c old,
1234                    struct bkey_s_c new,
1235                    unsigned offset, s64 sectors,
1236                    struct bch_fs_usage *fs_usage,
1237                    u64 journal_seq, unsigned flags)
1238 {
1239         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1240         int ret = 0;
1241
1242         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1243
1244         preempt_disable();
1245
1246         if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1247                 fs_usage = fs_usage_ptr(c, journal_seq,
1248                                         flags & BTREE_TRIGGER_GC);
1249
1250         switch (k.k->type) {
1251         case KEY_TYPE_alloc:
1252         case KEY_TYPE_alloc_v2:
1253                 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1254                 break;
1255         case KEY_TYPE_btree_ptr:
1256         case KEY_TYPE_btree_ptr_v2:
1257                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1258                         ?  c->opts.btree_node_size
1259                         : -c->opts.btree_node_size;
1260
1261                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1262                                 BCH_DATA_btree, fs_usage, journal_seq, flags);
1263                 break;
1264         case KEY_TYPE_extent:
1265         case KEY_TYPE_reflink_v:
1266                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1267                                 BCH_DATA_user, fs_usage, journal_seq, flags);
1268                 break;
1269         case KEY_TYPE_stripe:
1270                 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1271                 break;
1272         case KEY_TYPE_inode:
1273                 fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
1274                 fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
1275                 break;
1276         case KEY_TYPE_reservation: {
1277                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1278
1279                 sectors *= replicas;
1280                 replicas = clamp_t(unsigned, replicas, 1,
1281                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1282
1283                 fs_usage->reserved                              += sectors;
1284                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1285                 break;
1286         }
1287         }
1288
1289         preempt_enable();
1290
1291         return ret;
1292 }
1293
1294 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1295                   unsigned offset, s64 sectors,
1296                   struct bch_fs_usage *fs_usage,
1297                   u64 journal_seq, unsigned flags)
1298 {
1299         struct bkey deleted;
1300         struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1301         int ret;
1302
1303         bkey_init(&deleted);
1304
1305         percpu_down_read(&c->mark_lock);
1306         ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1307                                    fs_usage, journal_seq,
1308                                    BTREE_TRIGGER_INSERT|flags);
1309         percpu_up_read(&c->mark_lock);
1310
1311         return ret;
1312 }
1313
1314 int bch2_mark_update(struct btree_trans *trans,
1315                      struct btree_iter *iter,
1316                      struct bkey_i *new,
1317                      struct bch_fs_usage *fs_usage,
1318                      unsigned flags)
1319 {
1320         struct bch_fs           *c = trans->c;
1321         struct btree            *b = iter_l(iter)->b;
1322         struct btree_node_iter  node_iter = iter_l(iter)->iter;
1323         struct bkey_packed      *_old;
1324         struct bkey_s_c         old;
1325         struct bkey             unpacked;
1326         int ret = 0;
1327
1328         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1329                 return 0;
1330
1331         if (!btree_node_type_needs_gc(iter->btree_id))
1332                 return 0;
1333
1334         bkey_init(&unpacked);
1335         old = (struct bkey_s_c) { &unpacked, NULL };
1336
1337         if (!btree_node_type_is_extents(iter->btree_id)) {
1338                 /* iterators should be uptodate, shouldn't get errors here: */
1339                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1340                         old = bch2_btree_iter_peek_slot(iter);
1341                         BUG_ON(bkey_err(old));
1342                 } else {
1343                         struct bkey_cached *ck = (void *) iter->l[0].b;
1344
1345                         if (ck->valid)
1346                                 old = bkey_i_to_s_c(ck->k);
1347                 }
1348
1349                 if (old.k->type == new->k.type) {
1350                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1351                                 fs_usage, trans->journal_res.seq,
1352                                 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1353
1354                 } else {
1355                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1356                                 fs_usage, trans->journal_res.seq,
1357                                 BTREE_TRIGGER_INSERT|flags);
1358                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1359                                 fs_usage, trans->journal_res.seq,
1360                                 BTREE_TRIGGER_OVERWRITE|flags);
1361                 }
1362         } else {
1363                 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1364                 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1365                         0, new->k.size,
1366                         fs_usage, trans->journal_res.seq,
1367                         BTREE_TRIGGER_INSERT|flags);
1368
1369                 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
1370                         unsigned offset = 0;
1371                         s64 sectors;
1372
1373                         old = bkey_disassemble(b, _old, &unpacked);
1374                         sectors = -((s64) old.k->size);
1375
1376                         flags |= BTREE_TRIGGER_OVERWRITE;
1377
1378                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1379                                 return 0;
1380
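			/*
			 * offset/sectors describe the part of the old extent
			 * being overwritten: offset is relative to the start
			 * of the old extent, sectors is the (negative) number
			 * of sectors going away; the MIDDLE case splits the
			 * old extent, hence BTREE_TRIGGER_OVERWRITE_SPLIT.
			 */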
1381                         switch (bch2_extent_overlap(&new->k, old.k)) {
1382                         case BCH_EXTENT_OVERLAP_ALL:
1383                                 offset = 0;
1384                                 sectors = -((s64) old.k->size);
1385                                 break;
1386                         case BCH_EXTENT_OVERLAP_BACK:
1387                                 offset = bkey_start_offset(&new->k) -
1388                                         bkey_start_offset(old.k);
1389                                 sectors = bkey_start_offset(&new->k) -
1390                                         old.k->p.offset;
1391                                 break;
1392                         case BCH_EXTENT_OVERLAP_FRONT:
1393                                 offset = 0;
1394                                 sectors = bkey_start_offset(old.k) -
1395                                         new->k.p.offset;
1396                                 break;
1397                         case BCH_EXTENT_OVERLAP_MIDDLE:
1398                                 offset = bkey_start_offset(&new->k) -
1399                                         bkey_start_offset(old.k);
1400                                 sectors = -((s64) new->k.size);
1401                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1402                                 break;
1403                         }
1404
1405                         BUG_ON(sectors >= 0);
1406
1407                         ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1408                                         offset, sectors, fs_usage,
1409                                         trans->journal_res.seq, flags) ?: 1;
1410                         if (ret <= 0)
1411                                 break;
1412
1413                         bch2_btree_node_iter_advance(&node_iter, b);
1414                 }
1415         }
1416
1417         return ret;
1418 }
1419
1420 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1421                                struct bch_fs_usage *fs_usage)
1422 {
1423         struct bch_fs *c = trans->c;
1424         struct btree_insert_entry *i;
1425         static int warned_disk_usage = 0;
1426         u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1427         char buf[200];
1428
1429         if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
1430                                  trans->journal_res.seq) ||
1431             warned_disk_usage ||
1432             xchg(&warned_disk_usage, 1))
1433                 return;
1434
1435         bch_err(c, "disk usage increased more than %llu sectors reserved",
1436                 disk_res_sectors);
1437
1438         trans_for_each_update(trans, i) {
1439                 pr_err("while inserting");
1440                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1441                 pr_err("%s", buf);
1442                 pr_err("overlapping with");
1443
1444                 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1445                         struct btree            *b = iter_l(i->iter)->b;
1446                         struct btree_node_iter  node_iter = iter_l(i->iter)->iter;
1447                         struct bkey_packed      *_k;
1448
1449                         while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
1450                                 struct bkey             unpacked;
1451                                 struct bkey_s_c         k;
1452
1453                                 pr_info("_k %px format %u", _k, _k->format);
1454                                 k = bkey_disassemble(b, _k, &unpacked);
1455
1456                                 if (btree_node_is_extents(b)
1457                                     ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1458                                     : bkey_cmp(i->k->k.p, k.k->p))
1459                                         break;
1460
1461                                 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1462                                 pr_err("%s", buf);
1463
1464                                 bch2_btree_node_iter_advance(&node_iter, b);
1465                         }
1466                 } else {
1467                         struct bkey_cached *ck = (void *) i->iter->l[0].b;
1468
1469                         if (ck->valid) {
1470                                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
1471                                 pr_err("%s", buf);
1472                         }
1473                 }
1474         }
1475 }
1476
1477 /* trans_mark: */
1478
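/*
 * Check whether the transaction already has a pending update covering @pos in
 * @btree_id: if so, return that update's iterator and point *k at its key, so
 * callers see the transaction's own uncommitted changes.
 */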
1479 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1480                             enum btree_id btree_id, struct bpos pos,
1481                             struct bkey_s_c *k)
1482 {
1483         struct btree_insert_entry *i;
1484
1485         trans_for_each_update(trans, i)
1486                 if (i->iter->btree_id == btree_id &&
1487                     (btree_node_type_is_extents(btree_id)
1488                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1489                        bkey_cmp(pos, i->k->k.p) < 0
1490                      : !bkey_cmp(pos, i->iter->pos))) {
1491                         *k = bkey_i_to_s_c(i->k);
1492                         return i->iter;
1493                 }
1494
1495         return NULL;
1496 }
1497
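/*
 * Get the key at @pos, preferring a pending update within this transaction
 * (returns 1 in that case); otherwise peek it through a freshly allocated
 * iterator. Returns 0 on success via the iterator, negative error code on
 * failure.
 */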
1498 static int trans_get_key(struct btree_trans *trans,
1499                          enum btree_id btree_id, struct bpos pos,
1500                          struct btree_iter **iter,
1501                          struct bkey_s_c *k)
1502 {
1503         unsigned flags = btree_id != BTREE_ID_ALLOC
1504                 ? BTREE_ITER_SLOTS
1505                 : BTREE_ITER_CACHED;
1506         int ret;
1507
1508         *iter = trans_get_update(trans, btree_id, pos, k);
1509         if (*iter)
1510                 return 1;
1511
1512         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1513                                     flags|BTREE_ITER_INTENT);
1514         *k = __bch2_btree_iter_peek(*iter, flags);
1515         ret = bkey_err(*k);
1516         if (ret)
1517                 bch2_trans_iter_put(trans, *iter);
1518         return ret;
1519 }
1520
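/*
 * Start a transactional update of the alloc key for the bucket @ptr points
 * into: allocate space for the repacked key and unpack the current bucket
 * state into @u, either from a pending update in this transaction or from the
 * in-memory bucket mark.
 */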
1521 static struct bkey_alloc_buf *
1522 bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
1523                               const struct bch_extent_ptr *ptr,
1524                               struct bkey_alloc_unpacked *u)
1525 {
1526         struct bch_fs *c = trans->c;
1527         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1528         struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1529         struct bucket *g;
1530         struct btree_iter *iter;
1531         struct bkey_s_c k;
1532         struct bkey_alloc_buf *a;
1533         int ret;
1534
1535         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
1536         if (IS_ERR(a))
1537                 return a;
1538
1539         iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k);
1540         if (iter) {
1541                 *u = bch2_alloc_unpack(k);
1542         } else {
1543                 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
1544                                            BTREE_ITER_CACHED|
1545                                            BTREE_ITER_CACHED_NOFILL|
1546                                            BTREE_ITER_INTENT);
1547                 ret = bch2_btree_iter_traverse(iter);
1548                 if (ret) {
1549                         bch2_trans_iter_put(trans, iter);
1550                         return ERR_PTR(ret);
1551                 }
1552
1553                 percpu_down_read(&c->mark_lock);
1554                 g = bucket(ca, pos.offset);
1555                 *u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
1556                 percpu_up_read(&c->mark_lock);
1557         }
1558
1559         *_iter = iter;
1560         return a;
1561 }
1562
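/*
 * Adjust the alloc key for the bucket @p points into by @sectors of
 * @data_type, recording the change as an update in the current transaction.
 */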
1563 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1564                         struct bkey_s_c k, struct extent_ptr_decoded p,
1565                         s64 sectors, enum bch_data_type data_type)
1566 {
1567         struct bch_fs *c = trans->c;
1568         struct btree_iter *iter;
1569         struct bkey_alloc_unpacked u;
1570         struct bkey_alloc_buf *a;
1571         int ret;
1572
1573         a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1574         if (IS_ERR(a))
1575                 return PTR_ERR(a);
1576
1577         ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
1578                              &u.dirty_sectors, &u.cached_sectors);
1579         if (ret)
1580                 goto out;
1581
1582         bch2_alloc_pack(c, a, u);
1583         bch2_trans_update(trans, iter, &a->k, 0);
1584 out:
1585         bch2_trans_iter_put(trans, iter);
1586         return ret;
1587 }
1588
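/*
 * Account @sectors against the stripe an erasure coded pointer refers to:
 * bump the per-block sector count in the stripe key and credit the stripe's
 * replicas entry.
 */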
1589 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1590                         struct extent_ptr_decoded p,
1591                         s64 sectors, enum bch_data_type data_type)
1592 {
1593         struct bch_fs *c = trans->c;
1594         struct btree_iter *iter;
1595         struct bkey_s_c k;
1596         struct bkey_i_stripe *s;
1597         struct bch_replicas_padded r;
1598         int ret = 0;
1599
1600         ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.ec.idx), &iter, &k);
1601         if (ret < 0)
1602                 return ret;
1603
1604         if (k.k->type != KEY_TYPE_stripe) {
1605                 bch2_fs_inconsistent(c,
1606                         "pointer to nonexistent stripe %llu",
1607                         (u64) p.ec.idx);
1608                 ret = -EIO;
1609                 goto out;
1610         }
1611
1612         if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1613                 bch2_fs_inconsistent(c,
1614                         "stripe pointer doesn't match stripe %llu",
1615                         (u64) p.ec.idx);
1616                 ret = -EIO;
1617                 goto out;
1618         }
1619
1620         s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1621         ret = PTR_ERR_OR_ZERO(s);
1622         if (ret)
1623                 goto out;
1624
1625         bkey_reassemble(&s->k_i, k);
1626         stripe_blockcount_set(&s->v, p.ec.block,
1627                 stripe_blockcount_get(&s->v, p.ec.block) +
1628                 sectors);
1629         bch2_trans_update(trans, iter, &s->k_i, 0);
1630
1631         bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1632         r.e.data_type = data_type;
1633         update_replicas_list(trans, &r.e, sectors);
1634 out:
1635         bch2_trans_iter_put(trans, iter);
1636         return ret;
1637 }
1638
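/*
 * Transactional mark of an extent: for each pointer, update the alloc key of
 * the bucket it lands in, the cached sector counts for cached pointers, and
 * the stripe block counts for erasure coded pointers, then credit the
 * replicas entry for the extent as a whole.
 */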
1639 static int bch2_trans_mark_extent(struct btree_trans *trans,
1640                         struct bkey_s_c k, unsigned offset,
1641                         s64 sectors, unsigned flags,
1642                         enum bch_data_type data_type)
1643 {
1644         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1645         const union bch_extent_entry *entry;
1646         struct extent_ptr_decoded p;
1647         struct bch_replicas_padded r;
1648         s64 dirty_sectors = 0;
1649         bool stale;
1650         int ret;
1651
1652         r.e.data_type   = data_type;
1653         r.e.nr_devs     = 0;
1654         r.e.nr_required = 1;
1655
1656         BUG_ON(!sectors);
1657
1658         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1659                 s64 disk_sectors = data_type == BCH_DATA_btree
1660                         ? sectors
1661                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1662
1663                 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1664                                               data_type);
1665                 if (ret < 0)
1666                         return ret;
1667
1668                 stale = ret > 0;
1669
1670                 if (p.ptr.cached) {
1671                         if (!stale)
1672                                 update_cached_sectors_list(trans, p.ptr.dev,
1673                                                            disk_sectors);
1674                 } else if (!p.has_ec) {
1675                         dirty_sectors          += disk_sectors;
1676                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1677                 } else {
1678                         ret = bch2_trans_mark_stripe_ptr(trans, p,
1679                                         disk_sectors, data_type);
1680                         if (ret)
1681                                 return ret;
1682
1683                         r.e.nr_required = 0;
1684                 }
1685         }
1686
1687         if (r.e.nr_devs)
1688                 update_replicas_list(trans, &r.e, dirty_sectors);
1689
1690         return 0;
1691 }
1692
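/*
 * Update the alloc key for block @idx of stripe @s: parity blocks have their
 * dirty sector counts adjusted, and every block's bucket records which stripe
 * is using it (cleared again when the stripe is deleted).
 */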
1693 static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
1694                                             struct bkey_s_c_stripe s,
1695                                             unsigned idx, bool deleting)
1696 {
1697         struct bch_fs *c = trans->c;
1698         const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1699         struct bkey_alloc_buf *a;
1700         struct btree_iter *iter;
1701         struct bkey_alloc_unpacked u;
1702         bool parity = idx >= s.v->nr_blocks - s.v->nr_redundant;
1703         int ret = 0;
1704
1705         a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1706         if (IS_ERR(a))
1707                 return PTR_ERR(a);
1708
1709         if (parity) {
1710                 s64 sectors = le16_to_cpu(s.v->sectors);
1711
1712                 if (deleting)
1713                         sectors = -sectors;
1714
1715                 u.dirty_sectors += sectors;
1716                 u.data_type = u.dirty_sectors
1717                         ? BCH_DATA_parity
1718                         : 0;
1719         }
1720
1721         if (!deleting) {
1722                 if (bch2_fs_inconsistent_on(u.stripe && u.stripe != s.k->p.offset, c,
1723                                 "bucket %llu:%llu gen %u: multiple stripes using same bucket (%u, %llu)",
1724                                 iter->pos.inode, iter->pos.offset, u.gen,
1725                                 u.stripe, s.k->p.offset)) {
1726                         ret = -EIO;
1727                         goto err;
1728                 }
1729
1730                 u.stripe                = s.k->p.offset;
1731                 u.stripe_redundancy     = s.v->nr_redundant;
1732         } else {
1733                 u.stripe                = 0;
1734                 u.stripe_redundancy     = 0;
1735         }
1736
1737         bch2_alloc_pack(c, a, u);
1738         bch2_trans_update(trans, iter, &a->k, 0);
1739 err:
1740         bch2_trans_iter_put(trans, iter);
1741         return ret;
1742 }
1743
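/*
 * Transactional mark of a stripe key: nothing to do if the block pointers are
 * unchanged; otherwise account the new stripe's parity sectors and bucket
 * references, then undo the old stripe's.
 */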
1744 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1745                                   struct bkey_s_c old, struct bkey_s_c new,
1746                                   unsigned flags)
1747 {
1748         struct bkey_s_c_stripe old_s = { NULL };
1749         struct bkey_s_c_stripe new_s = { NULL };
1750         struct bch_replicas_padded r;
1751         unsigned i;
1752         int ret = 0;
1753
1754         if (old.k->type == KEY_TYPE_stripe)
1755                 old_s = bkey_s_c_to_stripe(old);
1756         if (new.k->type == KEY_TYPE_stripe)
1757                 new_s = bkey_s_c_to_stripe(new);
1758
1759         /*
1760          * If the pointers aren't changing, we don't need to do anything:
1761          */
1762         if (new_s.k && old_s.k &&
1763             new_s.v->nr_blocks          == old_s.v->nr_blocks &&
1764             new_s.v->nr_redundant       == old_s.v->nr_redundant &&
1765             !memcmp(old_s.v->ptrs, new_s.v->ptrs,
1766                     new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
1767                 return 0;
1768
1769         if (new_s.k) {
1770                 s64 sectors = le16_to_cpu(new_s.v->sectors);
1771
1772                 bch2_bkey_to_replicas(&r.e, new);
1773                 update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
1774
1775                 for (i = 0; i < new_s.v->nr_blocks; i++) {
1776                         ret = bch2_trans_mark_stripe_alloc_ref(trans, new_s,
1777                                                                i, false);
1778                         if (ret)
1779                                 return ret;
1780                 }
1781         }
1782
1783         if (old_s.k) {
1784                 s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
1785
1786                 bch2_bkey_to_replicas(&r.e, old);
1787                 update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
1788
1789                 for (i = 0; i < old_s.v->nr_blocks; i++) {
1790                         ret = bch2_trans_mark_stripe_alloc_ref(trans, old_s,
1791                                                                i, true);
1792                         if (ret)
1793                                 return ret;
1794                 }
1795         }
1796
1797         return ret;
1798 }
1799
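/* Return a pointer to the refcount field of a reflink value, if it has one: */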
1800 static __le64 *bkey_refcount(struct bkey_i *k)
1801 {
1802         switch (k->k.type) {
1803         case KEY_TYPE_reflink_v:
1804                 return &bkey_i_to_reflink_v(k)->v.refcount;
1805         case KEY_TYPE_indirect_inline_data:
1806                 return &bkey_i_to_indirect_inline_data(k)->v.refcount;
1807         default:
1808                 return NULL;
1809         }
1810 }
1811
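/*
 * Adjust the refcount of the single indirect extent at @idx that @p points
 * into: incremented on insert, decremented on overwrite, and the indirect
 * extent is deleted when its refcount reaches zero. Returns the number of
 * sectors processed, so the caller can advance and repeat.
 */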
1812 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1813                         struct bkey_s_c_reflink_p p,
1814                         u64 idx, unsigned sectors,
1815                         unsigned flags)
1816 {
1817         struct bch_fs *c = trans->c;
1818         struct btree_iter *iter;
1819         struct bkey_s_c k;
1820         struct bkey_i *n;
1821         __le64 *refcount;
1822         s64 ret;
1823
1824         ret = trans_get_key(trans, BTREE_ID_REFLINK,
1825                             POS(0, idx), &iter, &k);
1826         if (ret < 0)
1827                 return ret;
1828
1829         if ((flags & BTREE_TRIGGER_OVERWRITE) &&
1830             (bkey_start_offset(k.k) < idx ||
1831              k.k->p.offset > idx + sectors))
1832                 goto out;
1833
1834         sectors = k.k->p.offset - idx;
1835
1836         n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1837         ret = PTR_ERR_OR_ZERO(n);
1838         if (ret)
1839                 goto err;
1840
1841         bkey_reassemble(n, k);
1842
1843         refcount = bkey_refcount(n);
1844         if (!refcount) {
1845                 bch2_fs_inconsistent(c,
1846                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1847                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1848                 ret = -EIO;
1849                 goto err;
1850         }
1851
1852         le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
1853
1854         if (!*refcount) {
1855                 n->k.type = KEY_TYPE_deleted;
1856                 set_bkey_val_u64s(&n->k, 0);
1857         }
1858
1859         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1860         BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1861
1862         bch2_trans_update(trans, iter, n, 0);
1863 out:
1864         ret = sectors;
1865 err:
1866         bch2_trans_iter_put(trans, iter);
1867         return ret;
1868 }
1869
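/*
 * Walk all the indirect extents covered by a reflink pointer, adjusting each
 * one's refcount in turn.
 */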
1870 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1871                         struct bkey_s_c_reflink_p p, unsigned offset,
1872                         s64 sectors, unsigned flags)
1873 {
1874         u64 idx = le64_to_cpu(p.v->idx) + offset;
1875         s64 ret = 0;
1876
1877         sectors = abs(sectors);
1878         BUG_ON(offset + sectors > p.k->size);
1879
1880         while (sectors) {
1881                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
1882                 if (ret < 0)
1883                         break;
1884
1885                 idx += ret;
1886                 sectors = max_t(s64, 0LL, sectors - ret);
1887                 ret = 0;
1888         }
1889
1890         return ret;
1891 }
1892
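/*
 * Transactional triggers: dispatch on key type and record the disk accounting
 * changes implied by inserting or overwriting this key as further updates and
 * replicas deltas in the current transaction.
 */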
1893 int bch2_trans_mark_key(struct btree_trans *trans,
1894                         struct bkey_s_c old,
1895                         struct bkey_s_c new,
1896                         unsigned offset, s64 sectors, unsigned flags)
1897 {
1898         struct bch_fs *c = trans->c;
1899         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1900         struct replicas_delta_list *d;
1901
1902         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1903
1904         switch (k.k->type) {
1905         case KEY_TYPE_btree_ptr:
1906         case KEY_TYPE_btree_ptr_v2:
1907                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1908                         ?  c->opts.btree_node_size
1909                         : -c->opts.btree_node_size;
1910
1911                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1912                                               flags, BCH_DATA_btree);
1913         case KEY_TYPE_extent:
1914         case KEY_TYPE_reflink_v:
1915                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1916                                               flags, BCH_DATA_user);
1917         case KEY_TYPE_stripe:
1918                 return bch2_trans_mark_stripe(trans, old, new, flags);
1919         case KEY_TYPE_inode: {
1920                 int nr = (new.k->type == KEY_TYPE_inode) -
1921                          (old.k->type == KEY_TYPE_inode);
1922
1923                 if (nr) {
1924                         d = replicas_deltas_realloc(trans, 0);
1925                         d->nr_inodes += nr;
1926                 }
1927
1928                 return 0;
1929         }
1930         case KEY_TYPE_reservation: {
1931                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1932
1933                 d = replicas_deltas_realloc(trans, 0);
1934
1935                 sectors *= replicas;
1936                 replicas = clamp_t(unsigned, replicas, 1,
1937                                    ARRAY_SIZE(d->persistent_reserved));
1938
1939                 d->persistent_reserved[replicas - 1] += sectors;
1940                 return 0;
1941         }
1942         case KEY_TYPE_reflink_p:
1943                 return bch2_trans_mark_reflink_p(trans,
1944                                         bkey_s_c_to_reflink_p(k),
1945                                         offset, sectors, flags);
1946         default:
1947                 return 0;
1948         }
1949 }
1950
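/*
 * Run transactional triggers for one update in a transaction: on non-extent
 * btrees, mark the old and new keys; on extent btrees, mark the inserted key
 * and then each existing key it overlaps, computing the overlap type and the
 * number of sectors being overwritten.
 */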
1951 int bch2_trans_mark_update(struct btree_trans *trans,
1952                            struct btree_iter *iter,
1953                            struct bkey_i *new,
1954                            unsigned flags)
1955 {
1956         struct bkey_s_c old;
1957         int ret;
1958
1959         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1960                 return 0;
1961
1962         if (!btree_node_type_needs_gc(iter->btree_id))
1963                 return 0;
1964
1965         if (!btree_node_type_is_extents(iter->btree_id)) {
1966                 /* iterators should be uptodate, shouldn't get errors here: */
1967                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1968                         old = bch2_btree_iter_peek_slot(iter);
1969                         BUG_ON(bkey_err(old));
1970                 } else {
1971                         struct bkey_cached *ck = (void *) iter->l[0].b;
1972
1973                         BUG_ON(!ck->valid);
1974                         old = bkey_i_to_s_c(ck->k);
1975                 }
1976
1977                 if (old.k->type == new->k.type) {
1978                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1979                                         BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1980                 } else {
1981                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1982                                         BTREE_TRIGGER_INSERT|flags) ?:
1983                                 bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1984                                         BTREE_TRIGGER_OVERWRITE|flags);
1985                 }
1986         } else {
1987                 struct btree            *b = iter_l(iter)->b;
1988                 struct btree_node_iter  node_iter = iter_l(iter)->iter;
1989                 struct bkey_packed      *_old;
1990                 struct bkey             unpacked;
1991
1992                 EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1993
1994                 bkey_init(&unpacked);
1995                 old = (struct bkey_s_c) { &unpacked, NULL };
1996
1997                 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
1998                                           0, new->k.size,
1999                                           BTREE_TRIGGER_INSERT);
2000                 if (ret)
2001                         return ret;
2002
2003                 while ((_old = bch2_btree_node_iter_peek(&node_iter, b))) {
2004                         unsigned flags = BTREE_TRIGGER_OVERWRITE;
2005                         unsigned offset = 0;
2006                         s64 sectors;
2007
2008                         old = bkey_disassemble(b, _old, &unpacked);
2009                         sectors = -((s64) old.k->size);
2010
2011                         flags |= BTREE_TRIGGER_OVERWRITE;
2012
2013                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
2014                                 return 0;
2015
2016                         switch (bch2_extent_overlap(&new->k, old.k)) {
2017                         case BCH_EXTENT_OVERLAP_ALL:
2018                                 offset = 0;
2019                                 sectors = -((s64) old.k->size);
2020                                 break;
2021                         case BCH_EXTENT_OVERLAP_BACK:
2022                                 offset = bkey_start_offset(&new->k) -
2023                                         bkey_start_offset(old.k);
2024                                 sectors = bkey_start_offset(&new->k) -
2025                                         old.k->p.offset;
2026                                 break;
2027                         case BCH_EXTENT_OVERLAP_FRONT:
2028                                 offset = 0;
2029                                 sectors = bkey_start_offset(old.k) -
2030                                         new->k.p.offset;
2031                                 break;
2032                         case BCH_EXTENT_OVERLAP_MIDDLE:
2033                                 offset = bkey_start_offset(&new->k) -
2034                                         bkey_start_offset(old.k);
2035                                 sectors = -((s64) new->k.size);
2036                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
2037                                 break;
2038                         }
2039
2040                         BUG_ON(sectors >= 0);
2041
2042                         ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
2043                                         offset, sectors, flags);
2044                         if (ret)
2045                                 return ret;
2046
2047                         bch2_btree_node_iter_advance(&node_iter, b);
2048                 }
2049         }
2050
2051         return ret;
2052 }
2053
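/*
 * Mark an entire bucket as holding metadata of @type (e.g. superblock or
 * journal) via its alloc key, checking for conflicting data types and sector
 * count overflow.
 */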
2054 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2055                                     struct bch_dev *ca, size_t b,
2056                                     enum bch_data_type type,
2057                                     unsigned sectors)
2058 {
2059         struct bch_fs *c = trans->c;
2060         struct btree_iter *iter;
2061         struct bkey_alloc_unpacked u;
2062         struct bkey_alloc_buf *a;
2063         struct bch_extent_ptr ptr = {
2064                 .dev = ca->dev_idx,
2065                 .offset = bucket_to_sector(ca, b),
2066         };
2067         int ret = 0;
2068
2069         a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
2070         if (IS_ERR(a))
2071                 return PTR_ERR(a);
2072
2073         if (u.data_type && u.data_type != type) {
2074                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
2075                         "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
2076                         "while marking %s",
2077                         iter->pos.inode, iter->pos.offset, u.gen,
2078                         bch2_data_types[u.data_type],
2079                         bch2_data_types[type],
2080                         bch2_data_types[type]);
2081                 ret = -EIO;
2082                 goto out;
2083         }
2084
2085         if ((unsigned) (u.dirty_sectors + sectors) > ca->mi.bucket_size) {
2086                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
2087                         "bucket %llu:%llu gen %u data type %s sector count overflow: %u + %u > %u\n"
2088                         "while marking %s",
2089                         iter->pos.inode, iter->pos.offset, u.gen,
2090                         bch2_data_types[u.data_type ?: type],
2091                         u.dirty_sectors, sectors, ca->mi.bucket_size,
2092                         bch2_data_types[type]);
2093                 ret = -EIO;
2094                 goto out;
2095         }
2096
2097         if (u.data_type         == type &&
2098             u.dirty_sectors     == sectors)
2099                 goto out;
2100
2101         u.data_type     = type;
2102         u.dirty_sectors = sectors;
2103
2104         bch2_alloc_pack(c, a, u);
2105         bch2_trans_update(trans, iter, &a->k, 0);
2106 out:
2107         bch2_trans_iter_put(trans, iter);
2108         return ret;
2109 }
2110
2111 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2112                                     struct disk_reservation *res,
2113                                     struct bch_dev *ca, size_t b,
2114                                     enum bch_data_type type,
2115                                     unsigned sectors)
2116 {
2117         return __bch2_trans_do(trans, res, NULL, 0,
2118                         __bch2_trans_mark_metadata_bucket(trans, ca, b,
2119                                                         type, sectors));
2120
2121 }
2122
2123 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
2124                                             struct disk_reservation *res,
2125                                             struct bch_dev *ca,
2126                                             u64 start, u64 end,
2127                                             enum bch_data_type type,
2128                                             u64 *bucket, unsigned *bucket_sectors)
2129 {
2130         int ret = 0;
2131
2132         do {
2133                 u64 b = sector_to_bucket(ca, start);
2134                 unsigned sectors =
2135                         min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
2136
2137                 if (b != *bucket) {
2138                         if (*bucket_sectors) {
2139                                 ret = bch2_trans_mark_metadata_bucket(trans, res, ca,
2140                                                 *bucket, type, *bucket_sectors);
2141                                 if (ret)
2142                                         return ret;
2143                         }
2144
2145                         *bucket         = b;
2146                         *bucket_sectors = 0;
2147                 }
2148
2149                 *bucket_sectors += sectors;
2150                 start += sectors;
2151         } while (!ret && start < end);
2152
2153         return 0;
2154 }
2155
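/*
 * Mark a device's superblock and journal buckets: walk the superblock layout
 * accumulating sectors per bucket, then mark each journal bucket in full.
 */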
2156 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
2157                              struct disk_reservation *res,
2158                              struct bch_dev *ca)
2159 {
2160         struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
2161         u64 bucket = 0;
2162         unsigned i, bucket_sectors = 0;
2163         int ret;
2164
2165         for (i = 0; i < layout->nr_superblocks; i++) {
2166                 u64 offset = le64_to_cpu(layout->sb_offset[i]);
2167
2168                 if (offset == BCH_SB_SECTOR) {
2169                         ret = bch2_trans_mark_metadata_sectors(trans, res, ca,
2170                                                 0, BCH_SB_SECTOR,
2171                                                 BCH_DATA_sb, &bucket, &bucket_sectors);
2172                         if (ret)
2173                                 return ret;
2174                 }
2175
2176                 ret = bch2_trans_mark_metadata_sectors(trans, res, ca, offset,
2177                                       offset + (1 << layout->sb_max_size_bits),
2178                                       BCH_DATA_sb, &bucket, &bucket_sectors);
2179                 if (ret)
2180                         return ret;
2181         }
2182
2183         if (bucket_sectors) {
2184                 ret = bch2_trans_mark_metadata_bucket(trans, res, ca,
2185                                 bucket, BCH_DATA_sb, bucket_sectors);
2186                 if (ret)
2187                         return ret;
2188         }
2189
2190         for (i = 0; i < ca->journal.nr; i++) {
2191                 ret = bch2_trans_mark_metadata_bucket(trans, res, ca,
2192                                 ca->journal.buckets[i],
2193                                 BCH_DATA_journal, ca->mi.bucket_size);
2194                 if (ret)
2195                         return ret;
2196         }
2197
2198         return 0;
2199 }
2200
2201 int bch2_trans_mark_dev_sb(struct bch_fs *c,
2202                            struct disk_reservation *res,
2203                            struct bch_dev *ca)
2204 {
2205         return bch2_trans_do(c, res, NULL, 0,
2206                         __bch2_trans_mark_dev_sb(&trans, res, ca));
2207 }
2208
2209 /* Disk reservations: */
2210
2211 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
2212 {
2213         percpu_down_read(&c->mark_lock);
2214         this_cpu_sub(c->usage[0]->online_reserved,
2215                      res->sectors);
2216         percpu_up_read(&c->mark_lock);
2217
2218         res->sectors = 0;
2219 }
2220
2221 #define SECTORS_CACHE   1024
2222
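/*
 * Reserve @sectors of disk space, drawing on a percpu cache of
 * c->sectors_available that is refilled in batches of SECTORS_CACHE; if the
 * cache runs dry, recompute free space under sectors_available_lock. Fails
 * with -ENOSPC unless BCH_DISK_RESERVATION_NOFAIL is given.
 *
 * A rough caller-side sketch (assuming a zero-initialized struct
 * disk_reservation is a valid empty reservation, as the put path above
 * suggests):
 *
 *	struct disk_reservation res = { 0 };
 *	int ret;
 *
 *	ret = bch2_disk_reservation_add(c, &res, sectors, 0);
 *	if (ret)
 *		return ret;
 *
 *	... do the work the reservation covers ...
 *
 *	__bch2_disk_reservation_put(c, &res);
 */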
2223 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2224                               u64 sectors, int flags)
2225 {
2226         struct bch_fs_pcpu *pcpu;
2227         u64 old, v, get;
2228         s64 sectors_available;
2229         int ret;
2230
2231         percpu_down_read(&c->mark_lock);
2232         preempt_disable();
2233         pcpu = this_cpu_ptr(c->pcpu);
2234
2235         if (sectors <= pcpu->sectors_available)
2236                 goto out;
2237
2238         v = atomic64_read(&c->sectors_available);
2239         do {
2240                 old = v;
2241                 get = min((u64) sectors + SECTORS_CACHE, old);
2242
2243                 if (get < sectors) {
2244                         preempt_enable();
2245                         goto recalculate;
2246                 }
2247         } while ((v = atomic64_cmpxchg(&c->sectors_available,
2248                                        old, old - get)) != old);
2249
2250         pcpu->sectors_available         += get;
2251
2252 out:
2253         pcpu->sectors_available         -= sectors;
2254         this_cpu_add(c->usage[0]->online_reserved, sectors);
2255         res->sectors                    += sectors;
2256
2257         preempt_enable();
2258         percpu_up_read(&c->mark_lock);
2259         return 0;
2260
2261 recalculate:
2262         mutex_lock(&c->sectors_available_lock);
2263
2264         percpu_u64_set(&c->pcpu->sectors_available, 0);
2265         sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2266
2267         if (sectors <= sectors_available ||
2268             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2269                 atomic64_set(&c->sectors_available,
2270                              max_t(s64, 0, sectors_available - sectors));
2271                 this_cpu_add(c->usage[0]->online_reserved, sectors);
2272                 res->sectors                    += sectors;
2273                 ret = 0;
2274         } else {
2275                 atomic64_set(&c->sectors_available, sectors_available);
2276                 ret = -ENOSPC;
2277         }
2278
2279         mutex_unlock(&c->sectors_available_lock);
2280         percpu_up_read(&c->mark_lock);
2281
2282         return ret;
2283 }
2284
2285 /* Startup/shutdown: */
2286
2287 static void buckets_free_rcu(struct rcu_head *rcu)
2288 {
2289         struct bucket_array *buckets =
2290                 container_of(rcu, struct bucket_array, rcu);
2291
2292         kvpfree(buckets,
2293                 sizeof(struct bucket_array) +
2294                 buckets->nbuckets * sizeof(struct bucket));
2295 }
2296
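/*
 * Allocate (or reallocate, when a device is resized) a device's bucket array,
 * nouse bitmap, freelists and allocation heap, copying existing bucket marks
 * across under the appropriate locks and freeing the old array via RCU.
 */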
2297 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2298 {
2299         struct bucket_array *buckets = NULL, *old_buckets = NULL;
2300         unsigned long *buckets_nouse = NULL;
2301         alloc_fifo      free[RESERVE_NR];
2302         alloc_fifo      free_inc;
2303         alloc_heap      alloc_heap;
2304
2305         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2306                              ca->mi.bucket_size / c->opts.btree_node_size);
2307         /* XXX: these should be tunable */
2308         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
2309         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 6);
2310         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
2311                                       btree_reserve * 2);
2312         bool resize = ca->buckets[0] != NULL;
2313         int ret = -ENOMEM;
2314         unsigned i;
2315
2316         memset(&free,           0, sizeof(free));
2317         memset(&free_inc,       0, sizeof(free_inc));
2318         memset(&alloc_heap,     0, sizeof(alloc_heap));
2319
2320         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
2321                                             nbuckets * sizeof(struct bucket),
2322                                             GFP_KERNEL|__GFP_ZERO)) ||
2323             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2324                                             sizeof(unsigned long),
2325                                             GFP_KERNEL|__GFP_ZERO)) ||
2326             !init_fifo(&free[RESERVE_MOVINGGC],
2327                        copygc_reserve, GFP_KERNEL) ||
2328             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2329             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
2330             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2331                 goto err;
2332
2333         buckets->first_bucket   = ca->mi.first_bucket;
2334         buckets->nbuckets       = nbuckets;
2335
2336         bch2_copygc_stop(c);
2337
2338         if (resize) {
2339                 down_write(&c->gc_lock);
2340                 down_write(&ca->bucket_lock);
2341                 percpu_down_write(&c->mark_lock);
2342         }
2343
2344         old_buckets = bucket_array(ca);
2345
2346         if (resize) {
2347                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2348
2349                 memcpy(buckets->b,
2350                        old_buckets->b,
2351                        n * sizeof(struct bucket));
2352                 memcpy(buckets_nouse,
2353                        ca->buckets_nouse,
2354                        BITS_TO_LONGS(n) * sizeof(unsigned long));
2355         }
2356
2357         rcu_assign_pointer(ca->buckets[0], buckets);
2358         buckets = old_buckets;
2359
2360         swap(ca->buckets_nouse, buckets_nouse);
2361
2362         if (resize) {
2363                 percpu_up_write(&c->mark_lock);
2364                 up_write(&c->gc_lock);
2365         }
2366
2367         spin_lock(&c->freelist_lock);
2368         for (i = 0; i < RESERVE_NR; i++) {
2369                 fifo_move(&free[i], &ca->free[i]);
2370                 swap(ca->free[i], free[i]);
2371         }
2372         fifo_move(&free_inc, &ca->free_inc);
2373         swap(ca->free_inc, free_inc);
2374         spin_unlock(&c->freelist_lock);
2375
2376         /* with gc lock held, alloc_heap can't be in use: */
2377         swap(ca->alloc_heap, alloc_heap);
2378
2379         nbuckets = ca->mi.nbuckets;
2380
2381         if (resize)
2382                 up_write(&ca->bucket_lock);
2383
2384         ret = 0;
2385 err:
2386         free_heap(&alloc_heap);
2387         free_fifo(&free_inc);
2388         for (i = 0; i < RESERVE_NR; i++)
2389                 free_fifo(&free[i]);
2390         kvpfree(buckets_nouse,
2391                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2392         if (buckets)
2393                 call_rcu(&buckets->rcu, buckets_free_rcu);
2394
2395         return ret;
2396 }
2397
2398 void bch2_dev_buckets_free(struct bch_dev *ca)
2399 {
2400         unsigned i;
2401
2402         free_heap(&ca->alloc_heap);
2403         free_fifo(&ca->free_inc);
2404         for (i = 0; i < RESERVE_NR; i++)
2405                 free_fifo(&ca->free[i]);
2406         kvpfree(ca->buckets_nouse,
2407                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2408         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2409                 sizeof(struct bucket_array) +
2410                 ca->mi.nbuckets * sizeof(struct bucket));
2411
2412         for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2413                 free_percpu(ca->usage[i]);
2414         kfree(ca->usage_base);
2415 }
2416
2417 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2418 {
2419         unsigned i;
2420
2421         ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2422         if (!ca->usage_base)
2423                 return -ENOMEM;
2424
2425         for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2426                 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2427                 if (!ca->usage[i])
2428                         return -ENOMEM;
2429         }
2430
2431         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2432 }