]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/buckets.c
Update bcachefs sources to 0cd3e1d27a bcachefs: Fix for bch2_bkey_pack_pos() not...
[bcachefs-tools-debian] / libbcachefs / buckets.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for manipulating bucket marks for garbage collection.
4  *
5  * Copyright 2014 Datera, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc_background.h"
10 #include "bset.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "buckets.h"
14 #include "ec.h"
15 #include "error.h"
16 #include "movinggc.h"
17 #include "replicas.h"
18
19 #include <linux/preempt.h>
20 #include <trace/events/bcachefs.h>
21
22 static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
23                                               enum bch_data_type data_type,
24                                               s64 sectors)
25 {
26         switch (data_type) {
27         case BCH_DATA_btree:
28                 fs_usage->btree         += sectors;
29                 break;
30         case BCH_DATA_user:
31         case BCH_DATA_parity:
32                 fs_usage->data          += sectors;
33                 break;
34         case BCH_DATA_cached:
35                 fs_usage->cached        += sectors;
36                 break;
37         default:
38                 break;
39         }
40 }
41
42 /*
43  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
44  * wraparound:
45  */
46 void bch2_bucket_seq_cleanup(struct bch_fs *c)
47 {
48         u64 journal_seq = atomic64_read(&c->journal.seq);
49         u16 last_seq_ondisk = c->journal.last_seq_ondisk;
50         struct bch_dev *ca;
51         struct bucket_array *buckets;
52         struct bucket *g;
53         struct bucket_mark m;
54         unsigned i;
55
56         if (journal_seq - c->last_bucket_seq_cleanup <
57             (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
58                 return;
59
60         c->last_bucket_seq_cleanup = journal_seq;
61
62         for_each_member_device(ca, c, i) {
63                 down_read(&ca->bucket_lock);
64                 buckets = bucket_array(ca);
65
66                 for_each_bucket(g, buckets) {
67                         bucket_cmpxchg(g, m, ({
68                                 if (!m.journal_seq_valid ||
69                                     bucket_needs_journal_commit(m, last_seq_ondisk))
70                                         break;
71
72                                 m.journal_seq_valid = 0;
73                         }));
74                 }
75                 up_read(&ca->bucket_lock);
76         }
77 }
78
79 void bch2_fs_usage_initialize(struct bch_fs *c)
80 {
81         struct bch_fs_usage *usage;
82         struct bch_dev *ca;
83         unsigned i;
84
85         percpu_down_write(&c->mark_lock);
86         usage = c->usage_base;
87
88         for (i = 0; i < ARRAY_SIZE(c->usage); i++)
89                 bch2_fs_usage_acc_to_base(c, i);
90
91         for (i = 0; i < BCH_REPLICAS_MAX; i++)
92                 usage->reserved += usage->persistent_reserved[i];
93
94         for (i = 0; i < c->replicas.nr; i++) {
95                 struct bch_replicas_entry *e =
96                         cpu_replicas_entry(&c->replicas, i);
97
98                 fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
99         }
100
101         for_each_member_device(ca, c, i) {
102                 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
103
104                 usage->hidden += (dev.d[BCH_DATA_sb].buckets +
105                                   dev.d[BCH_DATA_journal].buckets) *
106                         ca->mi.bucket_size;
107         }
108
109         percpu_up_write(&c->mark_lock);
110 }
111
112 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
113                                                   unsigned journal_seq,
114                                                   bool gc)
115 {
116         return this_cpu_ptr(gc
117                             ? ca->usage_gc
118                             : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
119 }
120
121 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
122 {
123         struct bch_fs *c = ca->fs;
124         struct bch_dev_usage ret;
125         unsigned seq, i, u64s = dev_usage_u64s();
126
127         do {
128                 seq = read_seqcount_begin(&c->usage_lock);
129                 memcpy(&ret, ca->usage_base, u64s * sizeof(u64));
130                 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
131                         acc_u64s_percpu((u64 *) &ret, (u64 __percpu *) ca->usage[i], u64s);
132         } while (read_seqcount_retry(&c->usage_lock, seq));
133
134         return ret;
135 }
136
137 static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
138                                                 unsigned journal_seq,
139                                                 bool gc)
140 {
141         return this_cpu_ptr(gc
142                             ? c->usage_gc
143                             : c->usage[journal_seq & JOURNAL_BUF_MASK]);
144 }
145
146 u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
147 {
148         ssize_t offset = v - (u64 *) c->usage_base;
149         unsigned i, seq;
150         u64 ret;
151
152         BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
153         percpu_rwsem_assert_held(&c->mark_lock);
154
155         do {
156                 seq = read_seqcount_begin(&c->usage_lock);
157                 ret = *v;
158
159                 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
160                         ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
161         } while (read_seqcount_retry(&c->usage_lock, seq));
162
163         return ret;
164 }
165
166 struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
167 {
168         struct bch_fs_usage_online *ret;
169         unsigned seq, i, u64s;
170
171         percpu_down_read(&c->mark_lock);
172
173         ret = kmalloc(sizeof(struct bch_fs_usage_online) +
174                       sizeof(u64) * c->replicas.nr, GFP_NOFS);
175         if (unlikely(!ret)) {
176                 percpu_up_read(&c->mark_lock);
177                 return NULL;
178         }
179
180         ret->online_reserved = percpu_u64_get(c->online_reserved);
181
182         u64s = fs_usage_u64s(c);
183         do {
184                 seq = read_seqcount_begin(&c->usage_lock);
185                 memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
186                 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
187                         acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
188         } while (read_seqcount_retry(&c->usage_lock, seq));
189
190         return ret;
191 }
192
193 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
194 {
195         struct bch_dev *ca;
196         unsigned i, u64s = fs_usage_u64s(c);
197
198         BUG_ON(idx >= ARRAY_SIZE(c->usage));
199
200         preempt_disable();
201         write_seqcount_begin(&c->usage_lock);
202
203         acc_u64s_percpu((u64 *) c->usage_base,
204                         (u64 __percpu *) c->usage[idx], u64s);
205         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
206
207         rcu_read_lock();
208         for_each_member_device_rcu(ca, c, i, NULL) {
209                 u64s = dev_usage_u64s();
210
211                 acc_u64s_percpu((u64 *) ca->usage_base,
212                                 (u64 __percpu *) ca->usage[idx], u64s);
213                 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
214         }
215         rcu_read_unlock();
216
217         write_seqcount_end(&c->usage_lock);
218         preempt_enable();
219 }
220
221 void bch2_fs_usage_to_text(struct printbuf *out,
222                            struct bch_fs *c,
223                            struct bch_fs_usage_online *fs_usage)
224 {
225         unsigned i;
226
227         pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
228
229         pr_buf(out, "hidden:\t\t\t\t%llu\n",
230                fs_usage->u.hidden);
231         pr_buf(out, "data:\t\t\t\t%llu\n",
232                fs_usage->u.data);
233         pr_buf(out, "cached:\t\t\t\t%llu\n",
234                fs_usage->u.cached);
235         pr_buf(out, "reserved:\t\t\t%llu\n",
236                fs_usage->u.reserved);
237         pr_buf(out, "nr_inodes:\t\t\t%llu\n",
238                fs_usage->u.nr_inodes);
239         pr_buf(out, "online reserved:\t\t%llu\n",
240                fs_usage->online_reserved);
241
242         for (i = 0;
243              i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
244              i++) {
245                 pr_buf(out, "%u replicas:\n", i + 1);
246                 pr_buf(out, "\treserved:\t\t%llu\n",
247                        fs_usage->u.persistent_reserved[i]);
248         }
249
250         for (i = 0; i < c->replicas.nr; i++) {
251                 struct bch_replicas_entry *e =
252                         cpu_replicas_entry(&c->replicas, i);
253
254                 pr_buf(out, "\t");
255                 bch2_replicas_entry_to_text(out, e);
256                 pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
257         }
258 }
259
260 #define RESERVE_FACTOR  6
261
262 static u64 reserve_factor(u64 r)
263 {
264         return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
265 }
266
267 static u64 avail_factor(u64 r)
268 {
269         return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
270 }
271
272 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
273 {
274         return min(fs_usage->u.hidden +
275                    fs_usage->u.btree +
276                    fs_usage->u.data +
277                    reserve_factor(fs_usage->u.reserved +
278                                   fs_usage->online_reserved),
279                    c->capacity);
280 }
281
282 static struct bch_fs_usage_short
283 __bch2_fs_usage_read_short(struct bch_fs *c)
284 {
285         struct bch_fs_usage_short ret;
286         u64 data, reserved;
287
288         ret.capacity = c->capacity -
289                 bch2_fs_usage_read_one(c, &c->usage_base->hidden);
290
291         data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
292                 bch2_fs_usage_read_one(c, &c->usage_base->btree);
293         reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
294                 percpu_u64_get(c->online_reserved);
295
296         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
297         ret.free        = ret.capacity - ret.used;
298
299         ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
300
301         return ret;
302 }
303
304 struct bch_fs_usage_short
305 bch2_fs_usage_read_short(struct bch_fs *c)
306 {
307         struct bch_fs_usage_short ret;
308
309         percpu_down_read(&c->mark_lock);
310         ret = __bch2_fs_usage_read_short(c);
311         percpu_up_read(&c->mark_lock);
312
313         return ret;
314 }
315
316 static inline int is_unavailable_bucket(struct bucket_mark m)
317 {
318         return !is_available_bucket(m);
319 }
320
321 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
322                                             struct bucket_mark m)
323 {
324         return bucket_sectors_used(m)
325                 ? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
326                 : 0;
327 }
328
329 static inline int is_stripe_data_bucket(struct bucket_mark m)
330 {
331         return m.stripe && m.data_type != BCH_DATA_parity;
332 }
333
334 static inline enum bch_data_type bucket_type(struct bucket_mark m)
335 {
336         return m.cached_sectors && !m.dirty_sectors
337                 ? BCH_DATA_cached
338                 : m.data_type;
339 }
340
341 static bool bucket_became_unavailable(struct bucket_mark old,
342                                       struct bucket_mark new)
343 {
344         return is_available_bucket(old) &&
345                !is_available_bucket(new);
346 }
347
348 static inline void account_bucket(struct bch_fs_usage *fs_usage,
349                                   struct bch_dev_usage *dev_usage,
350                                   enum bch_data_type type,
351                                   int nr, s64 size)
352 {
353         if (type == BCH_DATA_sb || type == BCH_DATA_journal)
354                 fs_usage->hidden        += size;
355
356         dev_usage->d[type].buckets      += nr;
357 }
358
359 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
360                                   struct bch_fs_usage *fs_usage,
361                                   struct bucket_mark old, struct bucket_mark new,
362                                   u64 journal_seq, bool gc)
363 {
364         struct bch_dev_usage *u;
365
366         percpu_rwsem_assert_held(&c->mark_lock);
367
368         preempt_disable();
369         if (!fs_usage)
370                 fs_usage = fs_usage_ptr(c, journal_seq, gc);
371         u = dev_usage_ptr(ca, journal_seq, gc);
372
373         if (bucket_type(old))
374                 account_bucket(fs_usage, u, bucket_type(old),
375                                -1, -ca->mi.bucket_size);
376
377         if (bucket_type(new))
378                 account_bucket(fs_usage, u, bucket_type(new),
379                                1, ca->mi.bucket_size);
380
381         u->buckets_ec += (int) new.stripe - (int) old.stripe;
382         u->buckets_unavailable +=
383                 is_unavailable_bucket(new) - is_unavailable_bucket(old);
384
385         u->d[old.data_type].sectors -= old.dirty_sectors;
386         u->d[new.data_type].sectors += new.dirty_sectors;
387         u->d[BCH_DATA_cached].sectors +=
388                 (int) new.cached_sectors - (int) old.cached_sectors;
389
390         u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
391         u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
392
393         preempt_enable();
394
395         if (!is_available_bucket(old) && is_available_bucket(new))
396                 bch2_wake_allocator(ca);
397 }
398
399 static inline int update_replicas(struct bch_fs *c,
400                                    struct bch_fs_usage *fs_usage,
401                                    struct bch_replicas_entry *r,
402                                    s64 sectors)
403 {
404         int idx = bch2_replicas_entry_idx(c, r);
405
406         if (idx < 0)
407                 return -1;
408
409         fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
410         fs_usage->replicas[idx]         += sectors;
411         return 0;
412 }
413
414 static inline int update_cached_sectors(struct bch_fs *c,
415                                          struct bch_fs_usage *fs_usage,
416                                          unsigned dev, s64 sectors)
417 {
418         struct bch_replicas_padded r;
419
420         bch2_replicas_entry_cached(&r.e, dev);
421
422         return update_replicas(c, fs_usage, &r.e, sectors);
423 }
424
425 static struct replicas_delta_list *
426 replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
427 {
428         struct replicas_delta_list *d = trans->fs_usage_deltas;
429         unsigned new_size = d ? (d->size + more) * 2 : 128;
430         unsigned alloc_size = sizeof(*d) + new_size;
431
432         WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
433
434         if (!d || d->used + more > d->size) {
435                 d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
436
437                 BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
438
439                 if (!d) {
440                         d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
441                         memset(d, 0, REPLICAS_DELTA_LIST_MAX);
442
443                         if (trans->fs_usage_deltas)
444                                 memcpy(d, trans->fs_usage_deltas,
445                                        trans->fs_usage_deltas->size + sizeof(*d));
446
447                         new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
448                         kfree(trans->fs_usage_deltas);
449                 }
450
451                 d->size = new_size;
452                 trans->fs_usage_deltas = d;
453         }
454         return d;
455 }
456
457 static inline void update_replicas_list(struct btree_trans *trans,
458                                         struct bch_replicas_entry *r,
459                                         s64 sectors)
460 {
461         struct replicas_delta_list *d;
462         struct replicas_delta *n;
463         unsigned b;
464
465         if (!sectors)
466                 return;
467
468         b = replicas_entry_bytes(r) + 8;
469         d = replicas_deltas_realloc(trans, b);
470
471         n = (void *) d->d + d->used;
472         n->delta = sectors;
473         memcpy(&n->r, r, replicas_entry_bytes(r));
474         bch2_replicas_entry_sort(&n->r);
475         d->used += b;
476 }
477
478 static inline void update_cached_sectors_list(struct btree_trans *trans,
479                                               unsigned dev, s64 sectors)
480 {
481         struct bch_replicas_padded r;
482
483         bch2_replicas_entry_cached(&r.e, dev);
484
485         update_replicas_list(trans, &r.e, sectors);
486 }
487
488 #define do_mark_fn(fn, c, pos, flags, ...)                              \
489 ({                                                                      \
490         int gc, ret = 0;                                                \
491                                                                         \
492         percpu_rwsem_assert_held(&c->mark_lock);                        \
493                                                                         \
494         for (gc = 0; gc < 2 && !ret; gc++)                              \
495                 if (!gc == !(flags & BTREE_TRIGGER_GC) ||               \
496                     (gc && gc_visited(c, pos)))                         \
497                         ret = fn(c, __VA_ARGS__, gc);                   \
498         ret;                                                            \
499 })
500
501 void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
502                             size_t b, bool owned_by_allocator)
503 {
504         struct bucket *g = bucket(ca, b);
505         struct bucket_mark old, new;
506
507         old = bucket_cmpxchg(g, new, ({
508                 new.owned_by_allocator  = owned_by_allocator;
509         }));
510
511         BUG_ON(owned_by_allocator == old.owned_by_allocator);
512 }
513
514 static int bch2_mark_alloc(struct bch_fs *c,
515                            struct bkey_s_c old, struct bkey_s_c new,
516                            struct bch_fs_usage *fs_usage,
517                            u64 journal_seq, unsigned flags)
518 {
519         bool gc = flags & BTREE_TRIGGER_GC;
520         struct bkey_alloc_unpacked u;
521         struct bch_dev *ca;
522         struct bucket *g;
523         struct bucket_mark old_m, m;
524
525         /* We don't do anything for deletions - do we?: */
526         if (new.k->type != KEY_TYPE_alloc &&
527             new.k->type != KEY_TYPE_alloc_v2)
528                 return 0;
529
530         /*
531          * alloc btree is read in by bch2_alloc_read, not gc:
532          */
533         if ((flags & BTREE_TRIGGER_GC) &&
534             !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
535                 return 0;
536
537         ca = bch_dev_bkey_exists(c, new.k->p.inode);
538
539         if (new.k->p.offset >= ca->mi.nbuckets)
540                 return 0;
541
542         g = __bucket(ca, new.k->p.offset, gc);
543         u = bch2_alloc_unpack(new);
544
545         old_m = bucket_cmpxchg(g, m, ({
546                 m.gen                   = u.gen;
547                 m.data_type             = u.data_type;
548                 m.dirty_sectors         = u.dirty_sectors;
549                 m.cached_sectors        = u.cached_sectors;
550                 m.stripe                = u.stripe != 0;
551
552                 if (journal_seq) {
553                         m.journal_seq_valid     = 1;
554                         m.journal_seq           = journal_seq;
555                 }
556         }));
557
558         bch2_dev_usage_update(c, ca, fs_usage, old_m, m, journal_seq, gc);
559
560         g->io_time[READ]        = u.read_time;
561         g->io_time[WRITE]       = u.write_time;
562         g->oldest_gen           = u.oldest_gen;
563         g->gen_valid            = 1;
564         g->stripe               = u.stripe;
565         g->stripe_redundancy    = u.stripe_redundancy;
566
567         /*
568          * need to know if we're getting called from the invalidate path or
569          * not:
570          */
571
572         if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
573             old_m.cached_sectors) {
574                 if (update_cached_sectors(c, fs_usage, ca->dev_idx,
575                                       -old_m.cached_sectors)) {
576                         bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
577                         return -1;
578                 }
579
580                 trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
581                                  old_m.cached_sectors);
582         }
583
584         return 0;
585 }
586
587 #define checked_add(a, b)                                       \
588 ({                                                              \
589         unsigned _res = (unsigned) (a) + (b);                   \
590         bool overflow = _res > U16_MAX;                         \
591         if (overflow)                                           \
592                 _res = U16_MAX;                                 \
593         (a) = _res;                                             \
594         overflow;                                               \
595 })
596
597 static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
598                                        size_t b, enum bch_data_type data_type,
599                                        unsigned sectors, bool gc)
600 {
601         struct bucket *g = __bucket(ca, b, gc);
602         struct bucket_mark old, new;
603         bool overflow;
604
605         BUG_ON(data_type != BCH_DATA_sb &&
606                data_type != BCH_DATA_journal);
607
608         old = bucket_cmpxchg(g, new, ({
609                 new.data_type   = data_type;
610                 overflow = checked_add(new.dirty_sectors, sectors);
611         }));
612
613         bch2_fs_inconsistent_on(old.data_type &&
614                                 old.data_type != data_type, c,
615                 "different types of data in same bucket: %s, %s",
616                 bch2_data_types[old.data_type],
617                 bch2_data_types[data_type]);
618
619         bch2_fs_inconsistent_on(overflow, c,
620                 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
621                 ca->dev_idx, b, new.gen,
622                 bch2_data_types[old.data_type ?: data_type],
623                 old.dirty_sectors, sectors);
624
625         if (c)
626                 bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
627                                       old, new, 0, gc);
628
629         return 0;
630 }
631
632 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
633                                size_t b, enum bch_data_type type,
634                                unsigned sectors, struct gc_pos pos,
635                                unsigned flags)
636 {
637         BUG_ON(type != BCH_DATA_sb &&
638                type != BCH_DATA_journal);
639
640         preempt_disable();
641
642         if (likely(c)) {
643                 do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
644                            ca, b, type, sectors);
645         } else {
646                 __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
647         }
648
649         preempt_enable();
650 }
651
652 static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
653 {
654         return DIV_ROUND_UP(sectors * n, d);
655 }
656
657 static s64 __ptr_disk_sectors_delta(unsigned old_size,
658                                     unsigned offset, s64 delta,
659                                     unsigned flags,
660                                     unsigned n, unsigned d)
661 {
662         BUG_ON(!n || !d);
663
664         if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
665                 BUG_ON(offset + -delta > old_size);
666
667                 return -disk_sectors_scaled(n, d, old_size) +
668                         disk_sectors_scaled(n, d, offset) +
669                         disk_sectors_scaled(n, d, old_size - offset + delta);
670         } else if (flags & BTREE_TRIGGER_OVERWRITE) {
671                 BUG_ON(offset + -delta > old_size);
672
673                 return -disk_sectors_scaled(n, d, old_size) +
674                         disk_sectors_scaled(n, d, old_size + delta);
675         } else {
676                 return  disk_sectors_scaled(n, d, delta);
677         }
678 }
679
680 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
681                                   unsigned offset, s64 delta,
682                                   unsigned flags)
683 {
684         return __ptr_disk_sectors_delta(p.crc.live_size,
685                                         offset, delta, flags,
686                                         p.crc.compressed_size,
687                                         p.crc.uncompressed_size);
688 }
689
690 static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
691                             const struct bch_extent_ptr *ptr,
692                             s64 sectors, enum bch_data_type ptr_data_type,
693                             u8 bucket_gen, u8 bucket_data_type,
694                             u16 dirty_sectors, u16 cached_sectors)
695 {
696         size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
697         u16 bucket_sectors = !ptr->cached
698                 ? dirty_sectors
699                 : cached_sectors;
700         char buf[200];
701
702         if (gen_after(ptr->gen, bucket_gen)) {
703                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
704                         "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
705                         "while marking %s",
706                         ptr->dev, bucket_nr, bucket_gen,
707                         bch2_data_types[bucket_data_type ?: ptr_data_type],
708                         ptr->gen,
709                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
710                 return -EIO;
711         }
712
713         if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
714                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
715                         "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
716                         "while marking %s",
717                         ptr->dev, bucket_nr, bucket_gen,
718                         bch2_data_types[bucket_data_type ?: ptr_data_type],
719                         ptr->gen,
720                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
721                 return -EIO;
722         }
723
724         if (bucket_gen != ptr->gen && !ptr->cached) {
725                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
726                         "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
727                         "while marking %s",
728                         ptr->dev, bucket_nr, bucket_gen,
729                         bch2_data_types[bucket_data_type ?: ptr_data_type],
730                         ptr->gen,
731                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
732                 return -EIO;
733         }
734
735         if (bucket_gen != ptr->gen)
736                 return 1;
737
738         if (bucket_data_type && ptr_data_type &&
739             bucket_data_type != ptr_data_type) {
740                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
741                         "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
742                         "while marking %s",
743                         ptr->dev, bucket_nr, bucket_gen,
744                         bch2_data_types[bucket_data_type],
745                         bch2_data_types[ptr_data_type],
746                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
747                 return -EIO;
748         }
749
750         if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
751                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
752                         "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
753                         "while marking %s",
754                         ptr->dev, bucket_nr, bucket_gen,
755                         bch2_data_types[bucket_data_type ?: ptr_data_type],
756                         bucket_sectors, sectors,
757                         (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
758                 return -EIO;
759         }
760
761         return 0;
762 }
763
764 static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
765                              unsigned ptr_idx,
766                              struct bch_fs_usage *fs_usage,
767                              u64 journal_seq, unsigned flags)
768 {
769         const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
770         unsigned nr_data = s->nr_blocks - s->nr_redundant;
771         bool parity = ptr_idx >= nr_data;
772         const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
773         bool gc = flags & BTREE_TRIGGER_GC;
774         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
775         struct bucket *g = PTR_BUCKET(ca, ptr, gc);
776         struct bucket_mark new, old;
777         char buf[200];
778         int ret;
779
780         if (g->stripe && g->stripe != k.k->p.offset) {
781                 bch2_fs_inconsistent(c,
782                               "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
783                               ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
784                               (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
785                 return -EINVAL;
786         }
787
788         old = bucket_cmpxchg(g, new, ({
789                 ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
790                                        new.dirty_sectors, new.cached_sectors);
791                 if (ret)
792                         return ret;
793
794                 if (parity) {
795                         new.data_type           = BCH_DATA_parity;
796                         new.dirty_sectors       = le16_to_cpu(s->sectors);
797                 }
798
799                 if (journal_seq) {
800                         new.journal_seq_valid   = 1;
801                         new.journal_seq         = journal_seq;
802                 }
803         }));
804
805         g->stripe               = k.k->p.offset;
806         g->stripe_redundancy    = s->nr_redundant;
807
808         bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
809         return 0;
810 }
811
812 static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
813                           const struct bch_extent_ptr *ptr,
814                           s64 sectors, enum bch_data_type ptr_data_type,
815                           u8 bucket_gen, u8 *bucket_data_type,
816                           u16 *dirty_sectors, u16 *cached_sectors)
817 {
818         u16 *dst_sectors = !ptr->cached
819                 ? dirty_sectors
820                 : cached_sectors;
821         int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
822                                    bucket_gen, *bucket_data_type,
823                                    *dirty_sectors, *cached_sectors);
824
825         if (ret)
826                 return ret;
827
828         *dst_sectors += sectors;
829         *bucket_data_type = *dirty_sectors || *cached_sectors
830                 ? ptr_data_type : 0;
831         return 0;
832 }
833
834 static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
835                              struct extent_ptr_decoded p,
836                              s64 sectors, enum bch_data_type data_type,
837                              struct bch_fs_usage *fs_usage,
838                              u64 journal_seq, unsigned flags)
839 {
840         bool gc = flags & BTREE_TRIGGER_GC;
841         struct bucket_mark old, new;
842         struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
843         struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
844         u8 bucket_data_type;
845         u64 v;
846         int ret;
847
848         v = atomic64_read(&g->_mark.v);
849         do {
850                 new.v.counter = old.v.counter = v;
851                 bucket_data_type = new.data_type;
852
853                 ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
854                                      &bucket_data_type,
855                                      &new.dirty_sectors,
856                                      &new.cached_sectors);
857                 if (ret)
858                         return ret;
859
860                 new.data_type = bucket_data_type;
861
862                 if (journal_seq) {
863                         new.journal_seq_valid = 1;
864                         new.journal_seq = journal_seq;
865                 }
866
867                 if (flags & BTREE_TRIGGER_NOATOMIC) {
868                         g->_mark = new;
869                         break;
870                 }
871         } while ((v = atomic64_cmpxchg(&g->_mark.v,
872                               old.v.counter,
873                               new.v.counter)) != old.v.counter);
874
875         bch2_dev_usage_update(c, ca, fs_usage, old, new, journal_seq, gc);
876
877         BUG_ON(!gc && bucket_became_unavailable(old, new));
878
879         return 0;
880 }
881
882 static int bch2_mark_stripe_ptr(struct bch_fs *c,
883                                 struct bch_extent_stripe_ptr p,
884                                 enum bch_data_type data_type,
885                                 struct bch_fs_usage *fs_usage,
886                                 s64 sectors, unsigned flags)
887 {
888         bool gc = flags & BTREE_TRIGGER_GC;
889         struct bch_replicas_padded r;
890         struct stripe *m;
891         unsigned i, blocks_nonempty = 0;
892
893         m = genradix_ptr(&c->stripes[gc], p.idx);
894
895         spin_lock(&c->ec_stripes_heap_lock);
896
897         if (!m || !m->alive) {
898                 spin_unlock(&c->ec_stripes_heap_lock);
899                 bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
900                                     (u64) p.idx);
901                 bch2_inconsistent_error(c);
902                 return -EIO;
903         }
904
905         m->block_sectors[p.block] += sectors;
906
907         r = m->r;
908
909         for (i = 0; i < m->nr_blocks; i++)
910                 blocks_nonempty += m->block_sectors[i] != 0;
911
912         if (m->blocks_nonempty != blocks_nonempty) {
913                 m->blocks_nonempty = blocks_nonempty;
914                 if (!gc)
915                         bch2_stripes_heap_update(c, m, p.idx);
916         }
917
918         spin_unlock(&c->ec_stripes_heap_lock);
919
920         r.e.data_type = data_type;
921         update_replicas(c, fs_usage, &r.e, sectors);
922
923         return 0;
924 }
925
926 static int bch2_mark_extent(struct bch_fs *c,
927                             struct bkey_s_c old, struct bkey_s_c new,
928                             unsigned offset, s64 sectors,
929                             enum bch_data_type data_type,
930                             struct bch_fs_usage *fs_usage,
931                             unsigned journal_seq, unsigned flags)
932 {
933         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
934         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
935         const union bch_extent_entry *entry;
936         struct extent_ptr_decoded p;
937         struct bch_replicas_padded r;
938         s64 dirty_sectors = 0;
939         bool stale;
940         int ret;
941
942         r.e.data_type   = data_type;
943         r.e.nr_devs     = 0;
944         r.e.nr_required = 1;
945
946         BUG_ON(!sectors);
947
948         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
949                 s64 disk_sectors = data_type == BCH_DATA_btree
950                         ? sectors
951                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
952
953                 ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
954                                         fs_usage, journal_seq, flags);
955                 if (ret < 0)
956                         return ret;
957
958                 stale = ret > 0;
959
960                 if (p.ptr.cached) {
961                         if (!stale)
962                                 if (update_cached_sectors(c, fs_usage, p.ptr.dev,
963                                                           disk_sectors)) {
964                                         bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
965                                         return -1;
966
967                                 }
968                 } else if (!p.has_ec) {
969                         dirty_sectors          += disk_sectors;
970                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
971                 } else {
972                         ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
973                                         fs_usage, disk_sectors, flags);
974                         if (ret)
975                                 return ret;
976
977                         /*
978                          * There may be other dirty pointers in this extent, but
979                          * if so they're not required for mounting if we have an
980                          * erasure coded pointer in this extent:
981                          */
982                         r.e.nr_required = 0;
983                 }
984         }
985
986         if (r.e.nr_devs) {
987                 if (update_replicas(c, fs_usage, &r.e, dirty_sectors)) {
988                         char buf[200];
989
990                         bch2_bkey_val_to_text(&PBUF(buf), c, k);
991                         bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
992                         return -1;
993                 }
994         }
995
996         return 0;
997 }
998
999 static int bch2_mark_stripe(struct bch_fs *c,
1000                             struct bkey_s_c old, struct bkey_s_c new,
1001                             struct bch_fs_usage *fs_usage,
1002                             u64 journal_seq, unsigned flags)
1003 {
1004         bool gc = flags & BTREE_TRIGGER_GC;
1005         size_t idx = new.k->p.offset;
1006         const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
1007                 ? bkey_s_c_to_stripe(old).v : NULL;
1008         const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
1009                 ? bkey_s_c_to_stripe(new).v : NULL;
1010         struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
1011         unsigned i;
1012         int ret;
1013
1014         BUG_ON(gc && old_s);
1015
1016         if (!m || (old_s && !m->alive)) {
1017                 bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
1018                                     idx);
1019                 bch2_inconsistent_error(c);
1020                 return -1;
1021         }
1022
1023         if (!new_s) {
1024                 spin_lock(&c->ec_stripes_heap_lock);
1025                 bch2_stripes_heap_del(c, m, idx);
1026                 spin_unlock(&c->ec_stripes_heap_lock);
1027
1028                 memset(m, 0, sizeof(*m));
1029         } else {
1030                 m->alive        = true;
1031                 m->sectors      = le16_to_cpu(new_s->sectors);
1032                 m->algorithm    = new_s->algorithm;
1033                 m->nr_blocks    = new_s->nr_blocks;
1034                 m->nr_redundant = new_s->nr_redundant;
1035                 m->blocks_nonempty = 0;
1036
1037                 for (i = 0; i < new_s->nr_blocks; i++) {
1038                         m->block_sectors[i] =
1039                                 stripe_blockcount_get(new_s, i);
1040                         m->blocks_nonempty += !!m->block_sectors[i];
1041
1042                         m->ptrs[i] = new_s->ptrs[i];
1043                 }
1044
1045                 bch2_bkey_to_replicas(&m->r.e, new);
1046
1047                 if (!gc) {
1048                         spin_lock(&c->ec_stripes_heap_lock);
1049                         bch2_stripes_heap_update(c, m, idx);
1050                         spin_unlock(&c->ec_stripes_heap_lock);
1051                 }
1052         }
1053
1054         if (gc) {
1055                 /*
1056                  * gc recalculates this field from stripe ptr
1057                  * references:
1058                  */
1059                 memset(m->block_sectors, 0, sizeof(m->block_sectors));
1060                 m->blocks_nonempty = 0;
1061
1062                 for (i = 0; i < new_s->nr_blocks; i++) {
1063                         ret = mark_stripe_bucket(c, new, i, fs_usage,
1064                                                  journal_seq, flags);
1065                         if (ret)
1066                                 return ret;
1067                 }
1068
1069                 if (update_replicas(c, fs_usage, &m->r.e,
1070                                 ((s64) m->sectors * m->nr_redundant))) {
1071                         char buf[200];
1072
1073                         bch2_bkey_val_to_text(&PBUF(buf), c, new);
1074                         bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
1075                         return -1;
1076                 }
1077         }
1078
1079         return 0;
1080 }
1081
1082 static int bch2_mark_key_locked(struct bch_fs *c,
1083                    struct bkey_s_c old,
1084                    struct bkey_s_c new,
1085                    unsigned offset, s64 sectors,
1086                    struct bch_fs_usage *fs_usage,
1087                    u64 journal_seq, unsigned flags)
1088 {
1089         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1090         int ret = 0;
1091
1092         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1093
1094         preempt_disable();
1095
1096         if (!fs_usage || (flags & BTREE_TRIGGER_GC))
1097                 fs_usage = fs_usage_ptr(c, journal_seq,
1098                                         flags & BTREE_TRIGGER_GC);
1099
1100         switch (k.k->type) {
1101         case KEY_TYPE_alloc:
1102         case KEY_TYPE_alloc_v2:
1103                 ret = bch2_mark_alloc(c, old, new, fs_usage, journal_seq, flags);
1104                 break;
1105         case KEY_TYPE_btree_ptr:
1106         case KEY_TYPE_btree_ptr_v2:
1107                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1108                         ?  c->opts.btree_node_size
1109                         : -c->opts.btree_node_size;
1110
1111                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1112                                 BCH_DATA_btree, fs_usage, journal_seq, flags);
1113                 break;
1114         case KEY_TYPE_extent:
1115         case KEY_TYPE_reflink_v:
1116                 ret = bch2_mark_extent(c, old, new, offset, sectors,
1117                                 BCH_DATA_user, fs_usage, journal_seq, flags);
1118                 break;
1119         case KEY_TYPE_stripe:
1120                 ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
1121                 break;
1122         case KEY_TYPE_inode:
1123                 fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
1124                 fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
1125                 break;
1126         case KEY_TYPE_reservation: {
1127                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1128
1129                 sectors *= replicas;
1130                 replicas = clamp_t(unsigned, replicas, 1,
1131                                    ARRAY_SIZE(fs_usage->persistent_reserved));
1132
1133                 fs_usage->reserved                              += sectors;
1134                 fs_usage->persistent_reserved[replicas - 1]     += sectors;
1135                 break;
1136         }
1137         }
1138
1139         preempt_enable();
1140
1141         return ret;
1142 }
1143
1144 int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new,
1145                   unsigned offset, s64 sectors,
1146                   struct bch_fs_usage *fs_usage,
1147                   u64 journal_seq, unsigned flags)
1148 {
1149         struct bkey deleted;
1150         struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
1151         int ret;
1152
1153         bkey_init(&deleted);
1154
1155         percpu_down_read(&c->mark_lock);
1156         ret = bch2_mark_key_locked(c, old, new, offset, sectors,
1157                                    fs_usage, journal_seq,
1158                                    BTREE_TRIGGER_INSERT|flags);
1159         percpu_up_read(&c->mark_lock);
1160
1161         return ret;
1162 }
1163
1164 int bch2_mark_update(struct btree_trans *trans,
1165                      struct btree_iter *iter,
1166                      struct bkey_i *new,
1167                      struct bch_fs_usage *fs_usage,
1168                      unsigned flags)
1169 {
1170         struct bch_fs           *c = trans->c;
1171         struct bkey_s_c         old;
1172         struct bkey             unpacked;
1173         int ret = 0;
1174
1175         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1176                 return 0;
1177
1178         if (!btree_node_type_needs_gc(iter->btree_id))
1179                 return 0;
1180
1181         bkey_init(&unpacked);
1182         old = (struct bkey_s_c) { &unpacked, NULL };
1183
1184         if (!btree_node_type_is_extents(iter->btree_id)) {
1185                 /* iterators should be uptodate, shouldn't get errors here: */
1186                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1187                         old = bch2_btree_iter_peek_slot(iter);
1188                         BUG_ON(bkey_err(old));
1189                 } else {
1190                         struct bkey_cached *ck = (void *) iter->l[0].b;
1191
1192                         if (ck->valid)
1193                                 old = bkey_i_to_s_c(ck->k);
1194                 }
1195
1196                 if (old.k->type == new->k.type) {
1197                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1198                                 fs_usage, trans->journal_res.seq,
1199                                 BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1200
1201                 } else {
1202                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1203                                 fs_usage, trans->journal_res.seq,
1204                                 BTREE_TRIGGER_INSERT|flags);
1205                         bch2_mark_key_locked(c, old, bkey_i_to_s_c(new), 0, 0,
1206                                 fs_usage, trans->journal_res.seq,
1207                                 BTREE_TRIGGER_OVERWRITE|flags);
1208                 }
1209         } else {
1210                 struct btree_iter *copy;
1211
1212                 BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1213                 bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1214                         0, new->k.size,
1215                         fs_usage, trans->journal_res.seq,
1216                         BTREE_TRIGGER_INSERT|flags);
1217
1218                 copy = bch2_trans_copy_iter(trans, iter);
1219
1220                 for_each_btree_key_continue(copy, 0, old, ret) {
1221                         unsigned offset = 0;
1222                         s64 sectors = -((s64) old.k->size);
1223
1224                         flags |= BTREE_TRIGGER_OVERWRITE;
1225
1226                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1227                                 break;
1228
1229                         switch (bch2_extent_overlap(&new->k, old.k)) {
1230                         case BCH_EXTENT_OVERLAP_ALL:
1231                                 offset = 0;
1232                                 sectors = -((s64) old.k->size);
1233                                 break;
1234                         case BCH_EXTENT_OVERLAP_BACK:
1235                                 offset = bkey_start_offset(&new->k) -
1236                                         bkey_start_offset(old.k);
1237                                 sectors = bkey_start_offset(&new->k) -
1238                                         old.k->p.offset;
1239                                 break;
1240                         case BCH_EXTENT_OVERLAP_FRONT:
1241                                 offset = 0;
1242                                 sectors = bkey_start_offset(old.k) -
1243                                         new->k.p.offset;
1244                                 break;
1245                         case BCH_EXTENT_OVERLAP_MIDDLE:
1246                                 offset = bkey_start_offset(&new->k) -
1247                                         bkey_start_offset(old.k);
1248                                 sectors = -((s64) new->k.size);
1249                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1250                                 break;
1251                         }
1252
1253                         BUG_ON(sectors >= 0);
1254
1255                         ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
1256                                         offset, sectors, fs_usage,
1257                                         trans->journal_res.seq, flags) ?: 1;
1258                         if (ret <= 0)
1259                                 break;
1260                 }
1261                 bch2_trans_iter_put(trans, copy);
1262         }
1263
1264         return ret;
1265 }
1266
1267 static noinline __cold
1268 void fs_usage_apply_warn(struct btree_trans *trans,
1269                          unsigned disk_res_sectors,
1270                          s64 should_not_have_added)
1271 {
1272         struct bch_fs *c = trans->c;
1273         struct btree_insert_entry *i;
1274         char buf[200];
1275
1276         bch_err(c, "disk usage increased %lli more than %u sectors reserved",
1277                 should_not_have_added, disk_res_sectors);
1278
1279         trans_for_each_update(trans, i) {
1280                 pr_err("while inserting");
1281                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
1282                 pr_err("%s", buf);
1283                 pr_err("overlapping with");
1284
1285                 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
1286                         struct btree_iter *copy = bch2_trans_copy_iter(trans, i->iter);
1287                         struct bkey_s_c k;
1288                         int ret;
1289
1290                         for_each_btree_key_continue(copy, 0, k, ret) {
1291                                 if (btree_node_type_is_extents(i->iter->btree_id)
1292                                     ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
1293                                     : bkey_cmp(i->k->k.p, k.k->p))
1294                                         break;
1295
1296                                 bch2_bkey_val_to_text(&PBUF(buf), c, k);
1297                                 pr_err("%s", buf);
1298                         }
1299                         bch2_trans_iter_put(trans, copy);
1300                 } else {
1301                         struct bkey_cached *ck = (void *) i->iter->l[0].b;
1302
1303                         if (ck->valid) {
1304                                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
1305                                 pr_err("%s", buf);
1306                         }
1307                 }
1308         }
1309         __WARN();
1310 }
1311
1312 void bch2_trans_fs_usage_apply(struct btree_trans *trans,
1313                                struct replicas_delta_list *deltas)
1314 {
1315         struct bch_fs *c = trans->c;
1316         static int warned_disk_usage = 0;
1317         bool warn = false;
1318         unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
1319         struct replicas_delta *d = deltas->d;
1320         struct replicas_delta *top = (void *) deltas->d + deltas->used;
1321         struct bch_fs_usage *dst;
1322         s64 added = 0, should_not_have_added;
1323         unsigned i;
1324
1325         percpu_rwsem_assert_held(&c->mark_lock);
1326
1327         preempt_disable();
1328         dst = fs_usage_ptr(c, trans->journal_res.seq, false);
1329
1330         for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
1331                 switch (d->r.data_type) {
1332                 case BCH_DATA_btree:
1333                 case BCH_DATA_user:
1334                 case BCH_DATA_parity:
1335                         added += d->delta;
1336                 }
1337
1338                 BUG_ON(update_replicas(c, dst, &d->r, d->delta));
1339         }
1340
1341         dst->nr_inodes += deltas->nr_inodes;
1342
1343         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
1344                 added                           += deltas->persistent_reserved[i];
1345                 dst->reserved                   += deltas->persistent_reserved[i];
1346                 dst->persistent_reserved[i]     += deltas->persistent_reserved[i];
1347         }
1348
1349         /*
1350          * Not allowed to reduce sectors_available except by getting a
1351          * reservation:
1352          */
1353         should_not_have_added = added - (s64) disk_res_sectors;
1354         if (unlikely(should_not_have_added > 0)) {
1355                 atomic64_sub(should_not_have_added, &c->sectors_available);
1356                 added -= should_not_have_added;
1357                 warn = true;
1358         }
1359
1360         if (added > 0) {
1361                 trans->disk_res->sectors -= added;
1362                 this_cpu_sub(*c->online_reserved, added);
1363         }
1364
1365         preempt_enable();
1366
1367         if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
1368                 fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
1369 }
1370
1371 /* trans_mark: */
1372
1373 static struct btree_iter *trans_get_update(struct btree_trans *trans,
1374                             enum btree_id btree_id, struct bpos pos,
1375                             struct bkey_s_c *k)
1376 {
1377         struct btree_insert_entry *i;
1378
1379         trans_for_each_update(trans, i)
1380                 if (i->iter->btree_id == btree_id &&
1381                     (btree_node_type_is_extents(btree_id)
1382                      ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
1383                        bkey_cmp(pos, i->k->k.p) < 0
1384                      : !bkey_cmp(pos, i->iter->pos))) {
1385                         *k = bkey_i_to_s_c(i->k);
1386
1387                         /* ugly hack.. */
1388                         BUG_ON(btree_iter_live(trans, i->iter));
1389                         trans->iters_live |= 1ULL << i->iter->idx;
1390                         return i->iter;
1391                 }
1392
1393         return NULL;
1394 }
1395
1396 static int trans_get_key(struct btree_trans *trans,
1397                          enum btree_id btree_id, struct bpos pos,
1398                          struct btree_iter **iter,
1399                          struct bkey_s_c *k)
1400 {
1401         unsigned flags = btree_id != BTREE_ID_alloc
1402                 ? BTREE_ITER_SLOTS
1403                 : BTREE_ITER_CACHED;
1404         int ret;
1405
1406         *iter = trans_get_update(trans, btree_id, pos, k);
1407         if (*iter)
1408                 return 1;
1409
1410         *iter = bch2_trans_get_iter(trans, btree_id, pos,
1411                                     flags|BTREE_ITER_INTENT);
1412         *k = __bch2_btree_iter_peek(*iter, flags);
1413         ret = bkey_err(*k);
1414         if (ret)
1415                 bch2_trans_iter_put(trans, *iter);
1416         return ret;
1417 }
1418
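/*
 * Start an update to the alloc key for the bucket @ptr points into: returns
 * a bkey_alloc_buf to pack the new key into, hands back an intent iterator
 * via @_iter, and fills @u with the current unpacked alloc state (taken from
 * a pending update, the btree key cache, or the in-memory bucket mark).
 */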
1419 static struct bkey_alloc_buf *
1420 bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_iter,
1421                               const struct bch_extent_ptr *ptr,
1422                               struct bkey_alloc_unpacked *u)
1423 {
1424         struct bch_fs *c = trans->c;
1425         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1426         struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
1427         struct bucket *g;
1428         struct btree_iter *iter;
1429         struct bkey_s_c k;
1430         struct bkey_alloc_buf *a;
1431         int ret;
1432
1433         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
1434         if (IS_ERR(a))
1435                 return a;
1436
1437         iter = trans_get_update(trans, BTREE_ID_alloc, pos, &k);
1438         if (iter) {
1439                 *u = bch2_alloc_unpack(k);
1440         } else {
1441                 iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, pos,
1442                                            BTREE_ITER_CACHED|
1443                                            BTREE_ITER_CACHED_NOFILL|
1444                                            BTREE_ITER_INTENT);
1445                 ret = bch2_btree_iter_traverse(iter);
1446                 if (ret) {
1447                         bch2_trans_iter_put(trans, iter);
1448                         return ERR_PTR(ret);
1449                 }
1450
1451                 percpu_down_read(&c->mark_lock);
1452                 g = bucket(ca, pos.offset);
1453                 *u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
1454                 percpu_up_read(&c->mark_lock);
1455         }
1456
1457         *_iter = iter;
1458         return a;
1459 }
1460
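/*
 * Transactional counterpart of marking a single extent pointer: apply the
 * sector delta to the owning bucket's alloc key via __mark_pointer() and
 * queue up the repacked key as an update.
 */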
1461 static int bch2_trans_mark_pointer(struct btree_trans *trans,
1462                         struct bkey_s_c k, struct extent_ptr_decoded p,
1463                         s64 sectors, enum bch_data_type data_type)
1464 {
1465         struct bch_fs *c = trans->c;
1466         struct btree_iter *iter;
1467         struct bkey_alloc_unpacked u;
1468         struct bkey_alloc_buf *a;
1469         int ret;
1470
1471         a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
1472         if (IS_ERR(a))
1473                 return PTR_ERR(a);
1474
1475         ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
1476                              &u.dirty_sectors, &u.cached_sectors);
1477         if (ret)
1478                 goto out;
1479
1480         bch2_alloc_pack(c, a, u);
1481         bch2_trans_update(trans, iter, &a->k, 0);
1482 out:
1483         bch2_trans_iter_put(trans, iter);
1484         return ret;
1485 }
1486
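/*
 * For a pointer into an erasure coded stripe: bump the stripe's per-block
 * sector count and account the sectors against the stripe's replicas entry.
 */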
1487 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
1488                         struct extent_ptr_decoded p,
1489                         s64 sectors, enum bch_data_type data_type)
1490 {
1491         struct bch_fs *c = trans->c;
1492         struct btree_iter *iter;
1493         struct bkey_s_c k;
1494         struct bkey_i_stripe *s;
1495         struct bch_replicas_padded r;
1496         int ret = 0;
1497
1498         ret = trans_get_key(trans, BTREE_ID_stripes, POS(0, p.ec.idx), &iter, &k);
1499         if (ret < 0)
1500                 return ret;
1501
1502         if (k.k->type != KEY_TYPE_stripe) {
1503                 bch2_fs_inconsistent(c,
1504                         "pointer to nonexistent stripe %llu",
1505                         (u64) p.ec.idx);
1506                 bch2_inconsistent_error(c);
1507                 ret = -EIO;
1508                 goto out;
1509         }
1510
1511         if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
1512                 bch2_fs_inconsistent(c,
1513                         "stripe pointer doesn't match stripe %llu",
1514                         (u64) p.ec.idx);
1515                 ret = -EIO;
1516                 goto out;
1517         }
1518
1519         s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1520         ret = PTR_ERR_OR_ZERO(s);
1521         if (ret)
1522                 goto out;
1523
1524         bkey_reassemble(&s->k_i, k);
1525         stripe_blockcount_set(&s->v, p.ec.block,
1526                 stripe_blockcount_get(&s->v, p.ec.block) +
1527                 sectors);
1528         bch2_trans_update(trans, iter, &s->k_i, 0);
1529
1530         bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
1531         r.e.data_type = data_type;
1532         update_replicas_list(trans, &r.e, sectors);
1533 out:
1534         bch2_trans_iter_put(trans, iter);
1535         return ret;
1536 }
1537
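/*
 * Mark every pointer in an extent (or btree node) key: cached pointers are
 * accounted as cached sectors, plain pointers accumulate into a replicas
 * entry, and erasure coded pointers are accounted via their stripe.
 */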
1538 static int bch2_trans_mark_extent(struct btree_trans *trans,
1539                         struct bkey_s_c k, unsigned offset,
1540                         s64 sectors, unsigned flags,
1541                         enum bch_data_type data_type)
1542 {
1543         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1544         const union bch_extent_entry *entry;
1545         struct extent_ptr_decoded p;
1546         struct bch_replicas_padded r;
1547         s64 dirty_sectors = 0;
1548         bool stale;
1549         int ret;
1550
1551         r.e.data_type   = data_type;
1552         r.e.nr_devs     = 0;
1553         r.e.nr_required = 1;
1554
1555         BUG_ON(!sectors);
1556
1557         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
1558                 s64 disk_sectors = data_type == BCH_DATA_btree
1559                         ? sectors
1560                         : ptr_disk_sectors_delta(p, offset, sectors, flags);
1561
1562                 ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
1563                                               data_type);
1564                 if (ret < 0)
1565                         return ret;
1566
1567                 stale = ret > 0;
1568
1569                 if (p.ptr.cached) {
1570                         if (!stale)
1571                                 update_cached_sectors_list(trans, p.ptr.dev,
1572                                                            disk_sectors);
1573                 } else if (!p.has_ec) {
1574                         dirty_sectors          += disk_sectors;
1575                         r.e.devs[r.e.nr_devs++] = p.ptr.dev;
1576                 } else {
1577                         ret = bch2_trans_mark_stripe_ptr(trans, p,
1578                                         disk_sectors, data_type);
1579                         if (ret)
1580                                 return ret;
1581
1582                         r.e.nr_required = 0;
1583                 }
1584         }
1585
1586         if (r.e.nr_devs)
1587                 update_replicas_list(trans, &r.e, dirty_sectors);
1588
1589         return 0;
1590 }
1591
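/*
 * Update the alloc key for block @idx of stripe @s: parity blocks carry the
 * stripe's sectors as dirty BCH_DATA_parity sectors, and every block records
 * (or clears, when deleting) which stripe owns the bucket.
 */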
1592 static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
1593                                             struct bkey_s_c_stripe s,
1594                                             unsigned idx, bool deleting)
1595 {
1596         struct bch_fs *c = trans->c;
1597         const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
1598         struct bkey_alloc_buf *a;
1599         struct btree_iter *iter;
1600         struct bkey_alloc_unpacked u;
1601         bool parity = idx >= s.v->nr_blocks - s.v->nr_redundant;
1602         int ret = 0;
1603
1604         a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
1605         if (IS_ERR(a))
1606                 return PTR_ERR(a);
1607
1608         if (parity) {
1609                 s64 sectors = le16_to_cpu(s.v->sectors);
1610
1611                 if (deleting)
1612                         sectors = -sectors;
1613
1614                 u.dirty_sectors += sectors;
1615                 u.data_type = u.dirty_sectors
1616                         ? BCH_DATA_parity
1617                         : 0;
1618         }
1619
1620         if (!deleting) {
1621                 if (bch2_fs_inconsistent_on(u.stripe && u.stripe != s.k->p.offset, c,
1622                                 "bucket %llu:%llu gen %u: multiple stripes using same bucket (%u, %llu)",
1623                                 iter->pos.inode, iter->pos.offset, u.gen,
1624                                 u.stripe, s.k->p.offset)) {
1625                         ret = -EIO;
1626                         goto err;
1627                 }
1628
1629                 u.stripe                = s.k->p.offset;
1630                 u.stripe_redundancy     = s.v->nr_redundant;
1631         } else {
1632                 u.stripe                = 0;
1633                 u.stripe_redundancy     = 0;
1634         }
1635
1636         bch2_alloc_pack(c, a, u);
1637         bch2_trans_update(trans, iter, &a->k, 0);
1638 err:
1639         bch2_trans_iter_put(trans, iter);
1640         return ret;
1641 }
1642
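/*
 * Trigger for stripe keys: if the block pointers changed, account the new
 * stripe's replicas, back out the old one's, and update the alloc keys for
 * every bucket the stripe references.
 */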
1643 static int bch2_trans_mark_stripe(struct btree_trans *trans,
1644                                   struct bkey_s_c old, struct bkey_s_c new,
1645                                   unsigned flags)
1646 {
1647         struct bkey_s_c_stripe old_s = { .k = NULL };
1648         struct bkey_s_c_stripe new_s = { .k = NULL };
1649         struct bch_replicas_padded r;
1650         unsigned i;
1651         int ret = 0;
1652
1653         if (old.k->type == KEY_TYPE_stripe)
1654                 old_s = bkey_s_c_to_stripe(old);
1655         if (new.k->type == KEY_TYPE_stripe)
1656                 new_s = bkey_s_c_to_stripe(new);
1657
1658         /*
1659          * If the pointers aren't changing, we don't need to do anything:
1660          */
1661         if (new_s.k && old_s.k &&
1662             new_s.v->nr_blocks          == old_s.v->nr_blocks &&
1663             new_s.v->nr_redundant       == old_s.v->nr_redundant &&
1664             !memcmp(old_s.v->ptrs, new_s.v->ptrs,
1665                     new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
1666                 return 0;
1667
1668         if (new_s.k) {
1669                 s64 sectors = le16_to_cpu(new_s.v->sectors);
1670
1671                 bch2_bkey_to_replicas(&r.e, new);
1672                 update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
1673
1674                 for (i = 0; i < new_s.v->nr_blocks; i++) {
1675                         ret = bch2_trans_mark_stripe_alloc_ref(trans, new_s,
1676                                                                i, false);
1677                         if (ret)
1678                                 return ret;
1679                 }
1680         }
1681
1682         if (old_s.k) {
1683                 s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
1684
1685                 bch2_bkey_to_replicas(&r.e, old);
1686                 update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
1687
1688                 for (i = 0; i < old_s.v->nr_blocks; i++) {
1689                         ret = bch2_trans_mark_stripe_alloc_ref(trans, old_s,
1690                                                                i, true);
1691                         if (ret)
1692                                 return ret;
1693                 }
1694         }
1695
1696         return ret;
1697 }
1698
1699 static __le64 *bkey_refcount(struct bkey_i *k)
1700 {
1701         switch (k->k.type) {
1702         case KEY_TYPE_reflink_v:
1703                 return &bkey_i_to_reflink_v(k)->v.refcount;
1704         case KEY_TYPE_indirect_inline_data:
1705                 return &bkey_i_to_indirect_inline_data(k)->v.refcount;
1706         default:
1707                 return NULL;
1708         }
1709 }
1710
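/*
 * Does the fragment [start, end) of reflink pointer @p (offsets relative to
 * p.v->idx) overlap the indirect extent @k?
 */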
1711 static bool reflink_p_frag_references(struct bkey_s_c_reflink_p p,
1712                                       u64 start, u64 end,
1713                                       struct bkey_s_c k)
1714 {
1715         if (start == end)
1716                 return false;
1717
1718         start   += le64_to_cpu(p.v->idx);
1719         end     += le64_to_cpu(p.v->idx);
1720
1721         if (end <= bkey_start_offset(k.k))
1722                 return false;
1723         if (start >= k.k->p.offset)
1724                 return false;
1725         return true;
1726 }
1727
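/*
 * Adjust the refcount on one indirect extent referenced by @p, queueing the
 * updated key (or a deletion, once the refcount hits zero); returns the
 * number of sectors covered so the caller can advance to the next extent.
 */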
1728 static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
1729                         struct bkey_s_c_reflink_p p,
1730                         u64 idx, unsigned sectors,
1731                         unsigned front_frag,
1732                         unsigned back_frag,
1733                         unsigned flags)
1734 {
1735         struct bch_fs *c = trans->c;
1736         struct btree_iter *iter;
1737         struct bkey_s_c k;
1738         struct bkey_i *n;
1739         __le64 *refcount;
1740         int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
1741         s64 ret;
1742
1743         ret = trans_get_key(trans, BTREE_ID_reflink,
1744                             POS(0, idx), &iter, &k);
1745         if (ret < 0)
1746                 return ret;
1747
1748         if (reflink_p_frag_references(p, 0, front_frag, k) &&
1749             reflink_p_frag_references(p, back_frag, p.k->size, k)) {
1750                 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE_SPLIT));
1751                 add = -add;
1752         } else if (reflink_p_frag_references(p, 0, front_frag, k) ||
1753                    reflink_p_frag_references(p, back_frag, p.k->size, k)) {
1754                 BUG_ON(!(flags & BTREE_TRIGGER_OVERWRITE));
1755                 goto out;
1756         }
1757
1758         sectors = min_t(u64, sectors, k.k->p.offset - idx);
1759
1760         n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1761         ret = PTR_ERR_OR_ZERO(n);
1762         if (ret)
1763                 goto err;
1764
1765         bkey_reassemble(n, k);
1766
1767         refcount = bkey_refcount(n);
1768         if (!refcount) {
1769                 bch2_fs_inconsistent(c,
1770                         "%llu:%llu len %u points to nonexistent indirect extent %llu",
1771                         p.k->p.inode, p.k->p.offset, p.k->size, idx);
1772                 bch2_inconsistent_error(c);
1773                 ret = -EIO;
1774                 goto err;
1775         }
1776
1777         BUG_ON(!*refcount && (flags & BTREE_TRIGGER_OVERWRITE));
1778         le64_add_cpu(refcount, add);
1779
1780         if (!*refcount) {
1781                 n->k.type = KEY_TYPE_deleted;
1782                 set_bkey_val_u64s(&n->k, 0);
1783         }
1784
1785         bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1786         bch2_trans_update(trans, iter, n, 0);
1787 out:
1788         ret = sectors;
1789 err:
1790         bch2_trans_iter_put(trans, iter);
1791         return ret;
1792 }
1793
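/*
 * Walk the indirect extents covered by reflink pointer @p, adjusting each
 * one's refcount in turn.
 */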
1794 static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
1795                         struct bkey_s_c_reflink_p p, unsigned offset,
1796                         s64 sectors, unsigned flags)
1797 {
1798         u64 idx = le64_to_cpu(p.v->idx) + offset;
1799         unsigned front_frag, back_frag;
1800         s64 ret = 0;
1801
1802         if (sectors < 0)
1803                 sectors = -sectors;
1804
1805         BUG_ON(offset + sectors > p.k->size);
1806
1807         front_frag = offset;
1808         back_frag = offset + sectors;
1809
1810         while (sectors) {
1811                 ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors,
1812                                         front_frag, back_frag, flags);
1813                 if (ret < 0)
1814                         break;
1815
1816                 idx += ret;
1817                 sectors = max_t(s64, 0LL, sectors - ret);
1818                 ret = 0;
1819         }
1820
1821         return ret;
1822 }
1823
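/*
 * Transactional trigger dispatch: route the key being inserted or
 * overwritten to the appropriate mark function above, and account inode
 * counts and persistent reservations directly in the replicas delta list.
 */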
1824 int bch2_trans_mark_key(struct btree_trans *trans,
1825                         struct bkey_s_c old,
1826                         struct bkey_s_c new,
1827                         unsigned offset, s64 sectors, unsigned flags)
1828 {
1829         struct bch_fs *c = trans->c;
1830         struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
1831         struct replicas_delta_list *d;
1832
1833         BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
1834
1835         switch (k.k->type) {
1836         case KEY_TYPE_btree_ptr:
1837         case KEY_TYPE_btree_ptr_v2:
1838                 sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
1839                         ?  c->opts.btree_node_size
1840                         : -c->opts.btree_node_size;
1841
1842                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1843                                               flags, BCH_DATA_btree);
1844         case KEY_TYPE_extent:
1845         case KEY_TYPE_reflink_v:
1846                 return bch2_trans_mark_extent(trans, k, offset, sectors,
1847                                               flags, BCH_DATA_user);
1848         case KEY_TYPE_stripe:
1849                 return bch2_trans_mark_stripe(trans, old, new, flags);
1850         case KEY_TYPE_inode: {
1851                 int nr = (new.k->type == KEY_TYPE_inode) -
1852                          (old.k->type == KEY_TYPE_inode);
1853
1854                 if (nr) {
1855                         d = replicas_deltas_realloc(trans, 0);
1856                         d->nr_inodes += nr;
1857                 }
1858
1859                 return 0;
1860         }
1861         case KEY_TYPE_reservation: {
1862                 unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
1863
1864                 d = replicas_deltas_realloc(trans, 0);
1865
1866                 sectors *= replicas;
1867                 replicas = clamp_t(unsigned, replicas, 1,
1868                                    ARRAY_SIZE(d->persistent_reserved));
1869
1870                 d->persistent_reserved[replicas - 1] += sectors;
1871                 return 0;
1872         }
1873         case KEY_TYPE_reflink_p:
1874                 return bch2_trans_mark_reflink_p(trans,
1875                                         bkey_s_c_to_reflink_p(k),
1876                                         offset, sectors, flags);
1877         default:
1878                 return 0;
1879         }
1880 }
1881
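/*
 * Run transactional triggers for a pending update: look up the key being
 * overwritten (from the btree or the key cache) and, for extent btrees, walk
 * every overlapping existing extent and issue an overwrite trigger for each.
 */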
1882 int bch2_trans_mark_update(struct btree_trans *trans,
1883                            struct btree_iter *iter,
1884                            struct bkey_i *new,
1885                            unsigned flags)
1886 {
1887         struct bkey_s_c old;
1888         int ret;
1889
1890         if (unlikely(flags & BTREE_TRIGGER_NORUN))
1891                 return 0;
1892
1893         if (!btree_node_type_needs_gc(iter->btree_id))
1894                 return 0;
1895
1896         if (!btree_node_type_is_extents(iter->btree_id)) {
1897                 if (btree_iter_type(iter) != BTREE_ITER_CACHED) {
1898                         old = bch2_btree_iter_peek_slot(iter);
1899                         ret = bkey_err(old);
1900                         if (ret)
1901                                 return ret;
1902                 } else {
1903                         struct bkey_cached *ck = (void *) iter->l[0].b;
1904
1905                         BUG_ON(!ck->valid);
1906                         old = bkey_i_to_s_c(ck->k);
1907                 }
1908
1909                 if (old.k->type == new->k.type) {
1910                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1911                                         BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
1912                 } else {
1913                         ret   = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1914                                         BTREE_TRIGGER_INSERT|flags) ?:
1915                                 bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new), 0, 0,
1916                                         BTREE_TRIGGER_OVERWRITE|flags);
1917                 }
1918         } else {
1919                 struct btree_iter *copy;
1920                 struct bkey _old;
1921
1922                 EBUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1923
1924                 bkey_init(&_old);
1925                 old = (struct bkey_s_c) { &_old, NULL };
1926
1927                 ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
1928                                           0, new->k.size,
1929                                           BTREE_TRIGGER_INSERT);
1930                 if (ret)
1931                         return ret;
1932
1933                 copy = bch2_trans_copy_iter(trans, iter);
1934
1935                 for_each_btree_key_continue(copy, 0, old, ret) {
1936                         unsigned offset = 0;
1937                         s64 sectors = -((s64) old.k->size);
1938
1939                         flags |= BTREE_TRIGGER_OVERWRITE;
1940
1941                         if (bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0)
1942                                 break;
1943
1944                         switch (bch2_extent_overlap(&new->k, old.k)) {
1945                         case BCH_EXTENT_OVERLAP_ALL:
1946                                 offset = 0;
1947                                 sectors = -((s64) old.k->size);
1948                                 break;
1949                         case BCH_EXTENT_OVERLAP_BACK:
1950                                 offset = bkey_start_offset(&new->k) -
1951                                         bkey_start_offset(old.k);
1952                                 sectors = bkey_start_offset(&new->k) -
1953                                         old.k->p.offset;
1954                                 break;
1955                         case BCH_EXTENT_OVERLAP_FRONT:
1956                                 offset = 0;
1957                                 sectors = bkey_start_offset(old.k) -
1958                                         new->k.p.offset;
1959                                 break;
1960                         case BCH_EXTENT_OVERLAP_MIDDLE:
1961                                 offset = bkey_start_offset(&new->k) -
1962                                         bkey_start_offset(old.k);
1963                                 sectors = -((s64) new->k.size);
1964                                 flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
1965                                 break;
1966                         }
1967
1968                         BUG_ON(sectors >= 0);
1969
1970                         ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
1971                                         offset, sectors, flags);
1972                         if (ret)
1973                                 break;
1974                 }
1975                 bch2_trans_iter_put(trans, copy);
1976         }
1977
1978         return ret;
1979 }
1980
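/*
 * Mark a superblock or journal bucket: set its data type and dirty sector
 * count in the alloc btree, erroring if the bucket already holds a different
 * type of data.
 */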
1981 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
1982                                     struct bch_dev *ca, size_t b,
1983                                     enum bch_data_type type,
1984                                     unsigned sectors)
1985 {
1986         struct bch_fs *c = trans->c;
1987         struct btree_iter *iter;
1988         struct bkey_alloc_unpacked u;
1989         struct bkey_alloc_buf *a;
1990         struct bch_extent_ptr ptr = {
1991                 .dev = ca->dev_idx,
1992                 .offset = bucket_to_sector(ca, b),
1993         };
1994         int ret = 0;
1995
1996         a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
1997         if (IS_ERR(a))
1998                 return PTR_ERR(a);
1999
2000         if (u.data_type && u.data_type != type) {
2001                 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
2002                         "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
2003                         "while marking %s",
2004                         iter->pos.inode, iter->pos.offset, u.gen,
2005                         bch2_data_types[u.data_type],
2006                         bch2_data_types[type],
2007                         bch2_data_types[type]);
2008                 ret = -EIO;
2009                 goto out;
2010         }
2011
2012         u.data_type     = type;
2013         u.dirty_sectors = sectors;
2014
2015         bch2_alloc_pack(c, a, u);
2016         bch2_trans_update(trans, iter, &a->k, 0);
2017 out:
2018         bch2_trans_iter_put(trans, iter);
2019         return ret;
2020 }
2021
2022 int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
2023                                     struct bch_dev *ca, size_t b,
2024                                     enum bch_data_type type,
2025                                     unsigned sectors)
2026 {
2027         return __bch2_trans_do(trans, NULL, NULL, 0,
2028                         __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
2029 }
2030
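/*
 * Accumulate a run of metadata sectors into per-bucket totals, flushing the
 * running count whenever the run crosses into the next bucket.
 */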
2031 static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
2032                                             struct bch_dev *ca,
2033                                             u64 start, u64 end,
2034                                             enum bch_data_type type,
2035                                             u64 *bucket, unsigned *bucket_sectors)
2036 {
2037         do {
2038                 u64 b = sector_to_bucket(ca, start);
2039                 unsigned sectors =
2040                         min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
2041
2042                 if (b != *bucket && *bucket_sectors) {
2043                         int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
2044                                                                   type, *bucket_sectors);
2045                         if (ret)
2046                                 return ret;
2047
2048                         *bucket_sectors = 0;
2049                 }
2050
2051                 *bucket         = b;
2052                 *bucket_sectors += sectors;
2053                 start += sectors;
2054         } while (start < end);
2055
2056         return 0;
2057 }
2058
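/*
 * Mark the buckets containing a device's superblocks and journal.
 */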
2059 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
2060                                     struct bch_dev *ca)
2061 {
2062         struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
2063         u64 bucket = 0;
2064         unsigned i, bucket_sectors = 0;
2065         int ret;
2066
2067         for (i = 0; i < layout->nr_superblocks; i++) {
2068                 u64 offset = le64_to_cpu(layout->sb_offset[i]);
2069
2070                 if (offset == BCH_SB_SECTOR) {
2071                         ret = bch2_trans_mark_metadata_sectors(trans, ca,
2072                                                 0, BCH_SB_SECTOR,
2073                                                 BCH_DATA_sb, &bucket, &bucket_sectors);
2074                         if (ret)
2075                                 return ret;
2076                 }
2077
2078                 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
2079                                       offset + (1 << layout->sb_max_size_bits),
2080                                       BCH_DATA_sb, &bucket, &bucket_sectors);
2081                 if (ret)
2082                         return ret;
2083         }
2084
2085         if (bucket_sectors) {
2086                 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2087                                 bucket, BCH_DATA_sb, bucket_sectors);
2088                 if (ret)
2089                         return ret;
2090         }
2091
2092         for (i = 0; i < ca->journal.nr; i++) {
2093                 ret = bch2_trans_mark_metadata_bucket(trans, ca,
2094                                 ca->journal.buckets[i],
2095                                 BCH_DATA_journal, ca->mi.bucket_size);
2096                 if (ret)
2097                         return ret;
2098         }
2099
2100         return 0;
2101 }
2102
2103 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
2104 {
2105         return bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
2106                         __bch2_trans_mark_dev_sb(&trans, ca));
2107 }
2108
2109 /* Disk reservations: */
2110
2111 #define SECTORS_CACHE   1024
2112
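/*
 * Fast path: each CPU carves off SECTORS_CACHE sectors at a time from the
 * global c->sectors_available counter and satisfies reservations from its
 * local cache; when the global counter runs dry, fall back to recalculating
 * free space under sectors_available_lock.
 */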
2113 int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
2114                               u64 sectors, int flags)
2115 {
2116         struct bch_fs_pcpu *pcpu;
2117         u64 old, v, get;
2118         s64 sectors_available;
2119         int ret;
2120
2121         percpu_down_read(&c->mark_lock);
2122         preempt_disable();
2123         pcpu = this_cpu_ptr(c->pcpu);
2124
2125         if (sectors <= pcpu->sectors_available)
2126                 goto out;
2127
2128         v = atomic64_read(&c->sectors_available);
2129         do {
2130                 old = v;
2131                 get = min((u64) sectors + SECTORS_CACHE, old);
2132
2133                 if (get < sectors) {
2134                         preempt_enable();
2135                         goto recalculate;
2136                 }
2137         } while ((v = atomic64_cmpxchg(&c->sectors_available,
2138                                        old, old - get)) != old);
2139
2140         pcpu->sectors_available         += get;
2141
2142 out:
2143         pcpu->sectors_available         -= sectors;
2144         this_cpu_add(*c->online_reserved, sectors);
2145         res->sectors                    += sectors;
2146
2147         preempt_enable();
2148         percpu_up_read(&c->mark_lock);
2149         return 0;
2150
2151 recalculate:
2152         mutex_lock(&c->sectors_available_lock);
2153
2154         percpu_u64_set(&c->pcpu->sectors_available, 0);
2155         sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
2156
2157         if (sectors <= sectors_available ||
2158             (flags & BCH_DISK_RESERVATION_NOFAIL)) {
2159                 atomic64_set(&c->sectors_available,
2160                              max_t(s64, 0, sectors_available - sectors));
2161                 this_cpu_add(*c->online_reserved, sectors);
2162                 res->sectors                    += sectors;
2163                 ret = 0;
2164         } else {
2165                 atomic64_set(&c->sectors_available, sectors_available);
2166                 ret = -ENOSPC;
2167         }
2168
2169         mutex_unlock(&c->sectors_available_lock);
2170         percpu_up_read(&c->mark_lock);
2171
2172         return ret;
2173 }
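
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * take a disk reservation before dirtying data. Error handling and the
 * release path (bch2_disk_reservation_put() in buckets.h) are elided;
 * treat names not defined in this file as assumptions.
 */
#if 0
static int example_reserve(struct bch_fs *c, u64 sectors)
{
	struct disk_reservation res = { 0 };
	int ret;

	/* May return -ENOSPC unless BCH_DISK_RESERVATION_NOFAIL is passed: */
	ret = bch2_disk_reservation_add(c, &res, sectors, 0);
	if (ret)
		return ret;

	/* ... res.sectors is now charged against online_reserved ... */

	return 0;
}
#endif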
2174
2175 /* Startup/shutdown: */
2176
2177 static void buckets_free_rcu(struct rcu_head *rcu)
2178 {
2179         struct bucket_array *buckets =
2180                 container_of(rcu, struct bucket_array, rcu);
2181
2182         kvpfree(buckets,
2183                 sizeof(struct bucket_array) +
2184                 buckets->nbuckets * sizeof(struct bucket));
2185 }
2186
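/*
 * Allocate bucket arrays and freelists sized for @nbuckets; when resizing an
 * existing device, copy the old contents across under the gc, bucket and
 * mark locks, then free the old arrays via RCU.
 */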
2187 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
2188 {
2189         struct bucket_array *buckets = NULL, *old_buckets = NULL;
2190         unsigned long *buckets_nouse = NULL;
2191         alloc_fifo      free[RESERVE_NR];
2192         alloc_fifo      free_inc;
2193         alloc_heap      alloc_heap;
2194
2195         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
2196                              ca->mi.bucket_size / c->opts.btree_node_size);
2197         /* XXX: these should be tunable */
2198         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
2199         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 6);
2200         size_t free_inc_nr      = max(max_t(size_t, 1, nbuckets >> 12),
2201                                       btree_reserve * 2);
2202         bool resize = ca->buckets[0] != NULL;
2203         int ret = -ENOMEM;
2204         unsigned i;
2205
2206         memset(&free,           0, sizeof(free));
2207         memset(&free_inc,       0, sizeof(free_inc));
2208         memset(&alloc_heap,     0, sizeof(alloc_heap));
2209
2210         if (!(buckets           = kvpmalloc(sizeof(struct bucket_array) +
2211                                             nbuckets * sizeof(struct bucket),
2212                                             GFP_KERNEL|__GFP_ZERO)) ||
2213             !(buckets_nouse     = kvpmalloc(BITS_TO_LONGS(nbuckets) *
2214                                             sizeof(unsigned long),
2215                                             GFP_KERNEL|__GFP_ZERO)) ||
2216             !init_fifo(&free[RESERVE_MOVINGGC],
2217                        copygc_reserve, GFP_KERNEL) ||
2218             !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
2219             !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
2220             !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
2221                 goto err;
2222
2223         buckets->first_bucket   = ca->mi.first_bucket;
2224         buckets->nbuckets       = nbuckets;
2225
2226         bch2_copygc_stop(c);
2227
2228         if (resize) {
2229                 down_write(&c->gc_lock);
2230                 down_write(&ca->bucket_lock);
2231                 percpu_down_write(&c->mark_lock);
2232         }
2233
2234         old_buckets = bucket_array(ca);
2235
2236         if (resize) {
2237                 size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
2238
2239                 memcpy(buckets->b,
2240                        old_buckets->b,
2241                        n * sizeof(struct bucket));
2242                 memcpy(buckets_nouse,
2243                        ca->buckets_nouse,
2244                        BITS_TO_LONGS(n) * sizeof(unsigned long));
2245         }
2246
2247         rcu_assign_pointer(ca->buckets[0], buckets);
2248         buckets = old_buckets;
2249
2250         swap(ca->buckets_nouse, buckets_nouse);
2251
2252         if (resize) {
2253                 percpu_up_write(&c->mark_lock);
2254                 up_write(&c->gc_lock);
2255         }
2256
2257         spin_lock(&c->freelist_lock);
2258         for (i = 0; i < RESERVE_NR; i++) {
2259                 fifo_move(&free[i], &ca->free[i]);
2260                 swap(ca->free[i], free[i]);
2261         }
2262         fifo_move(&free_inc, &ca->free_inc);
2263         swap(ca->free_inc, free_inc);
2264         spin_unlock(&c->freelist_lock);
2265
2266         /* with gc lock held, alloc_heap can't be in use: */
2267         swap(ca->alloc_heap, alloc_heap);
2268
2269         nbuckets = ca->mi.nbuckets;
2270
2271         if (resize)
2272                 up_write(&ca->bucket_lock);
2273
2274         ret = 0;
2275 err:
2276         free_heap(&alloc_heap);
2277         free_fifo(&free_inc);
2278         for (i = 0; i < RESERVE_NR; i++)
2279                 free_fifo(&free[i]);
2280         kvpfree(buckets_nouse,
2281                 BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
2282         if (buckets)
2283                 call_rcu(&old_buckets->rcu, buckets_free_rcu);
2284
2285         return ret;
2286 }
2287
2288 void bch2_dev_buckets_free(struct bch_dev *ca)
2289 {
2290         unsigned i;
2291
2292         free_heap(&ca->alloc_heap);
2293         free_fifo(&ca->free_inc);
2294         for (i = 0; i < RESERVE_NR; i++)
2295                 free_fifo(&ca->free[i]);
2296         kvpfree(ca->buckets_nouse,
2297                 BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
2298         kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
2299                 sizeof(struct bucket_array) +
2300                 ca->mi.nbuckets * sizeof(struct bucket));
2301
2302         for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
2303                 free_percpu(ca->usage[i]);
2304         kfree(ca->usage_base);
2305 }
2306
2307 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
2308 {
2309         unsigned i;
2310
2311         ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
2312         if (!ca->usage_base)
2313                 return -ENOMEM;
2314
2315         for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
2316                 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
2317                 if (!ca->usage[i])
2318                         return -ENOMEM;
2319         }
2320
2321         return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
2322 }