// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 *
 * Bucket states:
 * - free bucket: mark == 0
 *   The bucket contains no data and will not be read
 *
 * - allocator bucket: owned_by_allocator == 1
 *   The bucket is on a free list, or it is an open bucket
 *
 * - cached bucket: owned_by_allocator == 0 &&
 *		    dirty_sectors == 0 &&
 *		    cached_sectors > 0
 *   The bucket contains data but may be safely discarded as there are
 *   enough replicas of the data on other cache devices, or it has been
 *   written back to the backing device
 *
 * - dirty bucket: owned_by_allocator == 0 &&
 *		   dirty_sectors > 0
 *   The bucket contains data that we must not discard (either only copy,
 *   or one of the 'main copies' for data requiring multiple replicas)
 *
 * - metadata bucket: owned_by_allocator == 0 && is_metadata == 1
 *   This is a btree node, journal or gen/prio bucket
 *
 * Lifecycle:
 *
 * bucket invalidated => bucket on freelist => open bucket =>
 *     [dirty bucket =>] cached bucket => bucket invalidated => ...
 *
 * Note that cache promotion can skip the dirty bucket step, as data
 * is copied from a deeper tier to a shallower tier, onto a cached
 * bucket.
 * Note also that a cached bucket can spontaneously become dirty --
 * see below.
 *
 * Only a traversal of the key space can determine whether a bucket is
 * truly dirty or cached.
 *
 * Transitions:
 *
 * - free => allocator: bucket was invalidated
 * - cached => allocator: bucket was invalidated
 *
 * - allocator => dirty: open bucket was filled up
 * - allocator => cached: open bucket was filled up
 * - allocator => metadata: metadata was allocated
 *
 * - dirty => cached: dirty sectors were copied to a deeper tier
 * - dirty => free: dirty sectors were overwritten or moved (copy gc)
 * - cached => free: cached sectors were overwritten
 *
 * - metadata => free: metadata was freed
 *
 * Oddities:
 * - cached => dirty: a device was removed so formerly replicated data
 *		      is no longer sufficiently replicated
 * - free => cached: cannot happen
 * - free => dirty: cannot happen
 * - free => metadata: cannot happen
 */
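/*
 * Concretely (an illustrative walk through the lifecycle above): a bucket
 * is invalidated (its generation number is incremented, making any
 * outstanding pointers into it stale), put on a freelist, then handed out
 * as an open bucket; writes fill it with dirty sectors; once those sectors
 * have been copied to a deeper tier or sufficiently replicated, only
 * cached data remains and the bucket may be invalidated again. Helpers
 * such as bucket_type() below encode these states.
 */
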
#include "bcachefs.h"
#include "alloc_background.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "ec.h"
#include "error.h"
#include "movinggc.h"
#include "replicas.h"

#include <linux/preempt.h>
#include <trace/events/bcachefs.h>

/*
 * Clear journal_seq_valid for buckets for which it's not needed, to prevent
 * wraparound:
 */
void bch2_bucket_seq_cleanup(struct bch_fs *c)
{
	u64 journal_seq = atomic64_read(&c->journal.seq);
	u16 last_seq_ondisk = c->journal.last_seq_ondisk;
	struct bch_dev *ca;
	struct bucket_array *buckets;
	struct bucket *g;
	struct bucket_mark m;
	unsigned i;

	if (journal_seq - c->last_bucket_seq_cleanup <
	    (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
		return;

	c->last_bucket_seq_cleanup = journal_seq;

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets) {
			bucket_cmpxchg(g, m, ({
				if (!m.journal_seq_valid ||
				    bucket_needs_journal_commit(m, last_seq_ondisk))
					break;

				m.journal_seq_valid = 0;
			}));
		}
		up_read(&ca->bucket_lock);
	}
}

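/*
 * Fold both percpu accumulators into usage_base, then recompute the
 * summary counters (reserved, btree, data, cached) from the
 * persistent_reserved and per-replicas-entry counters:
 */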
void bch2_fs_usage_initialize(struct bch_fs *c)
{
	struct bch_fs_usage *usage;
	unsigned i;

	percpu_down_write(&c->mark_lock);
	usage = c->usage_base;

	bch2_fs_usage_acc_to_base(c, 0);
	bch2_fs_usage_acc_to_base(c, 1);

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		usage->reserved += usage->persistent_reserved[i];

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		switch (e->data_type) {
		case BCH_DATA_BTREE:
			usage->btree	+= usage->replicas[i];
			break;
		case BCH_DATA_USER:
			usage->data	+= usage->replicas[i];
			break;
		case BCH_DATA_CACHED:
			usage->cached	+= usage->replicas[i];
			break;
		}
	}

	percpu_up_write(&c->mark_lock);
}

void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
{
	if (fs_usage == c->usage_scratch)
		mutex_unlock(&c->usage_scratch_lock);
	else
		kfree(fs_usage);
}

struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
{
	struct bch_fs_usage *ret;
	unsigned bytes = fs_usage_u64s(c) * sizeof(u64);

	ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
	if (ret)
		return ret;

	if (mutex_trylock(&c->usage_scratch_lock))
		goto out_pool;

	ret = kzalloc(bytes, GFP_NOFS);
	if (ret)
		return ret;

	mutex_lock(&c->usage_scratch_lock);
out_pool:
	ret = c->usage_scratch;
	memset(ret, 0, bytes);
	return ret;
}

struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	memset(&ret, 0, sizeof(ret));
	acc_u64s_percpu((u64 *) &ret,
			(u64 __percpu *) ca->usage[0],
			sizeof(ret) / sizeof(u64));

	return ret;
}

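/*
 * Filesystem usage lives in several sets of percpu counters: a gc copy,
 * and two accumulators selected by the low bit of the journal sequence
 * number, so that one can be folded into usage_base (see
 * bch2_fs_usage_acc_to_base()) while the other is still being updated:
 */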
static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & 1]);
}

u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
	percpu_rwsem_assert_held(&c->mark_lock);

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v +
			percpu_u64_get((u64 __percpu *) c->usage[0] + offset) +
			percpu_u64_get((u64 __percpu *) c->usage[1] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
{
	struct bch_fs_usage *ret;
	unsigned seq, v, u64s = fs_usage_u64s(c);
retry:
	ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
	if (unlikely(!ret))
		return NULL;

	percpu_down_read(&c->mark_lock);

	v = fs_usage_u64s(c);
	if (unlikely(u64s != v)) {
		u64s = v;
		percpu_up_read(&c->mark_lock);
		kfree(ret);
		goto retry;
	}

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		memcpy(ret, c->usage_base, u64s * sizeof(u64));
		acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
		acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}

void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
	unsigned u64s = fs_usage_u64s(c);

	BUG_ON(idx >= 2);

	preempt_disable();
	write_seqcount_begin(&c->usage_lock);

	acc_u64s_percpu((u64 *) c->usage_base,
			(u64 __percpu *) c->usage[idx], u64s);
	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

	write_seqcount_end(&c->usage_lock);
	preempt_enable();
}

void bch2_fs_usage_to_text(struct printbuf *out,
			   struct bch_fs *c,
			   struct bch_fs_usage *fs_usage)
{
	unsigned i;

	pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);

	pr_buf(out, "hidden:\t\t\t\t%llu\n",
	       fs_usage->hidden);
	pr_buf(out, "data:\t\t\t\t%llu\n",
	       fs_usage->data);
	pr_buf(out, "cached:\t\t\t\t%llu\n",
	       fs_usage->cached);
	pr_buf(out, "reserved:\t\t\t%llu\n",
	       fs_usage->reserved);
	pr_buf(out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->nr_inodes);
	pr_buf(out, "online reserved:\t\t%llu\n",
	       fs_usage->online_reserved);

	for (i = 0;
	     i < ARRAY_SIZE(fs_usage->persistent_reserved);
	     i++) {
		pr_buf(out, "%u replicas:\n", i + 1);
		pr_buf(out, "\treserved:\t\t%llu\n",
		       fs_usage->persistent_reserved[i]);
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		pr_buf(out, "\t");
		bch2_replicas_entry_to_text(out, e);
		pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
	}
}

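/*
 * reserve_factor() pads a reservation by 1/2^RESERVE_FACTOR, i.e. 1/64th,
 * rounding up; avail_factor() scales the other way, by 64/65. Illustrative
 * numbers: reserve_factor(1024) = 1024 + 16 = 1040, and
 * avail_factor(1040) = (1040 * 64) / 65 = 1024.
 */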
#define RESERVE_FACTOR	6

static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

static u64 avail_factor(u64 r)
{
	return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}

u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
{
	return min(fs_usage->hidden +
		   fs_usage->btree +
		   fs_usage->data +
		   reserve_factor(fs_usage->reserved +
				  fs_usage->online_reserved),
		   c->capacity);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		bch2_fs_usage_read_one(c, &c->usage_base->hidden);

	data		= bch2_fs_usage_read_one(c, &c->usage_base->data) +
		bch2_fs_usage_read_one(c, &c->usage_base->btree);
	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
		bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

static inline int is_unavailable_bucket(struct bucket_mark m)
{
	return !is_available_bucket(m);
}

static inline int is_fragmented_bucket(struct bucket_mark m,
				       struct bch_dev *ca)
{
	if (!m.owned_by_allocator &&
	    m.data_type == BCH_DATA_USER &&
	    bucket_sectors_used(m))
		return max_t(int, 0, (int) ca->mi.bucket_size -
			     bucket_sectors_used(m));
	return 0;
}

static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
	return m.cached_sectors && !m.dirty_sectors
		? BCH_DATA_CACHED
		: m.data_type;
}

static bool bucket_became_unavailable(struct bucket_mark old,
				      struct bucket_mark new)
{
	return is_available_bucket(old) &&
	       !is_available_bucket(new);
}

int bch2_fs_usage_apply(struct bch_fs *c,
			struct bch_fs_usage *fs_usage,
			struct disk_reservation *disk_res,
			unsigned journal_seq)
{
	s64 added = fs_usage->data + fs_usage->reserved;
	s64 should_not_have_added;
	int ret = 0;

	percpu_rwsem_assert_held(&c->mark_lock);

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
	if (WARN_ONCE(should_not_have_added > 0,
		      "disk usage increased by %lli without a reservation",
		      should_not_have_added)) {
		atomic64_sub(should_not_have_added, &c->sectors_available);
		added -= should_not_have_added;
		ret = -1;
	}

	if (added > 0) {
		disk_res->sectors		-= added;
		fs_usage->online_reserved	-= added;
	}

	preempt_disable();
	acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
		 (u64 *) fs_usage, fs_usage_u64s(c));
	preempt_enable();

	return ret;
}

static inline void account_bucket(struct bch_fs_usage *fs_usage,
				  struct bch_dev_usage *dev_usage,
				  enum bch_data_type type,
				  int nr, s64 size)
{
	if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
		fs_usage->hidden	+= size;

	dev_usage->buckets[type]	+= nr;
}

static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
				  struct bch_fs_usage *fs_usage,
				  struct bucket_mark old, struct bucket_mark new,
				  bool gc)
{
	struct bch_dev_usage *dev_usage;

	percpu_rwsem_assert_held(&c->mark_lock);

	preempt_disable();
	dev_usage = this_cpu_ptr(ca->usage[gc]);

	if (bucket_type(old))
		account_bucket(fs_usage, dev_usage, bucket_type(old),
			       -1, -ca->mi.bucket_size);

	if (bucket_type(new))
		account_bucket(fs_usage, dev_usage, bucket_type(new),
			       1, ca->mi.bucket_size);

	dev_usage->buckets_alloc +=
		(int) new.owned_by_allocator - (int) old.owned_by_allocator;
	dev_usage->buckets_ec +=
		(int) new.stripe - (int) old.stripe;
	dev_usage->buckets_unavailable +=
		is_unavailable_bucket(new) - is_unavailable_bucket(old);

	dev_usage->sectors[old.data_type] -= old.dirty_sectors;
	dev_usage->sectors[new.data_type] += new.dirty_sectors;
	dev_usage->sectors[BCH_DATA_CACHED] +=
		(int) new.cached_sectors - (int) old.cached_sectors;
	dev_usage->sectors_fragmented +=
		is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
	preempt_enable();

	if (!is_available_bucket(old) && is_available_bucket(new))
		bch2_wake_allocator(ca);
}

void bch2_dev_usage_from_buckets(struct bch_fs *c)
{
	struct bch_dev *ca;
	struct bucket_mark old = { .v.counter = 0 };
	struct bucket_array *buckets;
	struct bucket *g;
	unsigned i;
	int cpu;

	c->usage_base->hidden = 0;

	for_each_member_device(ca, c, i) {
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(ca->usage[0], cpu), 0,
			       sizeof(*ca->usage[0]));

		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			bch2_dev_usage_update(c, ca, c->usage_base,
					      old, g->mark, false);
	}
}

static inline int update_replicas(struct bch_fs *c,
				  struct bch_fs_usage *fs_usage,
				  struct bch_replicas_entry *r,
				  s64 sectors)
{
	int idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0)
		return -1;

	if (!fs_usage)
		return 0;

	switch (r->data_type) {
	case BCH_DATA_BTREE:
		fs_usage->btree		+= sectors;
		break;
	case BCH_DATA_USER:
		fs_usage->data		+= sectors;
		break;
	case BCH_DATA_CACHED:
		fs_usage->cached	+= sectors;
		break;
	}
	fs_usage->replicas[idx]		+= sectors;

	return 0;
}

static inline void update_cached_sectors(struct bch_fs *c,
					 struct bch_fs_usage *fs_usage,
					 unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas(c, fs_usage, &r.e, sectors);
}

static struct replicas_delta_list *
replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
	struct replicas_delta_list *d = trans->fs_usage_deltas;
	unsigned new_size = d ? (d->size + more) * 2 : 128;

	if (!d || d->used + more > d->size) {
		d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO);
		BUG_ON(!d);

		d->size = new_size;
		trans->fs_usage_deltas = d;
	}
	return d;
}

static inline void update_replicas_list(struct btree_trans *trans,
					struct bch_replicas_entry *r,
					s64 sectors)
{
	struct replicas_delta_list *d;
	struct replicas_delta *n;
	unsigned b;

	if (!sectors)
		return;

	b = replicas_entry_bytes(r) + 8;
	d = replicas_deltas_realloc(trans, b);

	n = (void *) d->d + d->used;
	n->delta = sectors;
	memcpy(&n->r, r, replicas_entry_bytes(r));
	d->used += b;
}

static inline void update_cached_sectors_list(struct btree_trans *trans,
					      unsigned dev, s64 sectors)
{
	struct bch_replicas_padded r;

	bch2_replicas_entry_cached(&r.e, dev);

	update_replicas_list(trans, &r.e, sectors);
}

static inline struct replicas_delta *
replicas_delta_next(struct replicas_delta *d)
{
	return (void *) d + replicas_entry_bytes(&d->r) + 8;
}

int bch2_replicas_delta_list_apply(struct bch_fs *c,
				   struct bch_fs_usage *fs_usage,
				   struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	unsigned i;

	for (d = r->d; d != top; d = replicas_delta_next(d))
		if (update_replicas(c, fs_usage, &d->r, d->delta)) {
			top = d;
			goto unwind;
		}

	if (!fs_usage)
		return 0;

	fs_usage->nr_inodes += r->nr_inodes;

	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		fs_usage->reserved += r->persistent_reserved[i];
		fs_usage->persistent_reserved[i] += r->persistent_reserved[i];
	}

	return 0;
unwind:
	for (d = r->d; d != top; d = replicas_delta_next(d))
		update_replicas(c, fs_usage, &d->r, -d->delta);
	return -1;
}

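/*
 * Marking may have to update two sets of bookkeeping: the normal runtime
 * set, and the set gc is rebuilding if gc is in progress and has already
 * visited this position. do_mark_fn() runs @fn against whichever of the
 * two apply, based on BTREE_TRIGGER_GC and gc_visited():
 */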
#define do_mark_fn(fn, c, pos, flags, ...)				\
({									\
	int gc, ret = 0;						\
									\
	percpu_rwsem_assert_held(&c->mark_lock);			\
									\
	for (gc = 0; gc < 2 && !ret; gc++)				\
		if (!gc == !(flags & BTREE_TRIGGER_GC) ||		\
		    (gc && gc_visited(c, pos)))				\
			ret = fn(c, __VA_ARGS__, gc);			\
									\
	ret;								\
})

static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, struct bucket_mark *ret,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		BUG_ON(!is_available_bucket(new));

		new.owned_by_allocator	= true;
		new.data_type		= 0;
		new.cached_sectors	= 0;
		new.dirty_sectors	= 0;
		new.gen++;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	if (old.cached_sectors)
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -((s64) old.cached_sectors));

	*ret = old;
	return 0;
}

void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, struct bucket_mark *old)
{
	do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
		   ca, b, old);

	if (!old->owned_by_allocator && old->cached_sectors)
		trace_invalidate(ca, bucket_to_sector(ca, b),
				 old->cached_sectors);
}

static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, bool owned_by_allocator,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		new.owned_by_allocator	= owned_by_allocator;
	}));

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	BUG_ON(!gc &&
	       !owned_by_allocator && !old.owned_by_allocator);

	return 0;
}

void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, bool owned_by_allocator,
			    struct gc_pos pos, unsigned flags)
{
	preempt_disable();

	do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
		   ca, b, owned_by_allocator);

	preempt_enable();
}

static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_fs_usage *fs_usage,
			   u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bkey_alloc_unpacked u;
	struct bch_dev *ca;
	struct bucket *g;
	struct bucket_mark old, m;

	/*
	 * alloc btree is read in by bch2_alloc_read, not gc:
	 */
	if ((flags & BTREE_TRIGGER_GC) &&
	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
		return 0;

	ca = bch_dev_bkey_exists(c, k.k->p.inode);

	if (k.k->p.offset >= ca->mi.nbuckets)
		return 0;

	g = __bucket(ca, k.k->p.offset, gc);
	u = bch2_alloc_unpack(k);

	old = bucket_cmpxchg(g, m, ({
		m.gen			= u.gen;
		m.data_type		= u.data_type;
		m.dirty_sectors		= u.dirty_sectors;
		m.cached_sectors	= u.cached_sectors;

		if (journal_seq) {
			m.journal_seq_valid	= 1;
			m.journal_seq		= journal_seq;
		}
	}));

	if (!(flags & BTREE_TRIGGER_ALLOC_READ))
		bch2_dev_usage_update(c, ca, fs_usage, old, m, gc);

	g->io_time[READ]	= u.read_time;
	g->io_time[WRITE]	= u.write_time;
	g->oldest_gen		= u.oldest_gen;
	g->gen_valid		= 1;

	/*
	 * need to know if we're getting called from the invalidate path or
	 * not:
	 */

	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
	    old.cached_sectors) {
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -old.cached_sectors);
		trace_invalidate(ca, bucket_to_sector(ca, k.k->p.offset),
				 old.cached_sectors);
	}

	return 0;
}

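/*
 * Saturating add for the 16 bit sector counts in struct bucket_mark:
 * clamps the sum to U16_MAX and evaluates to true on overflow, so callers
 * can flag the inconsistency instead of silently wrapping:
 */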
#define checked_add(a, b)					\
({								\
	unsigned _res = (unsigned) (a) + (b);			\
	bool overflow = _res > U16_MAX;				\
	if (overflow)						\
		_res = U16_MAX;					\
	(a) = _res;						\
	overflow;						\
})

static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t b, enum bch_data_type data_type,
				       unsigned sectors, bool gc)
{
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;
	bool overflow;

	BUG_ON(data_type != BCH_DATA_SB &&
	       data_type != BCH_DATA_JOURNAL);

	old = bucket_cmpxchg(g, new, ({
		new.data_type	= data_type;
		overflow = checked_add(new.dirty_sectors, sectors);
	}));

	bch2_fs_inconsistent_on(old.data_type &&
				old.data_type != data_type, c,
		"different types of data in same bucket: %s, %s",
		bch2_data_types[old.data_type],
		bch2_data_types[data_type]);

	bch2_fs_inconsistent_on(overflow, c,
		"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
		ca->dev_idx, b, new.gen,
		bch2_data_types[old.data_type ?: data_type],
		old.dirty_sectors, sectors);

	if (c)
		bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc),
				      old, new, gc);

	return 0;
}

void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
			       size_t b, enum bch_data_type type,
			       unsigned sectors, struct gc_pos pos,
			       unsigned flags)
{
	BUG_ON(type != BCH_DATA_SB &&
	       type != BCH_DATA_JOURNAL);

	preempt_disable();

	if (likely(c))
		do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
			   ca, b, type, sectors);
	else
		__bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);

	preempt_enable();
}

static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
{
	return DIV_ROUND_UP(sectors * n, d);
}

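/*
 * disk_sectors_scaled() above converts live (uncompressed) sectors to
 * on-disk sectors by scaling by n/d = compressed_size/uncompressed_size,
 * rounding up - e.g. 6 live sectors of an extent compressed from 8 down to
 * 4 sectors is DIV_ROUND_UP(6 * 4, 8) = 3 disk sectors. The helper below
 * computes the resulting change in disk sectors when an extent of
 * @old_size live sectors is partially overwritten at @offset by @delta:
 */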
static s64 __ptr_disk_sectors_delta(unsigned old_size,
				    unsigned offset, s64 delta,
				    unsigned flags,
				    unsigned n, unsigned d)
{
	BUG_ON(!n || !d);

	if (flags & BTREE_TRIGGER_OVERWRITE_SPLIT) {
		BUG_ON(offset + -delta > old_size);

		return -disk_sectors_scaled(n, d, old_size) +
			disk_sectors_scaled(n, d, offset) +
			disk_sectors_scaled(n, d, old_size - offset + delta);
	} else if (flags & BTREE_TRIGGER_OVERWRITE) {
		BUG_ON(offset + -delta > old_size);

		return -disk_sectors_scaled(n, d, old_size) +
			disk_sectors_scaled(n, d, old_size + delta);
	} else {
		return disk_sectors_scaled(n, d, delta);
	}
}

static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
				  unsigned offset, s64 delta,
				  unsigned flags)
{
	return __ptr_disk_sectors_delta(p.crc.live_size,
					offset, delta, flags,
					p.crc.compressed_size,
					p.crc.uncompressed_size);
}

static void bucket_set_stripe(struct bch_fs *c,
			      const struct bch_stripe *v,
			      struct bch_fs_usage *fs_usage,
			      u64 journal_seq,
			      unsigned flags)
{
	bool enabled = !(flags & BTREE_TRIGGER_OVERWRITE);
	bool gc = flags & BTREE_TRIGGER_GC;
	unsigned i;

	for (i = 0; i < v->nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = v->ptrs + i;
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
		struct bucket *g = PTR_BUCKET(ca, ptr, gc);
		struct bucket_mark new, old;

		old = bucket_cmpxchg(g, new, ({
			new.stripe			= enabled;
			if (journal_seq) {
				new.journal_seq_valid	= 1;
				new.journal_seq		= journal_seq;
			}
		}));

		bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

		/*
		 * XXX write repair code for these, flag stripe as possibly bad
		 */
		if (old.gen != ptr->gen)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				      "stripe with stale pointer");
#if 0
		/*
		 * We'd like to check for these, but these checks don't work
		 * yet:
		 */
		if (old.stripe && enabled)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				      "multiple stripes using same bucket");

		if (!old.stripe && !enabled)
			bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
				      "deleting stripe but bucket not marked as stripe bucket");
#endif
	}
}

static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			  struct extent_ptr_decoded p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  u8 bucket_gen, u8 *bucket_data_type,
			  u16 *dirty_sectors, u16 *cached_sectors)
{
	u16 *dst_sectors = !p.ptr.cached
		? dirty_sectors
		: cached_sectors;
	u16 orig_sectors = *dst_sectors;
	char buf[200];

	if (gen_after(p.ptr.gen, bucket_gen)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			p.ptr.gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (gen_cmp(bucket_gen, p.ptr.gen) >= 96U) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			p.ptr.gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != p.ptr.gen && !p.ptr.cached) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			p.ptr.gen,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (bucket_gen != p.ptr.gen)
		return 1;

	if (*bucket_data_type && *bucket_data_type != ptr_data_type) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type],
			bch2_data_types[ptr_data_type],
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	if (checked_add(*dst_sectors, sectors)) {
		bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
			bucket_gen,
			bch2_data_types[*bucket_data_type ?: ptr_data_type],
			orig_sectors, sectors,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
		return -EIO;
	}

	*bucket_data_type = *dirty_sectors || *cached_sectors
		? ptr_data_type : 0;
	return 0;
}

static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
			     struct extent_ptr_decoded p,
			     s64 sectors, enum bch_data_type data_type,
			     struct bch_fs_usage *fs_usage,
			     u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bucket_mark old, new;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
	u8 bucket_data_type;
	u64 v;
	int ret;

	v = atomic64_read(&g->_mark.v);
	do {
		new.v.counter = old.v.counter = v;
		bucket_data_type = new.data_type;

		ret = __mark_pointer(c, k, p, sectors, data_type, new.gen,
				     &bucket_data_type,
				     &new.dirty_sectors,
				     &new.cached_sectors);
		if (ret)
			return ret;

		new.data_type = bucket_data_type;

		if (journal_seq) {
			new.journal_seq_valid = 1;
			new.journal_seq = journal_seq;
		}

		if (flags & BTREE_TRIGGER_NOATOMIC) {
			g->_mark = new;
			break;
		}
	} while ((v = atomic64_cmpxchg(&g->_mark.v,
			      old.v.counter,
			      new.v.counter)) != old.v.counter);

	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	BUG_ON(!gc && bucket_became_unavailable(old, new));

	return 0;
}

static int bch2_mark_stripe_ptr(struct bch_fs *c,
				struct bch_extent_stripe_ptr p,
				enum bch_data_type data_type,
				struct bch_fs_usage *fs_usage,
				s64 sectors, unsigned flags,
				struct bch_replicas_padded *r,
				unsigned *nr_data,
				unsigned *nr_parity)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct stripe *m;
	unsigned old, new;
	int blocks_nonempty_delta;

	m = genradix_ptr(&c->stripes[gc], p.idx);

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || !m->alive) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
				    (u64) p.idx);
		return -EIO;
	}

	BUG_ON(m->r.e.data_type != data_type);

	*nr_data	= m->nr_blocks - m->nr_redundant;
	*nr_parity	= m->nr_redundant;
	*r = m->r;

	old = m->block_sectors[p.block];
	m->block_sectors[p.block] += sectors;
	new = m->block_sectors[p.block];

	blocks_nonempty_delta = (int) !!new - (int) !!old;
	if (blocks_nonempty_delta) {
		m->blocks_nonempty += blocks_nonempty_delta;

		if (!gc)
			bch2_stripes_heap_update(c, m, p.idx);
	}

	spin_unlock(&c->ec_stripes_heap_lock);

	return 0;
}

static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
			    unsigned offset, s64 sectors,
			    enum bch_data_type data_type,
			    struct bch_fs_usage *fs_usage,
			    unsigned journal_seq, unsigned flags)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	BUG_ON(!sectors);

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = data_type == BCH_DATA_BTREE
			? sectors
			: ptr_disk_sectors_delta(p, offset, sectors, flags);

		ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
					fs_usage, journal_seq, flags);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors(c, fs_usage, p.ptr.dev,
						      disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			struct bch_replicas_padded ec_r;
			unsigned nr_data, nr_parity;
			s64 parity_sectors;

			ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
					fs_usage, disk_sectors, flags,
					&ec_r, &nr_data, &nr_parity);
			if (ret)
				return ret;

			parity_sectors =
				__ptr_disk_sectors_delta(p.crc.live_size,
					offset, sectors, flags,
					p.crc.compressed_size * nr_parity,
					p.crc.uncompressed_size * nr_data);

			update_replicas(c, fs_usage, &ec_r.e,
					disk_sectors + parity_sectors);

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas(c, fs_usage, &r.e, dirty_sectors);

	return 0;
}

static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
			    struct bch_fs_usage *fs_usage,
			    u64 journal_seq, unsigned flags)
{
	bool gc = flags & BTREE_TRIGGER_GC;
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	size_t idx = s.k->p.offset;
	struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
	unsigned i;

	spin_lock(&c->ec_stripes_heap_lock);

	if (!m || ((flags & BTREE_TRIGGER_OVERWRITE) && !m->alive)) {
		spin_unlock(&c->ec_stripes_heap_lock);
		bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
				    idx);
		return -1;
	}

	if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
		m->sectors	= le16_to_cpu(s.v->sectors);
		m->algorithm	= s.v->algorithm;
		m->nr_blocks	= s.v->nr_blocks;
		m->nr_redundant	= s.v->nr_redundant;

		bch2_bkey_to_replicas(&m->r.e, k);

		/*
		 * XXX: account for stripes somehow here
		 */
#if 0
		update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
#endif

		/* gc recalculates these fields: */
		if (!(flags & BTREE_TRIGGER_GC)) {
			for (i = 0; i < s.v->nr_blocks; i++) {
				m->block_sectors[i] =
					stripe_blockcount_get(s.v, i);
				m->blocks_nonempty += !!m->block_sectors[i];
			}
		}

		if (!gc)
			bch2_stripes_heap_update(c, m, idx);
		m->alive = true;
	} else {
		if (!gc)
			bch2_stripes_heap_del(c, m, idx);
		memset(m, 0, sizeof(*m));
	}

	spin_unlock(&c->ec_stripes_heap_lock);

	bucket_set_stripe(c, s.v, fs_usage, 0, flags);
	return 0;
}

static int bch2_mark_key_locked(struct bch_fs *c,
		   struct bkey_s_c k,
		   unsigned offset, s64 sectors,
		   struct bch_fs_usage *fs_usage,
		   u64 journal_seq, unsigned flags)
{
	int ret = 0;

	preempt_disable();

	if (!fs_usage || (flags & BTREE_TRIGGER_GC))
		fs_usage = fs_usage_ptr(c, journal_seq,
					flags & BTREE_TRIGGER_GC);

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
			?  c->opts.btree_node_size
			: -c->opts.btree_node_size;

		ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
				fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
				fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_stripe:
		ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags);
		break;
	case KEY_TYPE_inode:
		if (!(flags & BTREE_TRIGGER_OVERWRITE))
			fs_usage->nr_inodes++;
		else
			fs_usage->nr_inodes--;
		break;
	case KEY_TYPE_reservation: {
		unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;

		sectors *= replicas;
		replicas = clamp_t(unsigned, replicas, 1,
				   ARRAY_SIZE(fs_usage->persistent_reserved));

		fs_usage->reserved				+= sectors;
		fs_usage->persistent_reserved[replicas - 1]	+= sectors;
		break;
	}
	}

	preempt_enable();

	return ret;
}

int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
		  unsigned offset, s64 sectors,
		  struct bch_fs_usage *fs_usage,
		  u64 journal_seq, unsigned flags)
{
	int ret;

	percpu_down_read(&c->mark_lock);
	ret = bch2_mark_key_locked(c, k, offset, sectors,
				   fs_usage, journal_seq, flags);
	percpu_up_read(&c->mark_lock);

	return ret;
}

inline int bch2_mark_overwrite(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c old,
			       struct bkey_i *new,
			       struct bch_fs_usage *fs_usage,
			       unsigned flags,
			       bool is_extents)
{
	struct bch_fs		*c = trans->c;
	unsigned		offset = 0;
	s64			sectors = -((s64) old.k->size);

	flags |= BTREE_TRIGGER_OVERWRITE;

	if (is_extents
	    ? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
	    : bkey_cmp(new->k.p, old.k->p))
		return 0;

	if (is_extents) {
		switch (bch2_extent_overlap(&new->k, old.k)) {
		case BCH_EXTENT_OVERLAP_ALL:
			offset = 0;
			sectors = -((s64) old.k->size);
			break;
		case BCH_EXTENT_OVERLAP_BACK:
			offset = bkey_start_offset(&new->k) -
				bkey_start_offset(old.k);
			sectors = bkey_start_offset(&new->k) -
				old.k->p.offset;
			break;
		case BCH_EXTENT_OVERLAP_FRONT:
			offset = 0;
			sectors = bkey_start_offset(old.k) -
				new->k.p.offset;
			break;
		case BCH_EXTENT_OVERLAP_MIDDLE:
			offset = bkey_start_offset(&new->k) -
				bkey_start_offset(old.k);
			sectors = -((s64) new->k.size);
			flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
			break;
		}

		BUG_ON(sectors >= 0);
	}

	return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
				    trans->journal_res.seq, flags) ?: 1;
}

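/*
 * Mark a newly inserted key along with every existing key it overwrites:
 * bch2_mark_overwrite() above computes, for each overlapped key, which
 * part goes away (as an offset plus a negative sector count), and
 * bch2_mark_update() below walks the node iterator across all keys the
 * insert overlaps:
 */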
int bch2_mark_update(struct btree_trans *trans,
		     struct btree_iter *iter,
		     struct bkey_i *insert,
		     struct bch_fs_usage *fs_usage,
		     unsigned flags)
{
	struct bch_fs		*c = trans->c;
	struct btree		*b = iter->l[0].b;
	struct btree_node_iter	node_iter = iter->l[0].iter;
	struct bkey_packed	*_k;
	int ret = 0;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(iter->btree_id))
		return 0;

	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
		0, insert->k.size,
		fs_usage, trans->journal_res.seq,
		BTREE_TRIGGER_INSERT|flags);

	if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
		return 0;

	/*
	 * For non extents, we only mark the new key, not the key being
	 * overwritten - unless we're actually deleting:
	 */
	if ((iter->btree_id == BTREE_ID_ALLOC ||
	     iter->btree_id == BTREE_ID_EC) &&
	    !bkey_deleted(&insert->k))
		return 0;

	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
		struct bkey		unpacked;
		struct bkey_s_c		k = bkey_disassemble(b, _k, &unpacked);

		ret = bch2_mark_overwrite(trans, iter, k, insert,
					  fs_usage, flags,
					  btree_node_type_is_extents(iter->btree_id));
		if (ret <= 0)
			break;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return ret;
}

void bch2_trans_fs_usage_apply(struct btree_trans *trans,
			       struct bch_fs_usage *fs_usage)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	static int warned_disk_usage = 0;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	char buf[200];

	if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res,
				 trans->journal_res.seq) ||
	    warned_disk_usage ||
	    xchg(&warned_disk_usage, 1))
		return;

	bch_err(c, "disk usage increased more than %llu sectors reserved",
		disk_res_sectors);

	trans_for_each_update(trans, i) {
		struct btree_iter	*iter = i->iter;
		struct btree		*b = iter->l[0].b;
		struct btree_node_iter	node_iter = iter->l[0].iter;
		struct bkey_packed	*_k;

		pr_err("while inserting");
		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
		pr_err("%s", buf);
		pr_err("overlapping with");

		node_iter = iter->l[0].iter;
		while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
			struct bkey		unpacked;
			struct bkey_s_c		k;

			k = bkey_disassemble(b, _k, &unpacked);

			if (btree_node_is_extents(b)
			    ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
			    : bkey_cmp(i->k->k.p, k.k->p))
				break;

			bch2_bkey_val_to_text(&PBUF(buf), c, k);
			pr_err("%s", buf);

			bch2_btree_node_iter_advance(&node_iter, b);
		}
	}
}

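/*
 * Look up the key at @pos for transactional marking: if this transaction
 * already has an update for that position, return it (and 1) so we mark
 * against the value we're about to write rather than what's currently in
 * the btree; otherwise read through a slot iterator:
 */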
static int trans_get_key(struct btree_trans *trans,
			 enum btree_id btree_id, struct bpos pos,
			 struct btree_iter **iter,
			 struct bkey_s_c *k)
{
	struct btree_insert_entry *i;
	int ret;

	trans_for_each_update(trans, i)
		if (i->iter->btree_id == btree_id &&
		    (btree_node_type_is_extents(btree_id)
		     ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
		       bkey_cmp(pos, i->k->k.p) < 0
		     : !bkey_cmp(pos, i->iter->pos))) {
			*iter	= i->iter;
			*k	= bkey_i_to_s_c(i->k);
			return 1;
		}

	*iter = bch2_trans_get_iter(trans, btree_id, pos,
				    BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	if (IS_ERR(*iter))
		return PTR_ERR(*iter);

	*k = bch2_btree_iter_peek_slot(*iter);
	ret = bkey_err(*k);
	if (ret)
		bch2_trans_iter_put(trans, *iter);
	return ret;
}

static int bch2_trans_mark_pointer(struct btree_trans *trans,
			struct bkey_s_c k, struct extent_ptr_decoded p,
			s64 sectors, enum bch_data_type data_type)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
	struct btree_iter *iter;
	struct bkey_s_c k_a;
	struct bkey_alloc_unpacked u;
	struct bkey_i_alloc *a;
	int ret;

	ret = trans_get_key(trans, BTREE_ID_ALLOC,
			    POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr)),
			    &iter, &k_a);
	if (ret < 0)
		return ret;

	if (k_a.k->type != KEY_TYPE_alloc ||
	    (!ret && unlikely(!test_bit(BCH_FS_ALLOC_WRITTEN, &c->flags)))) {
		/*
		 * During journal replay, and if gc repairs alloc info at
		 * runtime, the alloc info in the btree might not be up to date
		 * yet - so, trust the in memory mark - unless we're already
		 * updating that key:
		 */
		struct bucket *g;
		struct bucket_mark m;

		percpu_down_read(&c->mark_lock);
		g	= bucket(ca, iter->pos.offset);
		m	= READ_ONCE(g->mark);
		u	= alloc_mem_to_key(g, m);
		percpu_up_read(&c->mark_lock);
	} else {
		u = bch2_alloc_unpack(k_a);
	}

	ret = __mark_pointer(c, k, p, sectors, data_type, u.gen, &u.data_type,
			     &u.dirty_sectors, &u.cached_sectors);
	if (ret)
		goto out;

	a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	bkey_alloc_init(&a->k_i);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);
	bch2_trans_update(trans, iter, &a->k_i, 0);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
			struct bch_extent_stripe_ptr p,
			s64 sectors, enum bch_data_type data_type,
			struct bch_replicas_padded *r,
			unsigned *nr_data,
			unsigned *nr_parity)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_stripe *s;
	int ret = 0;

	ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
	if (ret < 0)
		return ret;

	if (k.k->type != KEY_TYPE_stripe) {
		bch2_fs_inconsistent(c,
			"pointer to nonexistent stripe %llu",
			(u64) p.idx);
		ret = -EIO;
		goto out;
	}

	s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		goto out;

	bkey_reassemble(&s->k_i, k);

	stripe_blockcount_set(&s->v, p.block,
		stripe_blockcount_get(&s->v, p.block) +
		sectors);

	*nr_data	= s->v.nr_blocks - s->v.nr_redundant;
	*nr_parity	= s->v.nr_redundant;
	bch2_bkey_to_replicas(&r->e, bkey_i_to_s_c(&s->k_i));
	bch2_trans_update(trans, iter, &s->k_i, 0);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_trans_mark_extent(struct btree_trans *trans,
			struct bkey_s_c k, unsigned offset,
			s64 sectors, unsigned flags,
			enum bch_data_type data_type)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_replicas_padded r;
	s64 dirty_sectors = 0;
	bool stale;
	int ret;

	r.e.data_type	= data_type;
	r.e.nr_devs	= 0;
	r.e.nr_required	= 1;

	BUG_ON(!sectors);

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = data_type == BCH_DATA_BTREE
			? sectors
			: ptr_disk_sectors_delta(p, offset, sectors, flags);

		ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
					      data_type);
		if (ret < 0)
			return ret;

		stale = ret > 0;

		if (p.ptr.cached) {
			if (!stale)
				update_cached_sectors_list(trans, p.ptr.dev,
							   disk_sectors);
		} else if (!p.has_ec) {
			dirty_sectors	       += disk_sectors;
			r.e.devs[r.e.nr_devs++]	= p.ptr.dev;
		} else {
			struct bch_replicas_padded ec_r;
			unsigned nr_data, nr_parity;
			s64 parity_sectors;

			ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
					disk_sectors, data_type,
					&ec_r, &nr_data, &nr_parity);
			if (ret)
				return ret;

			parity_sectors =
				__ptr_disk_sectors_delta(p.crc.live_size,
					offset, sectors, flags,
					p.crc.compressed_size * nr_parity,
					p.crc.uncompressed_size * nr_data);

			update_replicas_list(trans, &ec_r.e,
					     disk_sectors + parity_sectors);

			r.e.nr_required = 0;
		}
	}

	if (r.e.nr_devs)
		update_replicas_list(trans, &r.e, dirty_sectors);

	return 0;
}

static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p,
			u64 idx, unsigned sectors,
			unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	struct bkey_s_c k;
	struct bkey_i_reflink_v *r_v;
	s64 ret;

	ret = trans_get_key(trans, BTREE_ID_REFLINK,
			    POS(0, idx), &iter, &k);
	if (ret < 0)
		return ret;

	if (k.k->type != KEY_TYPE_reflink_v) {
		bch2_fs_inconsistent(c,
			"%llu:%llu len %u points to nonexistent indirect extent %llu",
			p.k->p.inode, p.k->p.offset, p.k->size, idx);
		ret = -EIO;
		goto err;
	}

	if ((flags & BTREE_TRIGGER_OVERWRITE) &&
	    (bkey_start_offset(k.k) < idx ||
	     k.k->p.offset > idx + sectors))
		goto out;

	sectors = k.k->p.offset - idx;

	r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
	ret = PTR_ERR_OR_ZERO(r_v);
	if (ret)
		goto err;

	bkey_reassemble(&r_v->k_i, k);

	le64_add_cpu(&r_v->v.refcount,
		     !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);

	if (!r_v->v.refcount) {
		r_v->k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&r_v->k, 0);
	}

	bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
	BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);

	bch2_trans_update(trans, iter, &r_v->k_i, 0);
out:
	ret = sectors;
err:
	bch2_trans_iter_put(trans, iter);
	return ret;
}

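/*
 * A reflink pointer may span several indirect extents;
 * __bch2_trans_mark_reflink_p() above updates one indirect extent's
 * refcount and returns the number of sectors it covered, so the loop
 * below repeats until the whole range [idx, idx + sectors) is done:
 */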
static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p, unsigned offset,
			s64 sectors, unsigned flags)
{
	u64 idx = le64_to_cpu(p.v->idx) + offset;
	s64 ret = 0;

	sectors = abs(sectors);
	BUG_ON(offset + sectors > p.k->size);

	while (sectors) {
		ret = __bch2_trans_mark_reflink_p(trans, p, idx, sectors, flags);
		if (ret < 0)
			break;

		idx += ret;
		sectors = max_t(s64, 0LL, sectors - ret);
		ret = 0;
	}

	return ret;
}

int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
			unsigned offset, s64 sectors, unsigned flags)
{
	struct replicas_delta_list *d;
	struct bch_fs *c = trans->c;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		sectors = !(flags & BTREE_TRIGGER_OVERWRITE)
			?  c->opts.btree_node_size
			: -c->opts.btree_node_size;

		return bch2_trans_mark_extent(trans, k, offset, sectors,
					      flags, BCH_DATA_BTREE);
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return bch2_trans_mark_extent(trans, k, offset, sectors,
					      flags, BCH_DATA_USER);
	case KEY_TYPE_inode:
		d = replicas_deltas_realloc(trans, 0);

		if (!(flags & BTREE_TRIGGER_OVERWRITE))
			d->nr_inodes++;
		else
			d->nr_inodes--;
		return 0;
	case KEY_TYPE_reservation: {
		unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;

		d = replicas_deltas_realloc(trans, 0);

		sectors *= replicas;
		replicas = clamp_t(unsigned, replicas, 1,
				   ARRAY_SIZE(d->persistent_reserved));

		d->persistent_reserved[replicas - 1] += sectors;
		return 0;
	}
	case KEY_TYPE_reflink_p:
		return bch2_trans_mark_reflink_p(trans,
					bkey_s_c_to_reflink_p(k),
					offset, sectors, flags);
	default:
		return 0;
	}
}

int bch2_trans_mark_update(struct btree_trans *trans,
			   struct btree_iter *iter,
			   struct bkey_i *insert,
			   unsigned flags)
{
	struct btree		*b = iter->l[0].b;
	struct btree_node_iter	node_iter = iter->l[0].iter;
	struct bkey_packed	*_k;
	int ret;

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(iter->btree_id))
		return 0;

	ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
			0, insert->k.size, BTREE_TRIGGER_INSERT);
	if (ret)
		return ret;

	if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
		return 0;

	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
		struct bkey		unpacked;
		struct bkey_s_c		k;
		unsigned		offset = 0;
		s64			sectors = 0;
		unsigned		flags = BTREE_TRIGGER_OVERWRITE;

		k = bkey_disassemble(b, _k, &unpacked);

		if (btree_node_is_extents(b)
		    ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0
		    : bkey_cmp(insert->k.p, k.k->p))
			break;

		if (btree_node_is_extents(b)) {
			switch (bch2_extent_overlap(&insert->k, k.k)) {
			case BCH_EXTENT_OVERLAP_ALL:
				offset = 0;
				sectors = -((s64) k.k->size);
				break;
			case BCH_EXTENT_OVERLAP_BACK:
				offset = bkey_start_offset(&insert->k) -
					bkey_start_offset(k.k);
				sectors = bkey_start_offset(&insert->k) -
					k.k->p.offset;
				break;
			case BCH_EXTENT_OVERLAP_FRONT:
				offset = 0;
				sectors = bkey_start_offset(k.k) -
					insert->k.p.offset;
				break;
			case BCH_EXTENT_OVERLAP_MIDDLE:
				offset = bkey_start_offset(&insert->k) -
					bkey_start_offset(k.k);
				sectors = -((s64) insert->k.size);
				flags |= BTREE_TRIGGER_OVERWRITE_SPLIT;
				break;
			}

			BUG_ON(sectors >= 0);
		}

		ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
		if (ret)
			return ret;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return 0;
}

/* Disk reservations: */

static u64 bch2_recalc_sectors_available(struct bch_fs *c)
{
	percpu_u64_set(&c->pcpu->sectors_available, 0);

	return avail_factor(__bch2_fs_usage_read_short(c).free);
}

void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
	percpu_down_read(&c->mark_lock);
	this_cpu_sub(c->usage[0]->online_reserved,
		     res->sectors);
	percpu_up_read(&c->mark_lock);

	res->sectors = 0;
}

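/*
 * To avoid contention on the global sectors_available counter, each CPU
 * caches up to SECTORS_CACHE sectors locally: reservations are first
 * satisfied from pcpu->sectors_available, refilled from the global atomic
 * in batches; only when the global counter looks exhausted do we take
 * mark_lock for writing and recalculate from actual usage:
 */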
#define SECTORS_CACHE	1024

int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
			      unsigned sectors, int flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, v, get;
	s64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	v = atomic64_read(&c->sectors_available);
	do {
		old = v;
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			percpu_up_read(&c->mark_lock);
			goto recalculate;
		}
	} while ((v = atomic64_cmpxchg(&c->sectors_available,
				       old, old - get)) != old);

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(c->usage[0]->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	percpu_down_write(&c->mark_lock);

	sectors_available = bch2_recalc_sectors_available(c);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(c->usage[0]->online_reserved, sectors);
		res->sectors		+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -ENOSPC;
	}

	percpu_up_write(&c->mark_lock);

	return ret;
}

/* Startup/shutdown: */

static void buckets_free_rcu(struct rcu_head *rcu)
{
	struct bucket_array *buckets =
		container_of(rcu, struct bucket_array, rcu);

	kvpfree(buckets,
		sizeof(struct bucket_array) +
		buckets->nbuckets * sizeof(struct bucket));
}

int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_array *buckets = NULL, *old_buckets = NULL;
	unsigned long *buckets_nouse = NULL;
	alloc_fifo	free[RESERVE_NR];
	alloc_fifo	free_inc;
	alloc_heap	alloc_heap;
	copygc_heap	copygc_heap;

	size_t btree_reserve	= DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / c->opts.btree_node_size);
	/* XXX: these should be tunable */
	size_t reserve_none	= max_t(size_t, 1, nbuckets >> 9);
	size_t copygc_reserve	= max_t(size_t, 2, nbuckets >> 7);
	size_t free_inc_nr	= max(max_t(size_t, 1, nbuckets >> 12),
				      btree_reserve * 2);
	bool resize = ca->buckets[0] != NULL,
	     start_copygc = ca->copygc_thread != NULL;
	int ret = -ENOMEM;
	unsigned i;

	memset(&free,		0, sizeof(free));
	memset(&free_inc,	0, sizeof(free_inc));
	memset(&alloc_heap,	0, sizeof(alloc_heap));
	memset(&copygc_heap,	0, sizeof(copygc_heap));

	if (!(buckets		= kvpmalloc(sizeof(struct bucket_array) +
					    nbuckets * sizeof(struct bucket),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
					    sizeof(unsigned long),
					    GFP_KERNEL|__GFP_ZERO)) ||
	    !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
	    !init_fifo(&free[RESERVE_MOVINGGC],
		       copygc_reserve, GFP_KERNEL) ||
	    !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
	    !init_fifo(&free_inc,	free_inc_nr, GFP_KERNEL) ||
	    !init_heap(&alloc_heap,	ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
	    !init_heap(&copygc_heap,	copygc_reserve, GFP_KERNEL))
		goto err;

	buckets->first_bucket	= ca->mi.first_bucket;
	buckets->nbuckets	= nbuckets;

	bch2_copygc_stop(ca);

	if (resize) {
		down_write(&c->gc_lock);
		down_write(&ca->bucket_lock);
		percpu_down_write(&c->mark_lock);
	}

	old_buckets = bucket_array(ca);

	if (resize) {
		size_t n = min(buckets->nbuckets, old_buckets->nbuckets);

		memcpy(buckets->b,
		       old_buckets->b,
		       n * sizeof(struct bucket));
		memcpy(buckets_nouse,
		       ca->buckets_nouse,
		       BITS_TO_LONGS(n) * sizeof(unsigned long));
	}

	rcu_assign_pointer(ca->buckets[0], buckets);
	buckets = old_buckets;

	swap(ca->buckets_nouse, buckets_nouse);

	if (resize)
		percpu_up_write(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++) {
		fifo_move(&free[i], &ca->free[i]);
		swap(ca->free[i], free[i]);
	}
	fifo_move(&free_inc, &ca->free_inc);
	swap(ca->free_inc, free_inc);
	spin_unlock(&c->freelist_lock);

	/* with gc lock held, alloc_heap can't be in use: */
	swap(ca->alloc_heap, alloc_heap);

	/* and we shut down copygc: */
	swap(ca->copygc_heap, copygc_heap);

	nbuckets = ca->mi.nbuckets;

	if (resize) {
		up_write(&ca->bucket_lock);
		up_write(&c->gc_lock);
	}

	if (start_copygc &&
	    bch2_copygc_start(c, ca))
		bch_err(ca, "error restarting copygc thread");

	ret = 0;
err:
	free_heap(&copygc_heap);
	free_heap(&alloc_heap);
	free_fifo(&free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&free[i]);
	kvpfree(buckets_nouse,
		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
	if (old_buckets)
		call_rcu(&old_buckets->rcu, buckets_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	unsigned i;

	free_heap(&ca->copygc_heap);
	free_heap(&ca->alloc_heap);
	free_fifo(&ca->free_inc);
	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);
	kvpfree(ca->buckets_nouse,
		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
	kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
		sizeof(struct bucket_array) +
		ca->mi.nbuckets * sizeof(struct bucket));

	free_percpu(ca->usage[0]);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
		return -ENOMEM;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}